      1 /*$NetBSD: ixv.c,v 1.68 2017/10/03 03:12:29 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
     55  *   Used by probe to select the devices to attach to
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
    148  * FreeBSD Device Interface Entry Points
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
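/*
 * NetBSD has no FreeBSD-style loader tunables; TUNABLE_INT is defined
 * away below so the TUNABLE_INT() declarations compile as no-ops.
 */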
    171 #define	TUNABLE_INT(__x, __y)
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    173 
    174 /*
    175  * AIM: Adaptive Interrupt Moderation
    176  * The interrupt rate is varied over time
    177  * based on the traffic seen on each
    178  * interrupt vector.
    179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    190 
    191 /*
    192  * Number of TX descriptors per ring;
    193  * set higher than RX as this seems to be
    194  * the better-performing choice.
    195  */
    196 static int ixv_txd = PERFORM_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = PERFORM_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
    207 #ifdef NET_MPSAFE
    208 #define IXGBE_MPSAFE		1
    209 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    210 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    211 #else
    212 #define IXGBE_CALLOUT_FLAGS	0
    213 #define IXGBE_SOFTINFT_FLAGS	0
    214 #endif
    215 
    216 #if 0
    217 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    218 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    219 #endif
    220 
    221 /************************************************************************
    222  * ixv_probe - Device identification routine
    223  *
    224  *   Determines if the driver should be loaded on
    225  *   the adapter based on its PCI vendor/device ID.
    226  *
    227  *   return 1 on match, 0 otherwise (NetBSD autoconf convention)
    228  ************************************************************************/
    229 static int
    230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    231 {
    232 #ifdef __HAVE_PCI_MSI_MSIX
    233 	const struct pci_attach_args *pa = aux;
    234 
    235 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    236 #else
    237 	return 0;
    238 #endif
    239 } /* ixv_probe */
    240 
    241 static ixgbe_vendor_info_t *
    242 ixv_lookup(const struct pci_attach_args *pa)
    243 {
    244 	ixgbe_vendor_info_t *ent;
    245 	pcireg_t subid;
    246 
    247 	INIT_DEBUGOUT("ixv_lookup: begin");
    248 
    249 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    250 		return NULL;
    251 
    252 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    253 
    254 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    255 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    256 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    257 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    258 		     (ent->subvendor_id == 0)) &&
    259 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    260 		     (ent->subdevice_id == 0))) {
    261 			return ent;
    262 		}
    263 	}
    264 
    265 	return NULL;
    266 }
    267 
    268 /************************************************************************
    269  * ixv_attach - Device initialization routine
    270  *
    271  *   Called when the driver is being loaded.
    272  *   Identifies the type of hardware, allocates all resources
    273  *   and initializes the hardware.
    274  *
    275  *   Attach returns void; on failure the acquired resources are released.
    276  ************************************************************************/
    277 static void
    278 ixv_attach(device_t parent, device_t dev, void *aux)
    279 {
    280 	struct adapter *adapter;
    281 	struct ixgbe_hw *hw;
    282 	int             error = 0;
    283 	pcireg_t	id, subid;
    284 	ixgbe_vendor_info_t *ent;
    285 	const struct pci_attach_args *pa = aux;
    286 	const char *apivstr;
    287 	const char *str;
    288 	char buf[256];
    289 
    290 	INIT_DEBUGOUT("ixv_attach: begin");
    291 
    292 	/*
    293 	 * Make sure BUSMASTER is set; on a VM under
    294 	 * KVM it may not be, and that will break things.
    295 	 */
    296 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    297 
    298 	/* Allocate, clear, and link in our adapter structure */
    299 	adapter = device_private(dev);
    300 	adapter->dev = dev;
    301 	adapter->hw.back = adapter;
    302 	hw = &adapter->hw;
    303 
    304 	adapter->init_locked = ixv_init_locked;
    305 	adapter->stop_locked = ixv_stop;
    306 
    307 	adapter->osdep.pc = pa->pa_pc;
    308 	adapter->osdep.tag = pa->pa_tag;
    309 	if (pci_dma64_available(pa))
    310 		adapter->osdep.dmat = pa->pa_dmat64;
    311 	else
    312 		adapter->osdep.dmat = pa->pa_dmat;
    313 	adapter->osdep.attached = false;
    314 
    315 	ent = ixv_lookup(pa);
    316 
    317 	KASSERT(ent != NULL);
    318 
    319 	aprint_normal(": %s, Version - %s\n",
    320 	    ixv_strings[ent->index], ixv_driver_version);
    321 
    322 	/* Core Lock Init */
    323 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    324 
    325 	/* Do base PCI setup - map BAR0 */
    326 	if (ixv_allocate_pci_resources(adapter, pa)) {
    327 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    328 		error = ENXIO;
    329 		goto err_out;
    330 	}
    331 
    332 	/* SYSCTL APIs */
    333 	ixv_add_device_sysctls(adapter);
    334 
    335 	/* Set up the timer callout */
    336 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    337 
    338 	/* Save off the information about this board */
    339 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    340 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    341 	hw->vendor_id = PCI_VENDOR(id);
    342 	hw->device_id = PCI_PRODUCT(id);
    343 	hw->revision_id =
    344 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    345 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    346 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    347 
    348 	/* A subset of set_mac_type */
    349 	switch (hw->device_id) {
    350 	case IXGBE_DEV_ID_82599_VF:
    351 		hw->mac.type = ixgbe_mac_82599_vf;
    352 		str = "82599 VF";
    353 		break;
    354 	case IXGBE_DEV_ID_X540_VF:
    355 		hw->mac.type = ixgbe_mac_X540_vf;
    356 		str = "X540 VF";
    357 		break;
    358 	case IXGBE_DEV_ID_X550_VF:
    359 		hw->mac.type = ixgbe_mac_X550_vf;
    360 		str = "X550 VF";
    361 		break;
    362 	case IXGBE_DEV_ID_X550EM_X_VF:
    363 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    364 		str = "X550EM X VF";
    365 		break;
    366 	case IXGBE_DEV_ID_X550EM_A_VF:
    367 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    368 		str = "X550EM A VF";
    369 		break;
    370 	default:
    371 		/* Shouldn't get here since probe succeeded */
    372 		aprint_error_dev(dev, "Unknown device ID!\n");
    373 		error = ENXIO;
    374 		goto err_out;
    375 		break;
    376 	}
    377 	aprint_normal_dev(dev, "device %s\n", str);
    378 
    379 	ixv_init_device_features(adapter);
    380 
    381 	/* Initialize the shared code */
    382 	error = ixgbe_init_ops_vf(hw);
    383 	if (error) {
    384 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    385 		error = EIO;
    386 		goto err_out;
    387 	}
    388 
    389 	/* Setup the mailbox */
    390 	ixgbe_init_mbx_params_vf(hw);
    391 
    392 	/* Set the right number of segments */
    393 	adapter->num_segs = IXGBE_82599_SCATTER;
    394 
    395 	/* Reset mbox api to 1.0 */
    396 	error = hw->mac.ops.reset_hw(hw);
    397 	if (error == IXGBE_ERR_RESET_FAILED)
    398 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    399 	else if (error)
    400 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    401 		    error);
    402 	if (error) {
    403 		error = EIO;
    404 		goto err_out;
    405 	}
    406 
    407 	error = hw->mac.ops.init_hw(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "...init_hw() failed!\n");
    410 		error = EIO;
    411 		goto err_out;
    412 	}
    413 
    414 	/* Negotiate mailbox API version */
    415 	error = ixv_negotiate_api(adapter);
    416 	if (error)
    417 		aprint_normal_dev(dev,
    418 		    "MBX API negotiation failed during attach!\n");
    419 	switch (hw->api_version) {
    420 	case ixgbe_mbox_api_10:
    421 		apivstr = "1.0";
    422 		break;
    423 	case ixgbe_mbox_api_20:
    424 		apivstr = "2.0";
    425 		break;
    426 	case ixgbe_mbox_api_11:
    427 		apivstr = "1.1";
    428 		break;
    429 	case ixgbe_mbox_api_12:
    430 		apivstr = "1.2";
    431 		break;
    432 	case ixgbe_mbox_api_13:
    433 		apivstr = "1.3";
    434 		break;
    435 	default:
    436 		apivstr = "unknown";
    437 		break;
    438 	}
    439 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    440 
    441 	/* If no MAC address was assigned, make a random one */
    442 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    443 		u8 addr[ETHER_ADDR_LEN];
    444 		uint64_t rndval = cprng_strong64();
    445 
    446 		memcpy(addr, &rndval, sizeof(addr));
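		/* Clear the multicast bit and set the locally administered bit in the first octet */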
    447 		addr[0] &= 0xFE;
    448 		addr[0] |= 0x02;
    449 		bcopy(addr, hw->mac.addr, sizeof(addr));
    450 	}
    451 
    452 	/* Register for VLAN events */
    453 #if 0 /* XXX delete after write? */
    454 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    455 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    456 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    457 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    458 #endif
    459 
    460 	/* Sysctls for limiting the amount of work done in the taskqueues */
    461 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    462 	    "max number of rx packets to process",
    463 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    464 
    465 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    466 	    "max number of tx packets to process",
    467 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    468 
    469 	/* Do descriptor calc and sanity checks */
    470 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    471 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    472 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    473 		adapter->num_tx_desc = DEFAULT_TXD;
    474 	} else
    475 		adapter->num_tx_desc = ixv_txd;
    476 
    477 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    478 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    479 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    480 		adapter->num_rx_desc = DEFAULT_RXD;
    481 	} else
    482 		adapter->num_rx_desc = ixv_rxd;
    483 
    484 	/* Setup MSI-X */
    485 	error = ixv_configure_interrupts(adapter);
    486 	if (error)
    487 		goto err_out;
    488 
    489 	/* Allocate our TX/RX Queues */
    490 	if (ixgbe_allocate_queues(adapter)) {
    491 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    492 		error = ENOMEM;
    493 		goto err_out;
    494 	}
    495 
    496 	/* hw.ixv defaults init */
    497 	adapter->enable_aim = ixv_enable_aim;
    498 
    499 	/* Setup OS specific network interface */
    500 	ixv_setup_interface(dev, adapter);
    501 
    502 	error = ixv_allocate_msix(adapter, pa);
    503 	if (error) {
    504 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    505 		goto err_late;
    506 	}
    507 
    508 	/* Do the stats setup */
    509 	ixv_save_stats(adapter);
    510 	ixv_init_stats(adapter);
    511 	ixv_add_stats_sysctls(adapter);
    512 
    513 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    514 		ixgbe_netmap_attach(adapter);
    515 
    516 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    517 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    518 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    519 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    520 
    521 	INIT_DEBUGOUT("ixv_attach: end");
    522 	adapter->osdep.attached = true;
    523 
    524 	return;
    525 
    526 err_late:
    527 	ixgbe_free_transmit_structures(adapter);
    528 	ixgbe_free_receive_structures(adapter);
    529 	free(adapter->queues, M_DEVBUF);
    530 err_out:
    531 	ixv_free_pci_resources(adapter);
    532 	IXGBE_CORE_LOCK_DESTROY(adapter);
    533 
    534 	return;
    535 } /* ixv_attach */
    536 
    537 /************************************************************************
    538  * ixv_detach - Device removal routine
    539  *
    540  *   Called when the driver is being removed.
    541  *   Stops the adapter and deallocates all the resources
    542  *   that were allocated for driver operation.
    543  *
    544  *   return 0 on success, positive on failure
    545  ************************************************************************/
    546 static int
    547 ixv_detach(device_t dev, int flags)
    548 {
    549 	struct adapter  *adapter = device_private(dev);
    550 	struct ixgbe_hw *hw = &adapter->hw;
    551 	struct ix_queue *que = adapter->queues;
    552 	struct tx_ring *txr = adapter->tx_rings;
    553 	struct rx_ring *rxr = adapter->rx_rings;
    554 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    555 
    556 	INIT_DEBUGOUT("ixv_detach: begin");
    557 	if (adapter->osdep.attached == false)
    558 		return 0;
    559 
    560 	/* Stop the interface. Callouts are stopped in it. */
    561 	ixv_ifstop(adapter->ifp, 1);
    562 
    563 #if NVLAN > 0
    564 	/* Make sure VLANs are not using driver */
    565 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    566 		;	/* nothing to do: no VLANs */
    567 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    568 		vlan_ifdetach(adapter->ifp);
    569 	else {
    570 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    571 		return EBUSY;
    572 	}
    573 #endif
    574 
    575 	IXGBE_CORE_LOCK(adapter);
    576 	ixv_stop(adapter);
    577 	IXGBE_CORE_UNLOCK(adapter);
    578 
    579 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    580 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    581 			softint_disestablish(txr->txr_si);
    582 		softint_disestablish(que->que_si);
    583 	}
    584 
    585 	/* Drain the Mailbox(link) queue */
    586 	softint_disestablish(adapter->link_si);
    587 
    588 	/* Unregister VLAN events */
    589 #if 0 /* XXX msaitoh delete after write? */
    590 	if (adapter->vlan_attach != NULL)
    591 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    592 	if (adapter->vlan_detach != NULL)
    593 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    594 #endif
    595 
    596 	ether_ifdetach(adapter->ifp);
    597 	callout_halt(&adapter->timer, NULL);
    598 
    599 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    600 		netmap_detach(adapter->ifp);
    601 
    602 	ixv_free_pci_resources(adapter);
    603 #if 0 /* XXX the NetBSD port is probably missing something here */
    604 	bus_generic_detach(dev);
    605 #endif
    606 	if_detach(adapter->ifp);
    607 	if_percpuq_destroy(adapter->ipq);
    608 
    609 	sysctl_teardown(&adapter->sysctllog);
    610 	evcnt_detach(&adapter->handleq);
    611 	evcnt_detach(&adapter->req);
    612 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    613 	evcnt_detach(&adapter->mbuf_defrag_failed);
    614 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    615 	evcnt_detach(&adapter->einval_tx_dma_setup);
    616 	evcnt_detach(&adapter->other_tx_dma_setup);
    617 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    618 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    619 	evcnt_detach(&adapter->watchdog_events);
    620 	evcnt_detach(&adapter->tso_err);
    621 	evcnt_detach(&adapter->link_irq);
    622 
    623 	txr = adapter->tx_rings;
    624 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    625 		evcnt_detach(&adapter->queues[i].irqs);
    626 		evcnt_detach(&txr->no_desc_avail);
    627 		evcnt_detach(&txr->total_packets);
    628 		evcnt_detach(&txr->tso_tx);
    629 #ifndef IXGBE_LEGACY_TX
    630 		evcnt_detach(&txr->pcq_drops);
    631 #endif
    632 
    633 		evcnt_detach(&rxr->rx_packets);
    634 		evcnt_detach(&rxr->rx_bytes);
    635 		evcnt_detach(&rxr->rx_copies);
    636 		evcnt_detach(&rxr->no_jmbuf);
    637 		evcnt_detach(&rxr->rx_discarded);
    638 	}
    639 	evcnt_detach(&stats->ipcs);
    640 	evcnt_detach(&stats->l4cs);
    641 	evcnt_detach(&stats->ipcs_bad);
    642 	evcnt_detach(&stats->l4cs_bad);
    643 
    644 	/* Packet Reception Stats */
    645 	evcnt_detach(&stats->vfgorc);
    646 	evcnt_detach(&stats->vfgprc);
    647 	evcnt_detach(&stats->vfmprc);
    648 
    649 	/* Packet Transmission Stats */
    650 	evcnt_detach(&stats->vfgotc);
    651 	evcnt_detach(&stats->vfgptc);
    652 
    653 	/* Mailbox Stats */
    654 	evcnt_detach(&hw->mbx.stats.msgs_tx);
    655 	evcnt_detach(&hw->mbx.stats.msgs_rx);
    656 	evcnt_detach(&hw->mbx.stats.acks);
    657 	evcnt_detach(&hw->mbx.stats.reqs);
    658 	evcnt_detach(&hw->mbx.stats.rsts);
    659 
    660 	ixgbe_free_transmit_structures(adapter);
    661 	ixgbe_free_receive_structures(adapter);
    662 	free(adapter->queues, M_DEVBUF);
    663 
    664 	IXGBE_CORE_LOCK_DESTROY(adapter);
    665 
    666 	return (0);
    667 } /* ixv_detach */
    668 
    669 /************************************************************************
    670  * ixv_init_locked - Init entry point
    671  *
    672  *   Used in two ways: it is the init entry point for the stack
    673  *   in the network interface structure, and it is also used
    674  *   by the driver as a hw/sw initialization routine to get
    675  *   the hardware to a consistent state.
    676  *
    677  *   The routine returns nothing; on failure the adapter is stopped.
    678  ************************************************************************/
    679 static void
    680 ixv_init_locked(struct adapter *adapter)
    681 {
    682 	struct ifnet	*ifp = adapter->ifp;
    683 	device_t 	dev = adapter->dev;
    684 	struct ixgbe_hw *hw = &adapter->hw;
    685 	struct ix_queue	*que = adapter->queues;
    686 	int             error = 0;
    687 	uint32_t mask;
    688 	int i;
    689 
    690 	INIT_DEBUGOUT("ixv_init_locked: begin");
    691 	KASSERT(mutex_owned(&adapter->core_mtx));
    692 	hw->adapter_stopped = FALSE;
    693 	hw->mac.ops.stop_adapter(hw);
    694 	callout_stop(&adapter->timer);
    695 
    696 	/* reprogram the RAR[0] in case user changed it. */
    697 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    698 
    699 	/* Get the latest MAC address; the user may have set a LAA */
    700 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    701 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    702 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    703 
    704 	/* Prepare transmit descriptors and buffers */
    705 	if (ixgbe_setup_transmit_structures(adapter)) {
    706 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    707 		ixv_stop(adapter);
    708 		return;
    709 	}
    710 
    711 	/* Reset VF and renegotiate mailbox API version */
    712 	hw->mac.ops.reset_hw(hw);
    713 	error = ixv_negotiate_api(adapter);
    714 	if (error)
    715 		device_printf(dev,
    716 		    "Mailbox API negotiation failed in init_locked!\n");
    717 
    718 	ixv_initialize_transmit_units(adapter);
    719 
    720 	/* Setup Multicast table */
    721 	ixv_set_multi(adapter);
    722 
    723 	/*
    724 	 * Determine the correct mbuf pool
    725 	 * for doing jumbo/headersplit
    726 	 */
    727 	if (ifp->if_mtu > ETHERMTU)
    728 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    729 	else
    730 		adapter->rx_mbuf_sz = MCLBYTES;
    731 
    732 	/* Prepare receive descriptors and buffers */
    733 	if (ixgbe_setup_receive_structures(adapter)) {
    734 		device_printf(dev, "Could not setup receive structures\n");
    735 		ixv_stop(adapter);
    736 		return;
    737 	}
    738 
    739 	/* Configure RX settings */
    740 	ixv_initialize_receive_units(adapter);
    741 
    742 #if 0 /* XXX isn't it required? -- msaitoh  */
    743 	/* Set the various hardware offload abilities */
    744 	ifp->if_hwassist = 0;
    745 	if (ifp->if_capenable & IFCAP_TSO4)
    746 		ifp->if_hwassist |= CSUM_TSO;
    747 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    748 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    749 #if __FreeBSD_version >= 800000
    750 		ifp->if_hwassist |= CSUM_SCTP;
    751 #endif
    752 	}
    753 #endif
    754 
    755 	/* Set up VLAN offload and filter */
    756 	ixv_setup_vlan_support(adapter);
    757 
    758 	/* Set up MSI-X routing */
    759 	ixv_configure_ivars(adapter);
    760 
    761 	/* Set up auto-mask */
    762 	mask = (1 << adapter->vector);
    763 	for (i = 0; i < adapter->num_queues; i++, que++)
    764 		mask |= (1 << que->msix);
    765 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
    766 
    767 	/* Set moderation on the Link interrupt */
    768 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    769 
    770 	/* Stats init */
    771 	ixv_init_stats(adapter);
    772 
    773 	/* Config/Enable Link */
    774 	hw->mac.get_link_status = TRUE;
    775 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    776 	    FALSE);
    777 
    778 	/* Start watchdog */
    779 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    780 
    781 	/* And now turn on interrupts */
    782 	ixv_enable_intr(adapter);
    783 
    784 	/* Now inform the stack we're ready */
    785 	ifp->if_flags |= IFF_RUNNING;
    786 	ifp->if_flags &= ~IFF_OACTIVE;
    787 
    788 	return;
    789 } /* ixv_init_locked */
    790 
    791 /*
    792  * MSI-X Interrupt Handlers and Tasklets
    793  */
    794 
    795 static inline void
    796 ixv_enable_queue(struct adapter *adapter, u32 vector)
    797 {
    798 	struct ixgbe_hw *hw = &adapter->hw;
    799 	u32             queue = 1 << vector;
    800 	u32             mask;
    801 
    802 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    803 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    804 } /* ixv_enable_queue */
    805 
    806 static inline void
    807 ixv_disable_queue(struct adapter *adapter, u32 vector)
    808 {
    809 	struct ixgbe_hw *hw = &adapter->hw;
    810 	u64             queue = (u64)1 << vector;
    811 	u32             mask;
    812 
    813 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    814 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    815 } /* ixv_disable_queue */
    816 
    817 static inline void
    818 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    819 {
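	/* Writing VTEICS sets the interrupt cause bits for these queues, forcing their MSI-X interrupts to fire */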
    820 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    821 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    822 } /* ixv_rearm_queues */
    823 
    824 
    825 /************************************************************************
    826  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    827  ************************************************************************/
    828 static int
    829 ixv_msix_que(void *arg)
    830 {
    831 	struct ix_queue	*que = arg;
    832 	struct adapter  *adapter = que->adapter;
    833 	struct tx_ring	*txr = que->txr;
    834 	struct rx_ring	*rxr = que->rxr;
    835 	bool		more;
    836 	u32		newitr = 0;
    837 
    838 	ixv_disable_queue(adapter, que->msix);
    839 	++que->irqs.ev_count;
    840 
    841 #ifdef __NetBSD__
    842 	/* Don't run ixgbe_rxeof in interrupt context */
    843 	more = true;
    844 #else
    845 	more = ixgbe_rxeof(que);
    846 #endif
    847 
    848 	IXGBE_TX_LOCK(txr);
    849 	ixgbe_txeof(txr);
    850 	IXGBE_TX_UNLOCK(txr);
    851 
    852 	/* Do AIM now? */
    853 
    854 	if (adapter->enable_aim == false)
    855 		goto no_calc;
    856 	/*
    857 	 * Do Adaptive Interrupt Moderation:
    858 	 *  - Write out last calculated setting
    859 	 *  - Calculate based on average size over
    860 	 *    the last interval.
    861 	 */
    862 	if (que->eitr_setting)
    863 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    864 		    que->eitr_setting);
    865 
    866 	que->eitr_setting = 0;
    867 
    868 	/* Idle, do nothing */
    869 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    870 		goto no_calc;
    871 
    872 	if ((txr->bytes) && (txr->packets))
    873 		newitr = txr->bytes/txr->packets;
    874 	if ((rxr->bytes) && (rxr->packets))
    875 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    876 	newitr += 24; /* account for hardware frame, crc */
    877 
    878 	/* set an upper boundary */
    879 	newitr = min(newitr, 3000);
    880 
    881 	/* Be nice to the mid range */
    882 	if ((newitr > 300) && (newitr < 1200))
    883 		newitr = (newitr / 3);
    884 	else
    885 		newitr = (newitr / 2);
    886 
    887 	newitr |= newitr << 16;
    888 
    889 	/* save for next interrupt */
    890 	que->eitr_setting = newitr;
    891 
    892 	/* Reset state */
    893 	txr->bytes = 0;
    894 	txr->packets = 0;
    895 	rxr->bytes = 0;
    896 	rxr->packets = 0;
    897 
    898 no_calc:
    899 	if (more)
    900 		softint_schedule(que->que_si);
    901 	else /* Re-enable this interrupt */
    902 		ixv_enable_queue(adapter, que->msix);
    903 
    904 	return 1;
    905 } /* ixv_msix_que */
    906 
    907 /************************************************************************
    908  * ixv_msix_mbx
    909  ************************************************************************/
    910 static int
    911 ixv_msix_mbx(void *arg)
    912 {
    913 	struct adapter	*adapter = arg;
    914 	struct ixgbe_hw *hw = &adapter->hw;
    915 	u32		reg;
    916 
    917 	++adapter->link_irq.ev_count;
    918 
    919 	/* First get the cause */
    920 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICR);
    921 #if 0	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
    922 	/* Clear interrupt with write */
    923 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, (1 << adapter->vector));
    924 #endif
    925 
    926 	/* Link status change */
    927 	if (reg & (1 << adapter->vector))
    928 		softint_schedule(adapter->link_si);
    929 
    930 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
    931 
    932 	return 1;
    933 } /* ixv_msix_mbx */
    934 
    935 /************************************************************************
    936  * ixv_media_status - Media Ioctl callback
    937  *
    938  *   Called whenever the user queries the status of
    939  *   the interface using ifconfig.
    940  ************************************************************************/
    941 static void
    942 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
    943 {
    944 	struct adapter *adapter = ifp->if_softc;
    945 
    946 	INIT_DEBUGOUT("ixv_media_status: begin");
    947 	IXGBE_CORE_LOCK(adapter);
    948 	ixv_update_link_status(adapter);
    949 
    950 	ifmr->ifm_status = IFM_AVALID;
    951 	ifmr->ifm_active = IFM_ETHER;
    952 
    953 	if (!adapter->link_active) {
    954 		ifmr->ifm_active |= IFM_NONE;
    955 		IXGBE_CORE_UNLOCK(adapter);
    956 		return;
    957 	}
    958 
    959 	ifmr->ifm_status |= IFM_ACTIVE;
    960 
    961 	switch (adapter->link_speed) {
    962 		case IXGBE_LINK_SPEED_10GB_FULL:
    963 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    964 			break;
    965 		case IXGBE_LINK_SPEED_1GB_FULL:
    966 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    967 			break;
    968 		case IXGBE_LINK_SPEED_100_FULL:
    969 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    970 			break;
    971 		case IXGBE_LINK_SPEED_10_FULL:
    972 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    973 			break;
    974 	}
    975 
    976 	IXGBE_CORE_UNLOCK(adapter);
    977 
    978 	return;
    979 } /* ixv_media_status */
    980 
    981 /************************************************************************
    982  * ixv_media_change - Media Ioctl callback
    983  *
    984  *   Called when the user changes speed/duplex using
    985  *   media/mediaopt options with ifconfig.
    986  ************************************************************************/
    987 static int
    988 ixv_media_change(struct ifnet *ifp)
    989 {
    990 	struct adapter *adapter = ifp->if_softc;
    991 	struct ifmedia *ifm = &adapter->media;
    992 
    993 	INIT_DEBUGOUT("ixv_media_change: begin");
    994 
    995 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    996 		return (EINVAL);
    997 
    998 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    999 	case IFM_AUTO:
   1000 		break;
   1001 	default:
   1002 		device_printf(adapter->dev, "Only auto media type\n");
   1003 		return (EINVAL);
   1004 	}
   1005 
   1006 	return (0);
   1007 } /* ixv_media_change */
   1008 
   1009 
   1010 /************************************************************************
   1011  * ixv_negotiate_api
   1012  *
   1013  *   Negotiate the Mailbox API with the PF;
   1014  *   the newest supported API version is tried first.
   1015  ************************************************************************/
   1016 static int
   1017 ixv_negotiate_api(struct adapter *adapter)
   1018 {
   1019 	struct ixgbe_hw *hw = &adapter->hw;
   1020 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1021 	                              ixgbe_mbox_api_10,
   1022 	                              ixgbe_mbox_api_unknown };
   1023 	int             i = 0;
   1024 
   1025 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1026 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1027 			return (0);
   1028 		i++;
   1029 	}
   1030 
   1031 	return (EINVAL);
   1032 } /* ixv_negotiate_api */
   1033 
   1034 
   1035 /************************************************************************
   1036  * ixv_set_multi - Multicast Update
   1037  *
   1038  *   Called whenever multicast address list is updated.
   1039  ************************************************************************/
   1040 static void
   1041 ixv_set_multi(struct adapter *adapter)
   1042 {
   1043 	struct ether_multi *enm;
   1044 	struct ether_multistep step;
   1045 	struct ethercom *ec = &adapter->osdep.ec;
   1046 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1047 	u8                 *update_ptr;
   1048 	int                mcnt = 0;
   1049 
   1050 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1051 
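	/* Gather the multicast addresses into a flat array of 6-byte entries for the shared code */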
   1052 	ETHER_FIRST_MULTI(step, ec, enm);
   1053 	while (enm != NULL) {
   1054 		bcopy(enm->enm_addrlo,
   1055 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1056 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1057 		mcnt++;
   1058 		/* XXX This might be required --msaitoh */
   1059 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1060 			break;
   1061 		ETHER_NEXT_MULTI(step, enm);
   1062 	}
   1063 
   1064 	update_ptr = mta;
   1065 
   1066 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1067 	    ixv_mc_array_itr, TRUE);
   1068 
   1069 	return;
   1070 } /* ixv_set_multi */
   1071 
   1072 /************************************************************************
   1073  * ixv_mc_array_itr
   1074  *
   1075  *   An iterator function needed by the multicast shared code.
   1076  *   It feeds the shared code routine the addresses in the
   1077  *   array of ixv_set_multi() one by one.
   1078  ************************************************************************/
   1079 static u8 *
   1080 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1081 {
   1082 	u8 *addr = *update_ptr;
   1083 	u8 *newptr;
   1084 	*vmdq = 0;
   1085 
   1086 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1087 	*update_ptr = newptr;
   1088 
   1089 	return addr;
   1090 } /* ixv_mc_array_itr */
   1091 
   1092 /************************************************************************
   1093  * ixv_local_timer - Timer routine
   1094  *
   1095  *   Checks for link status, updates statistics,
   1096  *   and runs the watchdog check.
   1097  ************************************************************************/
   1098 static void
   1099 ixv_local_timer(void *arg)
   1100 {
   1101 	struct adapter *adapter = arg;
   1102 
   1103 	IXGBE_CORE_LOCK(adapter);
   1104 	ixv_local_timer_locked(adapter);
   1105 	IXGBE_CORE_UNLOCK(adapter);
   1106 }
   1107 
   1108 static void
   1109 ixv_local_timer_locked(void *arg)
   1110 {
   1111 	struct adapter	*adapter = arg;
   1112 	device_t	dev = adapter->dev;
   1113 	struct ix_queue	*que = adapter->queues;
   1114 	u64		queues = 0;
   1115 	int		hung = 0;
   1116 
   1117 	KASSERT(mutex_owned(&adapter->core_mtx));
   1118 
   1119 	ixv_check_link(adapter);
   1120 
   1121 	/* Stats Update */
   1122 	ixv_update_stats(adapter);
   1123 
   1124 	/*
   1125 	 * Check the TX queues status
   1126 	 *      - mark hung queues so we don't schedule on them
   1127 	 *      - watchdog only if all queues show hung
   1128 	 */
   1129 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1130 		/* Keep track of queues with work for soft irq */
   1131 		if (que->txr->busy)
   1132 			queues |= ((u64)1 << que->me);
   1133 		/*
   1134 		 * Each time txeof runs without cleaning while there
   1135 		 * are uncleaned descriptors, it increments busy. If
   1136 		 * we reach the MAX we declare the queue hung.
   1137 		 */
   1138 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1139 			++hung;
   1140 			/* Mark the queue as inactive */
   1141 			adapter->active_queues &= ~((u64)1 << que->me);
   1142 			continue;
   1143 		} else {
   1144 			/* Check if we've come back from hung */
   1145 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1146 				adapter->active_queues |= ((u64)1 << que->me);
   1147 		}
   1148 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1149 			device_printf(dev,
   1150 			    "Warning queue %d appears to be hung!\n", i);
   1151 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1152 			++hung;
   1153 		}
   1154 	}
   1155 
   1156 	/* Only truly watchdog if all queues show hung */
   1157 	if (hung == adapter->num_queues)
   1158 		goto watchdog;
   1159 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1160 		ixv_rearm_queues(adapter, queues);
   1161 	}
   1162 
   1163 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1164 
   1165 	return;
   1166 
   1167 watchdog:
   1168 
   1169 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1170 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1171 	adapter->watchdog_events.ev_count++;
   1172 	ixv_init_locked(adapter);
   1173 } /* ixv_local_timer */
   1174 
   1175 /************************************************************************
   1176  * ixv_update_link_status - Update OS on link state
   1177  *
   1178  * Note: Only updates the OS on the cached link state.
   1179  *       The real check of the hardware only happens with
   1180  *       a link interrupt.
   1181  ************************************************************************/
   1182 static void
   1183 ixv_update_link_status(struct adapter *adapter)
   1184 {
   1185 	struct ifnet *ifp = adapter->ifp;
   1186 	device_t     dev = adapter->dev;
   1187 
   1188 	if (adapter->link_up) {
   1189 		if (adapter->link_active == FALSE) {
   1190 			if (bootverbose) {
   1191 				const char *bpsmsg;
   1192 
   1193 				switch (adapter->link_speed) {
   1194 				case IXGBE_LINK_SPEED_10GB_FULL:
   1195 					bpsmsg = "10 Gbps";
   1196 					break;
   1197 				case IXGBE_LINK_SPEED_5GB_FULL:
   1198 					bpsmsg = "5 Gbps";
   1199 					break;
   1200 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1201 					bpsmsg = "2.5 Gbps";
   1202 					break;
   1203 				case IXGBE_LINK_SPEED_1GB_FULL:
   1204 					bpsmsg = "1 Gbps";
   1205 					break;
   1206 				case IXGBE_LINK_SPEED_100_FULL:
   1207 					bpsmsg = "100 Mbps";
   1208 					break;
   1209 				case IXGBE_LINK_SPEED_10_FULL:
   1210 					bpsmsg = "10 Mbps";
   1211 					break;
   1212 				default:
   1213 					bpsmsg = "unknown speed";
   1214 					break;
   1215 				}
   1216 				device_printf(dev, "Link is up %s %s \n",
   1217 				    bpsmsg, "Full Duplex");
   1218 			}
   1219 			adapter->link_active = TRUE;
   1220 			if_link_state_change(ifp, LINK_STATE_UP);
   1221 		}
   1222 	} else { /* Link down */
   1223 		if (adapter->link_active == TRUE) {
   1224 			if (bootverbose)
   1225 				device_printf(dev, "Link is Down\n");
   1226 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1227 			adapter->link_active = FALSE;
   1228 		}
   1229 	}
   1230 
   1231 	return;
   1232 } /* ixv_update_link_status */
   1233 
   1234 
   1235 /************************************************************************
   1236  * ixv_stop - Stop the hardware
   1237  *
   1238  *   Disables all traffic on the adapter by resetting the hardware
   1239  *   and stopping the MAC; TX/RX buffers are freed in ixv_detach().
   1240  ************************************************************************/
   1241 static void
   1242 ixv_ifstop(struct ifnet *ifp, int disable)
   1243 {
   1244 	struct adapter *adapter = ifp->if_softc;
   1245 
   1246 	IXGBE_CORE_LOCK(adapter);
   1247 	ixv_stop(adapter);
   1248 	IXGBE_CORE_UNLOCK(adapter);
   1249 }
   1250 
   1251 static void
   1252 ixv_stop(void *arg)
   1253 {
   1254 	struct ifnet    *ifp;
   1255 	struct adapter  *adapter = arg;
   1256 	struct ixgbe_hw *hw = &adapter->hw;
   1257 
   1258 	ifp = adapter->ifp;
   1259 
   1260 	KASSERT(mutex_owned(&adapter->core_mtx));
   1261 
   1262 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1263 	ixv_disable_intr(adapter);
   1264 
   1265 	/* Tell the stack that the interface is no longer active */
   1266 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1267 
   1268 	hw->mac.ops.reset_hw(hw);
   1269 	adapter->hw.adapter_stopped = FALSE;
   1270 	hw->mac.ops.stop_adapter(hw);
   1271 	callout_stop(&adapter->timer);
   1272 
   1273 	/* reprogram the RAR[0] in case user changed it. */
   1274 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1275 
   1276 	return;
   1277 } /* ixv_stop */
   1278 
   1279 
   1280 /************************************************************************
   1281  * ixv_allocate_pci_resources
   1282  ************************************************************************/
   1283 static int
   1284 ixv_allocate_pci_resources(struct adapter *adapter,
   1285     const struct pci_attach_args *pa)
   1286 {
   1287 	pcireg_t	memtype;
   1288 	device_t        dev = adapter->dev;
   1289 	bus_addr_t addr;
   1290 	int flags;
   1291 
   1292 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1293 	switch (memtype) {
   1294 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1295 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1296 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1297 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1298 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1299 			goto map_err;
   1300 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1301 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1302 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1303 		}
   1304 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1305 		     adapter->osdep.mem_size, flags,
   1306 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1307 map_err:
   1308 			adapter->osdep.mem_size = 0;
   1309 			aprint_error_dev(dev, "unable to map BAR0\n");
   1310 			return ENXIO;
   1311 		}
   1312 		break;
   1313 	default:
   1314 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1315 		return ENXIO;
   1316 	}
   1317 
   1318 	/* Pick up the tuneable queues */
   1319 	adapter->num_queues = ixv_num_queues;
   1320 
   1321 	return (0);
   1322 } /* ixv_allocate_pci_resources */
   1323 
   1324 /************************************************************************
   1325  * ixv_free_pci_resources
   1326  ************************************************************************/
   1327 static void
   1328 ixv_free_pci_resources(struct adapter * adapter)
   1329 {
   1330 	struct 		ix_queue *que = adapter->queues;
   1331 	int		rid;
   1332 
   1333 	/*
   1334 	 *  Release all MSI-X queue resources:
   1335 	 */
   1336 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1337 		if (que->res != NULL)
   1338 			pci_intr_disestablish(adapter->osdep.pc,
   1339 			    adapter->osdep.ihs[i]);
   1340 	}
   1341 
   1342 
   1343 	/* Clean the Mailbox interrupt last */
   1344 	rid = adapter->vector;
   1345 
   1346 	if (adapter->osdep.ihs[rid] != NULL) {
   1347 		pci_intr_disestablish(adapter->osdep.pc,
   1348 		    adapter->osdep.ihs[rid]);
   1349 		adapter->osdep.ihs[rid] = NULL;
   1350 	}
   1351 
   1352 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1353 	    adapter->osdep.nintrs);
   1354 
   1355 	if (adapter->osdep.mem_size != 0) {
   1356 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1357 		    adapter->osdep.mem_bus_space_handle,
   1358 		    adapter->osdep.mem_size);
   1359 	}
   1360 
   1361 	return;
   1362 } /* ixv_free_pci_resources */
   1363 
   1364 /************************************************************************
   1365  * ixv_setup_interface
   1366  *
   1367  *   Setup networking device structure and register an interface.
   1368  ************************************************************************/
   1369 static void
   1370 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1371 {
   1372 	struct ethercom *ec = &adapter->osdep.ec;
   1373 	struct ifnet   *ifp;
   1374 
   1375 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1376 
   1377 	ifp = adapter->ifp = &ec->ec_if;
   1378 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1379 	ifp->if_baudrate = IF_Gbps(10);
   1380 	ifp->if_init = ixv_init;
   1381 	ifp->if_stop = ixv_ifstop;
   1382 	ifp->if_softc = adapter;
   1383 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1384 #ifdef IXGBE_MPSAFE
   1385 	ifp->if_extflags = IFEF_START_MPSAFE;
   1386 #endif
   1387 	ifp->if_ioctl = ixv_ioctl;
   1388 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1389 #if 0
   1390 		ixv_start_locked = ixgbe_legacy_start_locked;
   1391 #endif
   1392 	} else {
   1393 		ifp->if_transmit = ixgbe_mq_start;
   1394 #if 0
   1395 		ixv_start_locked = ixgbe_mq_start_locked;
   1396 #endif
   1397 	}
   1398 	ifp->if_start = ixgbe_legacy_start;
   1399 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1400 	IFQ_SET_READY(&ifp->if_snd);
   1401 
   1402 	if_initialize(ifp);
   1403 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1404 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1405 	/*
   1406 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1407 	 * used.
   1408 	 */
   1409 	if_register(ifp);
   1410 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1411 
   1412 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1413 
   1414 	/*
   1415 	 * Tell the upper layer(s) we support long frames.
   1416 	 */
   1417 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1418 
   1419 	/* Set capability flags */
   1420 	ifp->if_capabilities |= IFCAP_HWCSUM
   1421 	                     |  IFCAP_TSOv4
   1422 	                     |  IFCAP_TSOv6;
   1423 	ifp->if_capenable = 0;
   1424 
   1425 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1426 			    |  ETHERCAP_VLAN_HWCSUM
   1427 			    |  ETHERCAP_JUMBO_MTU
   1428 			    |  ETHERCAP_VLAN_MTU;
   1429 
   1430 	/* Enable the above capabilities by default */
   1431 	ec->ec_capenable = ec->ec_capabilities;
   1432 
   1433 	/* Advertise LRO capability, but don't enable it by default */
   1434 	ifp->if_capabilities |= IFCAP_LRO;
   1435 #if 0
   1436 	ifp->if_capenable = ifp->if_capabilities;
   1437 #endif
   1438 
   1439 	/*
   1440 	 * Specify the media types supported by this adapter and register
   1441 	 * callbacks to update media and link information
   1442 	 */
   1443 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1444 	    ixv_media_status);
   1445 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1446 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1447 
   1448 	return;
   1449 } /* ixv_setup_interface */
   1450 
   1451 
   1452 /************************************************************************
   1453  * ixv_initialize_transmit_units - Enable transmit unit.
   1454  ************************************************************************/
   1455 static void
   1456 ixv_initialize_transmit_units(struct adapter *adapter)
   1457 {
   1458 	struct tx_ring	*txr = adapter->tx_rings;
   1459 	struct ixgbe_hw	*hw = &adapter->hw;
   1460 
   1461 
   1462 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1463 		u64 tdba = txr->txdma.dma_paddr;
   1464 		u32 txctrl, txdctl;
   1465 
   1466 		/* Set WTHRESH to 8, burst writeback */
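		/* WTHRESH is bits 22:16 of TXDCTL, so (8 << 16) requests write-back after 8 descriptors */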
   1467 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1468 		txdctl |= (8 << 16);
   1469 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1470 
   1471 		/* Set the HW Tx Head and Tail indices */
   1472 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1473 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1474 
   1475 		/* Set Tx Tail register */
   1476 		txr->tail = IXGBE_VFTDT(i);
   1477 
   1478 		/* Set Ring parameters */
   1479 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1480 		    (tdba & 0x00000000ffffffffULL));
   1481 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1482 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1483 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1484 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1485 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1486 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1487 
   1488 		/* Now enable */
   1489 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1490 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1491 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1492 	}
   1493 
   1494 	return;
   1495 } /* ixv_initialize_transmit_units */
   1496 
   1497 
   1498 /************************************************************************
   1499  * ixv_initialize_rss_mapping
   1500  ************************************************************************/
   1501 static void
   1502 ixv_initialize_rss_mapping(struct adapter *adapter)
   1503 {
   1504 	struct ixgbe_hw *hw = &adapter->hw;
   1505 	u32             reta = 0, mrqc, rss_key[10];
   1506 	int             queue_id;
   1507 	int             i, j;
   1508 	u32             rss_hash_config;
   1509 
   1510 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1511 		/* Fetch the configured RSS key */
   1512 		rss_getkey((uint8_t *)&rss_key);
   1513 	} else {
   1514 		/* set up random bits */
   1515 		cprng_fast(&rss_key, sizeof(rss_key));
   1516 	}
   1517 
   1518 	/* Now fill out hash function seeds */
   1519 	for (i = 0; i < 10; i++)
   1520 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1521 
   1522 	/* Set up the redirection table */
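	/* Each 32-bit VFRETA register holds four 8-bit entries, so a register is written every fourth iteration */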
   1523 	for (i = 0, j = 0; i < 64; i++, j++) {
   1524 		if (j == adapter->num_queues)
   1525 			j = 0;
   1526 
   1527 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1528 			/*
   1529 			 * Fetch the RSS bucket id for the given indirection
   1530 			 * entry. Cap it at the number of configured buckets
   1531 			 * (which is num_queues.)
   1532 			 */
   1533 			queue_id = rss_get_indirection_to_bucket(i);
   1534 			queue_id = queue_id % adapter->num_queues;
   1535 		} else
   1536 			queue_id = j;
   1537 
   1538 		/*
   1539 		 * The low 8 bits are for hash value (n+0);
   1540 		 * The next 8 bits are for hash value (n+1), etc.
   1541 		 */
   1542 		reta >>= 8;
   1543 		reta |= ((uint32_t)queue_id) << 24;
   1544 		if ((i & 3) == 3) {
   1545 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1546 			reta = 0;
   1547 		}
   1548 	}
   1549 
   1550 	/* Perform hash on these packet types */
   1551 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1552 		rss_hash_config = rss_gethashconfig();
   1553 	else {
   1554 		/*
   1555 		 * Disable UDP - IP fragments aren't currently being handled
   1556 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1557 		 * traffic.
   1558 		 */
   1559 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1560 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1561 		                | RSS_HASHTYPE_RSS_IPV6
   1562 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1563 	}
   1564 
   1565 	mrqc = IXGBE_MRQC_RSSEN;
   1566 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1567 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1568 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1569 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1570 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1571 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1572 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1573 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1574 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1575 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1576 		    __func__);
   1577 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1578 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1579 		    __func__);
   1580 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1581 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1582 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   1583 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
   1584 		    __func__);
   1585 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1586 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1587 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1588 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1589 		    __func__);
   1590 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1591 } /* ixv_initialize_rss_mapping */
   1592 
   1593 
   1594 /************************************************************************
   1595  * ixv_initialize_receive_units - Setup receive registers and features.
   1596  ************************************************************************/
   1597 static void
   1598 ixv_initialize_receive_units(struct adapter *adapter)
   1599 {
   1600 	struct	rx_ring	*rxr = adapter->rx_rings;
   1601 	struct ixgbe_hw	*hw = &adapter->hw;
   1602 	struct ifnet	*ifp = adapter->ifp;
   1603 	u32		bufsz, rxcsum, psrtype;
   1604 
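        	/*
        	 * The SRRCTL BSIZEPKT field is in 1 KB units, so the values
        	 * below select 4 KB receive buffers for jumbo MTUs and 2 KB
        	 * buffers otherwise; bufsz is or'ed into VFSRRCTL in the
        	 * per-queue loop further down.
        	 */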
   1605 	if (ifp->if_mtu > ETHERMTU)
   1606 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1607 	else
   1608 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1609 
   1610 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1611 	        | IXGBE_PSRTYPE_UDPHDR
   1612 	        | IXGBE_PSRTYPE_IPV4HDR
   1613 	        | IXGBE_PSRTYPE_IPV6HDR
   1614 	        | IXGBE_PSRTYPE_L2HDR;
   1615 
   1616 	if (adapter->num_queues > 1)
   1617 		psrtype |= 1 << 29;
   1618 
   1619 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1620 
   1621 	/* Tell PF our max_frame size */
   1622 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1623 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1624 	}
   1625 
   1626 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1627 		u64 rdba = rxr->rxdma.dma_paddr;
   1628 		u32 reg, rxdctl;
   1629 
   1630 		/* Disable the queue */
   1631 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1632 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1633 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1634 		for (int j = 0; j < 10; j++) {
   1635 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1636 			    IXGBE_RXDCTL_ENABLE)
   1637 				msec_delay(1);
   1638 			else
   1639 				break;
   1640 		}
   1641 		wmb();
   1642 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1643 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1644 		    (rdba & 0x00000000ffffffffULL));
   1645 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1646 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1647 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1648 
   1649 		/* Reset the ring indices */
   1650 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1651 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1652 
   1653 		/* Set up the SRRCTL register */
   1654 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1655 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1656 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1657 		reg |= bufsz;
   1658 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1659 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1660 
   1661 		/* Capture Rx Tail index */
   1662 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1663 
   1664 		/* Do the queue enabling last */
   1665 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1666 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1667 		for (int k = 0; k < 10; k++) {
   1668 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1669 			    IXGBE_RXDCTL_ENABLE)
   1670 				break;
   1671 			msec_delay(1);
   1672 		}
   1673 		wmb();
   1674 
   1675 		/* Set the Tail Pointer */
   1676 		/*
   1677 		 * In netmap mode, we must preserve the buffers made
   1678 		 * available to userspace before the if_init()
   1679 		 * (this is true by default on the TX side, because
   1680 		 * init makes all buffers available to userspace).
   1681 		 *
   1682 		 * netmap_reset() and the device specific routines
   1683 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1684 		 * buffers at the end of the NIC ring, so here we
   1685 		 * must set the RDT (tail) register to make sure
   1686 		 * they are not overwritten.
   1687 		 *
   1688 		 * In this driver the NIC ring starts at RDH = 0,
   1689 		 * RDT points to the last slot available for reception (?),
   1690 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1691 		 */
   1692 #ifdef DEV_NETMAP
   1693 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1694 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1695 			struct netmap_adapter *na = NA(adapter->ifp);
   1696 			struct netmap_kring *kring = &na->rx_rings[i];
   1697 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1698 
   1699 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1700 		} else
   1701 #endif /* DEV_NETMAP */
   1702 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1703 			    adapter->num_rx_desc - 1);
   1704 	}
   1705 
   1706 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1707 
   1708 	ixv_initialize_rss_mapping(adapter);
   1709 
   1710 	if (adapter->num_queues > 1) {
   1711 		/* RSS and RX IPP Checksum are mutually exclusive */
   1712 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1713 	}
   1714 
   1715 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1716 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1717 
   1718 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1719 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1720 
   1721 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1722 
   1723 	return;
   1724 } /* ixv_initialize_receive_units */
   1725 
   1726 /************************************************************************
   1727  * ixv_setup_vlan_support
   1728  ************************************************************************/
   1729 static void
   1730 ixv_setup_vlan_support(struct adapter *adapter)
   1731 {
   1732 	struct ethercom *ec = &adapter->osdep.ec;
   1733 	struct ixgbe_hw *hw = &adapter->hw;
   1734 	struct rx_ring  *rxr;
   1735 	u32		ctrl, vid, vfta, retry;
   1736 
   1737 	/*
   1738 	 * We get here through init_locked, meaning a soft
   1739 	 * reset; the reset has already cleared the VFTA and
   1740 	 * other state, so if no VLANs have been registered
   1741 	 * there is nothing to do.
   1742 	 */
   1743 	if (!VLAN_ATTACHED(ec))
   1744 		return;
   1745 
   1746 	/* Enable VLAN tag stripping (VME) on the Rx queues */
   1747 	for (int i = 0; i < adapter->num_queues; i++) {
   1748 		rxr = &adapter->rx_rings[i];
   1749 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   1750 		ctrl |= IXGBE_RXDCTL_VME;
   1751 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   1752 		/*
   1753 		 * Let Rx path know that it needs to store VLAN tag
   1754 		 * as part of extra mbuf info.
   1755 		 */
   1756 		rxr->vtag_strip = TRUE;
   1757 	}
   1758 
   1759 #if 1
   1760 	/* XXX dirty hack. Enable all VIDs */
   1761 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
   1762 		adapter->shadow_vfta[i] = 0xffffffff;
   1763 #endif
   1764 	/*
   1765 	 * A soft reset zeroes out the VFTA, so
   1766 	 * we need to repopulate it now.
   1767 	 */
   1768 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1769 		if (adapter->shadow_vfta[i] == 0)
   1770 			continue;
   1771 		vfta = adapter->shadow_vfta[i];
   1772 		/*
   1773 		 * Reconstruct the VLAN IDs based on the
   1774 		 * bits set in each 32-bit word of the
   1775 		 * shadow VFTA array.
   1776 		 */
   1777 		for (int j = 0; j < 32; j++) {
   1778 			retry = 0;
   1779 			if ((vfta & (1 << j)) == 0)
   1780 				continue;
   1781 			vid = (i * 32) + j;
   1782 			/* Call the shared code mailbox routine */
   1783 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1784 				if (++retry > 5)
   1785 					break;
   1786 			}
   1787 		}
   1788 	}
   1789 } /* ixv_setup_vlan_support */
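
        /*
         * Shadow VFTA layout, for reference: VLAN ID v lives in word v / 32,
         * bit v % 32.  For example, VID 100 is bit 4 of shadow_vfta[3], and
         * the loop above reconstructs it as vid = 3 * 32 + 4 = 100 before
         * handing it to the set_vfta mailbox op.
         */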
   1790 
   1791 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1792 /************************************************************************
   1793  * ixv_register_vlan
   1794  *
   1795  *   Run via a vlan config EVENT, it enables us to use the
   1796  *   HW Filter table since we can get the vlan id. This just
   1797  *   creates the entry in the soft version of the VFTA, init
   1798  *   will repopulate the real table.
   1799  ************************************************************************/
   1800 static void
   1801 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1802 {
   1803 	struct adapter	*adapter = ifp->if_softc;
   1804 	u16		index, bit;
   1805 
   1806 	if (ifp->if_softc != arg) /* Not our event */
   1807 		return;
   1808 
   1809 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1810 		return;
   1811 
   1812 	IXGBE_CORE_LOCK(adapter);
   1813 	index = (vtag >> 5) & 0x7F;
   1814 	bit = vtag & 0x1F;
   1815 	adapter->shadow_vfta[index] |= (1 << bit);
   1816 	/* Re-init to load the changes */
   1817 	ixv_init_locked(adapter);
   1818 	IXGBE_CORE_UNLOCK(adapter);
   1819 } /* ixv_register_vlan */
   1820 
   1821 /************************************************************************
   1822  * ixv_unregister_vlan
   1823  *
   1824  *   Run via a vlan unconfig EVENT, remove our entry
   1825  *   in the soft vfta.
   1826  ************************************************************************/
   1827 static void
   1828 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1829 {
   1830 	struct adapter	*adapter = ifp->if_softc;
   1831 	u16		index, bit;
   1832 
   1833 	if (ifp->if_softc !=  arg)
   1834 		return;
   1835 
   1836 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1837 		return;
   1838 
   1839 	IXGBE_CORE_LOCK(adapter);
   1840 	index = (vtag >> 5) & 0x7F;
   1841 	bit = vtag & 0x1F;
   1842 	adapter->shadow_vfta[index] &= ~(1 << bit);
   1843 	/* Re-init to load the changes */
   1844 	ixv_init_locked(adapter);
   1845 	IXGBE_CORE_UNLOCK(adapter);
   1846 } /* ixv_unregister_vlan */
   1847 #endif
   1848 
   1849 /************************************************************************
   1850  * ixv_enable_intr
   1851  ************************************************************************/
   1852 static void
   1853 ixv_enable_intr(struct adapter *adapter)
   1854 {
   1855 	struct ixgbe_hw *hw = &adapter->hw;
   1856 	struct ix_queue *que = adapter->queues;
   1857 	u32             mask;
   1858 	int i;
   1859 
   1860 	/* For VTEIAC */
   1861 	mask = (1 << adapter->vector);
   1862 	for (i = 0; i < adapter->num_queues; i++, que++)
   1863 		mask |= (1 << que->msix);
   1864 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1865 
   1866 	/* For VTEIMS */
   1867 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
        	que = adapter->queues;	/* the VTEIAC loop above advanced que */
   1868 	for (i = 0; i < adapter->num_queues; i++, que++)
   1869 		ixv_enable_queue(adapter, que->msix);
   1870 
   1871 	IXGBE_WRITE_FLUSH(hw);
   1872 
   1873 	return;
   1874 } /* ixv_enable_intr */
   1875 
   1876 /************************************************************************
   1877  * ixv_disable_intr
   1878  ************************************************************************/
   1879 static void
   1880 ixv_disable_intr(struct adapter *adapter)
   1881 {
   1882 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1883 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1884 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1885 
   1886 	return;
   1887 } /* ixv_disable_intr */
   1888 
   1889 /************************************************************************
   1890  * ixv_set_ivar
   1891  *
   1892  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1893  *    - entry is the register array entry
   1894  *    - vector is the MSI-X vector for this queue
   1895  *    - type is RX/TX/MISC
   1896  ************************************************************************/
   1897 static void
   1898 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1899 {
   1900 	struct ixgbe_hw *hw = &adapter->hw;
   1901 	u32             ivar, index;
   1902 
   1903 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1904 
   1905 	if (type == -1) { /* MISC IVAR */
   1906 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1907 		ivar &= ~0xFF;
   1908 		ivar |= vector;
   1909 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1910 	} else {          /* RX/TX IVARS */
   1911 		index = (16 * (entry & 1)) + (8 * type);
   1912 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1913 		ivar &= ~(0xFF << index);
   1914 		ivar |= (vector << index);
   1915 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1916 	}
   1917 } /* ixv_set_ivar */
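
        /*
         * Worked example: a TX interrupt (type 1) for queue entry 3 on MSI-X
         * vector 5 goes into VTIVAR(1) (entry >> 1) at bit offset
         * 16 * (3 & 1) + 8 * 1 = 24, so bits 31:24 of that register become
         * 0x85 (vector 5 with IXGBE_IVAR_ALLOC_VAL or'ed in).
         */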
   1918 
   1919 /************************************************************************
   1920  * ixv_configure_ivars
   1921  ************************************************************************/
   1922 static void
   1923 ixv_configure_ivars(struct adapter *adapter)
   1924 {
   1925 	struct ix_queue *que = adapter->queues;
   1926 
   1927 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1928 		/* First the RX queue entry */
   1929 		ixv_set_ivar(adapter, i, que->msix, 0);
   1930 		/* ... and the TX */
   1931 		ixv_set_ivar(adapter, i, que->msix, 1);
   1932 		/* Set an initial value in EITR */
   1933 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1934 		    IXGBE_EITR_DEFAULT);
   1935 	}
   1936 
   1937 	/* For the mailbox interrupt */
   1938 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1939 } /* ixv_configure_ivars */
   1940 
   1941 
   1942 /************************************************************************
   1943  * ixv_save_stats
   1944  *
   1945  *   The VF stats registers never have a truly virgin
   1946  *   starting point, so this routine tries to make an
   1947  *   artificial one, marking ground zero on attach as
   1948  *   it were.
   1949  ************************************************************************/
   1950 static void
   1951 ixv_save_stats(struct adapter *adapter)
   1952 {
   1953 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1954 
   1955 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1956 		stats->saved_reset_vfgprc +=
   1957 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1958 		stats->saved_reset_vfgptc +=
   1959 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1960 		stats->saved_reset_vfgorc +=
   1961 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1962 		stats->saved_reset_vfgotc +=
   1963 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1964 		stats->saved_reset_vfmprc +=
   1965 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1966 	}
   1967 } /* ixv_save_stats */
   1968 
   1969 /************************************************************************
   1970  * ixv_init_stats
   1971  ************************************************************************/
   1972 static void
   1973 ixv_init_stats(struct adapter *adapter)
   1974 {
   1975 	struct ixgbe_hw *hw = &adapter->hw;
   1976 
   1977 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1978 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1979 	adapter->stats.vf.last_vfgorc |=
   1980 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1981 
   1982 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1983 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1984 	adapter->stats.vf.last_vfgotc |=
   1985 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1986 
   1987 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1988 
   1989 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1990 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1991 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1992 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1993 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1994 } /* ixv_init_stats */
   1995 
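        /*
         * Update a 32-bit VF statistics register: 'last' holds the previous
         * raw register value and 'count' is the 64-bit event counter.  A
         * reading smaller than 'last' means the register wrapped, so 2^32 is
         * added before the low 32 bits are replaced.  For example, if 'last'
         * was 0xfffffff0 and the register now reads 0x10, the event counter
         * advances by 0x20.
         */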
   1996 #define UPDATE_STAT_32(reg, last, count)		\
   1997 do {							\
   1998 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   1999 	if (current < (last))				\
   2000 		(count).ev_count += 0x100000000LL;	\
   2001 	(last) = current;				\
   2002 	(count).ev_count &= 0xFFFFFFFF00000000LL;	\
   2003 	(count).ev_count |= current;			\
   2004 } while (/*CONSTCOND*/0)
   2005 
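        /*
         * The same idea for the 36-bit octet counters, which are split
         * across an LSB and an MSB register; a wrap adds 2^36.
         */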
   2006 #define UPDATE_STAT_36(lsb, msb, last, count)		\
   2007 do {							\
   2008 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   2009 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   2010 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2011 	if (current < (last))				\
   2012 		(count).ev_count += 0x1000000000LL;	\
   2013 	(last) = current;				\
   2014 	(count).ev_count &= 0xFFFFFFF000000000LL;	\
   2015 	(count).ev_count |= current;			\
   2016 } while (/*CONSTCOND*/0)
   2017 
   2018 /************************************************************************
   2019  * ixv_update_stats - Update the board statistics counters.
   2020  ************************************************************************/
   2021 void
   2022 ixv_update_stats(struct adapter *adapter)
   2023 {
   2024 	struct ixgbe_hw *hw = &adapter->hw;
   2025 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2026 
   2027 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
   2028 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
   2029 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
   2030 	    stats->vfgorc);
   2031 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
   2032 	    stats->vfgotc);
   2033 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2034 
   2035 	/* Fill out the OS statistics structure */
   2036 	/*
   2037 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   2038 	 * adapter->stats counters. It's required to make ifconfig -z
   2039 	 * (SOICZIFDATA) work.
   2040 	 */
   2041 } /* ixv_update_stats */
   2042 
   2043 const struct sysctlnode *
   2044 ixv_sysctl_instance(struct adapter *adapter)
   2045 {
   2046 	const char *dvname;
   2047 	struct sysctllog **log;
   2048 	int rc;
   2049 	const struct sysctlnode *rnode;
   2050 
   2051 	log = &adapter->sysctllog;
   2052 	dvname = device_xname(adapter->dev);
   2053 
   2054 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2055 	    0, CTLTYPE_NODE, dvname,
   2056 	    SYSCTL_DESCR("ixv information and settings"),
   2057 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2058 		goto err;
   2059 
   2060 	return rnode;
   2061 err:
   2062 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2063 	return NULL;
   2064 }
   2065 
   2066 static void
   2067 ixv_add_device_sysctls(struct adapter *adapter)
   2068 {
   2069 	struct sysctllog **log;
   2070 	const struct sysctlnode *rnode, *cnode;
   2071 	device_t dev;
   2072 
   2073 	dev = adapter->dev;
   2074 	log = &adapter->sysctllog;
   2075 
   2076 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2077 		aprint_error_dev(dev, "could not create sysctl root\n");
   2078 		return;
   2079 	}
   2080 
   2081 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2082 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2083 	    "debug", SYSCTL_DESCR("Debug Info"),
   2084 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2085 		aprint_error_dev(dev, "could not create sysctl\n");
   2086 
   2087 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2088 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2089 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2090 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2091 		aprint_error_dev(dev, "could not create sysctl\n");
   2092 }
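
        /*
         * These nodes hang off hw.<xname> (see ixv_sysctl_instance()), so on
         * a system whose first unit is ixv0 the knobs would be reached as,
         * for example:
         *
         *	sysctl -w hw.ixv0.debug=1	# dump ixv_print_debug_info()
         *	sysctl -w hw.ixv0.enable_aim=1	# toggle interrupt moderation
         *
         * (illustrative paths; the node name follows device_xname()).
         */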
   2093 
   2094 /************************************************************************
   2095  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2096  ************************************************************************/
   2097 static void
   2098 ixv_add_stats_sysctls(struct adapter *adapter)
   2099 {
   2100 	device_t                dev = adapter->dev;
   2101 	struct tx_ring          *txr = adapter->tx_rings;
   2102 	struct rx_ring          *rxr = adapter->rx_rings;
   2103 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2104 	struct ixgbe_hw *hw = &adapter->hw;
   2105 	const struct sysctlnode *rnode;
   2106 	struct sysctllog **log = &adapter->sysctllog;
   2107 	const char *xname = device_xname(dev);
   2108 
   2109 	/* Driver Statistics */
   2110 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2111 	    NULL, xname, "Handled queue in softint");
   2112 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2113 	    NULL, xname, "Requeued in softint");
   2114 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2115 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2116 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2117 	    NULL, xname, "m_defrag() failed");
   2118 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2119 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2120 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2121 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2122 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2123 	    NULL, xname, "Driver tx dma hard fail other");
   2124 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2125 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2126 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2127 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2128 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2129 	    NULL, xname, "Watchdog timeouts");
   2130 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2131 	    NULL, xname, "TSO errors");
   2132 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2133 	    NULL, xname, "Link MSI-X IRQ Handled");
   2134 
   2135 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2136 		snprintf(adapter->queues[i].evnamebuf,
   2137 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2138 		    xname, i);
   2139 		snprintf(adapter->queues[i].namebuf,
   2140 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2141 
   2142 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2143 			aprint_error_dev(dev, "could not create sysctl root\n");
   2144 			break;
   2145 		}
   2146 
   2147 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2148 		    0, CTLTYPE_NODE,
   2149 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2150 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2151 			break;
   2152 
   2153 #if 0 /* not yet */
   2154 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2155 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2156 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2157 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2158 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2159 			break;
   2160 
   2161 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2162 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2163 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2164 			NULL, 0, &(adapter->queues[i].irqs),
   2165 		    0, CTL_CREATE, CTL_EOL) != 0)
   2166 			break;
   2167 
   2168 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2169 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2170 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2171 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2172 		    0, CTL_CREATE, CTL_EOL) != 0)
   2173 			break;
   2174 
   2175 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2176 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2177 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2178 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2179 		    0, CTL_CREATE, CTL_EOL) != 0)
   2180 			break;
   2181 #endif
   2182 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2183 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2184 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2185 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2186 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2187 		    NULL, adapter->queues[i].evnamebuf,
   2188 		    "Queue No Descriptor Available");
   2189 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2190 		    NULL, adapter->queues[i].evnamebuf,
   2191 		    "Queue Packets Transmitted");
   2192 #ifndef IXGBE_LEGACY_TX
   2193 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2194 		    NULL, adapter->queues[i].evnamebuf,
   2195 		    "Packets dropped in pcq");
   2196 #endif
   2197 
   2198 #ifdef LRO
   2199 		struct lro_ctrl *lro = &rxr->lro;
   2200 #endif /* LRO */
   2201 
   2202 #if 0 /* not yet */
   2203 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2204 		    CTLFLAG_READONLY,
   2205 		    CTLTYPE_INT,
   2206 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2207 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2208 		    CTL_CREATE, CTL_EOL) != 0)
   2209 			break;
   2210 
   2211 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2212 		    CTLFLAG_READONLY,
   2213 		    CTLTYPE_INT,
   2214 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2215 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2216 		    CTL_CREATE, CTL_EOL) != 0)
   2217 			break;
   2218 #endif
   2219 
   2220 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2221 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2222 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2223 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2224 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2225 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2226 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2227 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2228 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2229 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2230 #ifdef LRO
   2231 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2232 				CTLFLAG_RD, &lro->lro_queued, 0,
   2233 				"LRO Queued");
   2234 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2235 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2236 				"LRO Flushed");
   2237 #endif /* LRO */
   2238 	}
   2239 
   2240 	/* MAC stats get their own sub node */
   2241 
   2242 	snprintf(stats->namebuf,
   2243 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2244 
   2245 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2246 	    stats->namebuf, "rx csum offload - IP");
   2247 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2248 	    stats->namebuf, "rx csum offload - L4");
   2249 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2250 	    stats->namebuf, "rx csum offload - IP bad");
   2251 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2252 	    stats->namebuf, "rx csum offload - L4 bad");
   2253 
   2254 	/* Packet Reception Stats */
   2255 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2256 	    xname, "Good Packets Received");
   2257 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2258 	    xname, "Good Octets Received");
   2259 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2260 	    xname, "Multicast Packets Received");
   2261 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2262 	    xname, "Good Packets Transmitted");
   2263 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2264 	    xname, "Good Octets Transmitted");
   2265 
   2266 	/* Mailbox Stats */
   2267 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2268 	    xname, "message TXs");
   2269 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2270 	    xname, "message RXs");
   2271 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2272 	    xname, "ACKs");
   2273 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2274 	    xname, "REQs");
   2275 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2276 	    xname, "RSTs");
   2277 
   2278 } /* ixv_add_stats_sysctls */
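
        /*
         * All of the evcnt(9) counters attached above can be inspected from
         * userland with "vmstat -e"; they are grouped under the device name,
         * the per-queue name buffers and the MAC statistics name built here.
         */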
   2279 
   2280 /************************************************************************
   2281  * ixv_set_sysctl_value
   2282  ************************************************************************/
   2283 static void
   2284 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2285 	const char *description, int *limit, int value)
   2286 {
   2287 	device_t dev = adapter->dev;
   2288 	struct sysctllog **log;
   2289 	const struct sysctlnode *rnode, *cnode;
   2290 
   2291 	log = &adapter->sysctllog;
   2292 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2293 		aprint_error_dev(dev, "could not create sysctl root\n");
   2294 		return;
   2295 	}
   2296 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2297 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2298 	    name, SYSCTL_DESCR(description),
   2299 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2300 		aprint_error_dev(dev, "could not create sysctl\n");
   2301 	*limit = value;
   2302 } /* ixv_set_sysctl_value */
   2303 
   2304 /************************************************************************
   2305  * ixv_print_debug_info
   2306  *
   2307  *   Called from the "debug" sysctl handler (ixv_sysctl_debug).
   2308  *   Provides a way to take a look at important statistics
   2309  *   maintained by the driver and hardware.
   2310  ************************************************************************/
   2311 static void
   2312 ixv_print_debug_info(struct adapter *adapter)
   2313 {
   2314 	device_t        dev = adapter->dev;
   2315 	struct ixgbe_hw *hw = &adapter->hw;
   2316 	struct ix_queue *que = adapter->queues;
   2317 	struct rx_ring  *rxr;
   2318 	struct tx_ring  *txr;
   2319 #ifdef LRO
   2320 	struct lro_ctrl *lro;
   2321 #endif /* LRO */
   2322 
   2323 	device_printf(dev, "Error Byte Count = %u \n",
   2324 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2325 
   2326 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2327 		txr = que->txr;
   2328 		rxr = que->rxr;
   2329 #ifdef LRO
   2330 		lro = &rxr->lro;
   2331 #endif /* LRO */
   2332 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2333 		    que->msix, (unsigned long)que->irqs.ev_count);
   2334 		device_printf(dev, "RX(%d) Packets Received: %llu\n",
   2335 		    rxr->me, (unsigned long long)rxr->rx_packets.ev_count);
   2336 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2337 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
   2338 #ifdef LRO
   2339 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2340 		    rxr->me, (long long)lro->lro_queued);
   2341 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2342 		    rxr->me, (long long)lro->lro_flushed);
   2343 #endif /* LRO */
   2344 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2345 		    txr->me, (unsigned long)txr->total_packets.ev_count);
   2346 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2347 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
   2348 	}
   2349 
   2350 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2351 	    (unsigned long)adapter->link_irq.ev_count);
   2352 } /* ixv_print_debug_info */
   2353 
   2354 /************************************************************************
   2355  * ixv_sysctl_debug
   2356  ************************************************************************/
   2357 static int
   2358 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2359 {
   2360 	struct sysctlnode node;
   2361 	struct adapter *adapter;
   2362 	int            error, result = 0;
   2363 
   2364 	node = *rnode;
   2365 	adapter = (struct adapter *)node.sysctl_data;
        	node.sysctl_data = &result;
   2366 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2367 
   2368 	if (error || newp == NULL)
   2369 		return error;
   2370 
   2371 	if (result == 1)
   2372 		ixv_print_debug_info(adapter);
   2375 
   2376 	return 0;
   2377 } /* ixv_sysctl_debug */
   2378 
   2379 /************************************************************************
   2380  * ixv_init_device_features
   2381  ************************************************************************/
   2382 static void
   2383 ixv_init_device_features(struct adapter *adapter)
   2384 {
   2385 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2386 	                  | IXGBE_FEATURE_VF
   2387 	                  | IXGBE_FEATURE_RSS
   2388 	                  | IXGBE_FEATURE_LEGACY_TX;
   2389 
   2390 	/* A tad short on feature flags for VFs at the moment. */
   2391 	switch (adapter->hw.mac.type) {
   2392 	case ixgbe_mac_82599_vf:
   2393 		break;
   2394 	case ixgbe_mac_X540_vf:
   2395 		break;
   2396 	case ixgbe_mac_X550_vf:
   2397 	case ixgbe_mac_X550EM_x_vf:
   2398 	case ixgbe_mac_X550EM_a_vf:
   2399 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2400 		break;
   2401 	default:
   2402 		break;
   2403 	}
   2404 
   2405 	/* Enabled by default... */
   2406 	/* Is a virtual function (VF) */
   2407 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2408 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2409 	/* Netmap */
   2410 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2411 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2412 	/* Receive-Side Scaling (RSS) */
   2413 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2414 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2415 	/* Needs advanced context descriptor regardless of offloads req'd */
   2416 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2417 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2418 
   2419 	/* Enabled via sysctl... */
   2420 	/* Legacy (single queue) transmit */
   2421 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2422 	    ixv_enable_legacy_tx)
   2423 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2424 } /* ixv_init_device_features */
   2425 
   2426 /************************************************************************
   2427  * ixv_shutdown - Shutdown entry point
   2428  ************************************************************************/
   2429 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2430 static int
   2431 ixv_shutdown(device_t dev)
   2432 {
   2433 	struct adapter *adapter = device_private(dev);
   2434 	IXGBE_CORE_LOCK(adapter);
   2435 	ixv_stop(adapter);
   2436 	IXGBE_CORE_UNLOCK(adapter);
   2437 
   2438 	return (0);
   2439 } /* ixv_shutdown */
   2440 #endif
   2441 
   2442 static int
   2443 ixv_ifflags_cb(struct ethercom *ec)
   2444 {
   2445 	struct ifnet *ifp = &ec->ec_if;
   2446 	struct adapter *adapter = ifp->if_softc;
   2447 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2448 
   2449 	IXGBE_CORE_LOCK(adapter);
   2450 
   2451 	if (change != 0)
   2452 		adapter->if_flags = ifp->if_flags;
   2453 
   2454 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2455 		rc = ENETRESET;
   2456 
   2457 	/* Set up VLAN support and filter */
   2458 	ixv_setup_vlan_support(adapter);
   2459 
   2460 	IXGBE_CORE_UNLOCK(adapter);
   2461 
   2462 	return rc;
   2463 }
   2464 
   2465 
   2466 /************************************************************************
   2467  * ixv_ioctl - Ioctl entry point
   2468  *
   2469  *   Called when the user wants to configure the interface.
   2470  *
   2471  *   return 0 on success, positive on failure
   2472  ************************************************************************/
   2473 static int
   2474 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2475 {
   2476 	struct adapter	*adapter = ifp->if_softc;
   2477 	struct ifcapreq *ifcr = data;
   2478 	struct ifreq	*ifr = data;
   2479 	int             error = 0;
   2480 	int l4csum_en;
   2481 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2482 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2483 
   2484 	switch (command) {
   2485 	case SIOCSIFFLAGS:
   2486 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2487 		break;
   2488 	case SIOCADDMULTI:
   2489 	case SIOCDELMULTI:
   2490 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2491 		break;
   2492 	case SIOCSIFMEDIA:
   2493 	case SIOCGIFMEDIA:
   2494 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2495 		break;
   2496 	case SIOCSIFCAP:
   2497 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2498 		break;
   2499 	case SIOCSIFMTU:
   2500 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2501 		break;
   2502 	default:
   2503 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2504 		break;
   2505 	}
   2506 
   2507 	switch (command) {
   2508 	case SIOCSIFMEDIA:
   2509 	case SIOCGIFMEDIA:
   2510 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2511 	case SIOCSIFCAP:
   2512 		/* Layer-4 Rx checksum offload has to be turned on and
   2513 		 * off as a unit.
   2514 		 */
   2515 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2516 		if (l4csum_en != l4csum && l4csum_en != 0)
   2517 			return EINVAL;
   2518 		/*FALLTHROUGH*/
   2519 	case SIOCADDMULTI:
   2520 	case SIOCDELMULTI:
   2521 	case SIOCSIFFLAGS:
   2522 	case SIOCSIFMTU:
   2523 	default:
   2524 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2525 			return error;
   2526 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2527 			;	/* nothing to do when the interface is down */
   2528 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2529 			IXGBE_CORE_LOCK(adapter);
   2530 			ixv_init_locked(adapter);
   2531 			IXGBE_CORE_UNLOCK(adapter);
   2532 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2533 			/*
   2534 			 * Multicast list has changed; set the hardware filter
   2535 			 * accordingly.
   2536 			 */
   2537 			IXGBE_CORE_LOCK(adapter);
   2538 			ixv_disable_intr(adapter);
   2539 			ixv_set_multi(adapter);
   2540 			ixv_enable_intr(adapter);
   2541 			IXGBE_CORE_UNLOCK(adapter);
   2542 		}
   2543 		return 0;
   2544 	}
   2545 } /* ixv_ioctl */
   2546 
   2547 /************************************************************************
   2548  * ixv_init
   2549  ************************************************************************/
   2550 static int
   2551 ixv_init(struct ifnet *ifp)
   2552 {
   2553 	struct adapter *adapter = ifp->if_softc;
   2554 
   2555 	IXGBE_CORE_LOCK(adapter);
   2556 	ixv_init_locked(adapter);
   2557 	IXGBE_CORE_UNLOCK(adapter);
   2558 
   2559 	return 0;
   2560 } /* ixv_init */
   2561 
   2562 
   2563 /************************************************************************
   2564  * ixv_handle_que
   2565  ************************************************************************/
   2566 static void
   2567 ixv_handle_que(void *context)
   2568 {
   2569 	struct ix_queue *que = context;
   2570 	struct adapter  *adapter = que->adapter;
   2571 	struct tx_ring	*txr = que->txr;
   2572 	struct ifnet    *ifp = adapter->ifp;
   2573 	bool		more;
   2574 
   2575 	adapter->handleq.ev_count++;
   2576 
   2577 	if (ifp->if_flags & IFF_RUNNING) {
   2578 		more = ixgbe_rxeof(que);
   2579 		IXGBE_TX_LOCK(txr);
   2580 		ixgbe_txeof(txr);
   2581 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2582 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2583 				ixgbe_mq_start_locked(ifp, txr);
   2584 		/* Only for queue 0 */
   2585 		/* NetBSD still needs this for CBQ */
   2586 		if ((&adapter->queues[0] == que)
   2587 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2588 			ixgbe_legacy_start_locked(ifp, txr);
   2589 		IXGBE_TX_UNLOCK(txr);
   2590 		if (more) {
   2591 			adapter->req.ev_count++;
   2592 			softint_schedule(que->que_si);
   2593 			return;
   2594 		}
   2595 	}
   2596 
   2597 	/* Re-enable this interrupt */
   2598 	ixv_enable_queue(adapter, que->msix);
   2599 
   2600 	return;
   2601 } /* ixv_handle_que */
   2602 
   2603 /************************************************************************
   2604  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2605  ************************************************************************/
   2606 static int
   2607 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2608 {
   2609 	device_t	dev = adapter->dev;
   2610 	struct ix_queue *que = adapter->queues;
   2611 	struct		tx_ring *txr = adapter->tx_rings;
   2612 	int 		error, msix_ctrl, rid, vector = 0;
   2613 	pci_chipset_tag_t pc;
   2614 	pcitag_t	tag;
   2615 	char		intrbuf[PCI_INTRSTR_LEN];
   2616 	char		intr_xname[32];
   2617 	const char	*intrstr = NULL;
   2618 	kcpuset_t	*affinity;
   2619 	int		cpu_id = 0;
   2620 
   2621 	pc = adapter->osdep.pc;
   2622 	tag = adapter->osdep.tag;
   2623 
   2624 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2625 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2626 	    adapter->osdep.nintrs) != 0) {
   2627 		aprint_error_dev(dev,
   2628 		    "failed to allocate MSI-X interrupt\n");
   2629 		return (ENXIO);
   2630 	}
   2631 
   2632 	kcpuset_create(&affinity, false);
   2633 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2634 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2635 		    device_xname(dev), i);
   2636 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2637 		    sizeof(intrbuf));
   2638 #ifdef IXGBE_MPSAFE
   2639 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2640 		    true);
   2641 #endif
   2642 		/* Set the handler function */
   2643 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2644 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2645 		    intr_xname);
   2646 		if (que->res == NULL) {
   2647 			pci_intr_release(pc, adapter->osdep.intrs,
   2648 			    adapter->osdep.nintrs);
   2649 			aprint_error_dev(dev,
   2650 			    "Failed to register QUE handler\n");
   2651 			kcpuset_destroy(affinity);
   2652 			return (ENXIO);
   2653 		}
   2654 		que->msix = vector;
   2655 		adapter->active_queues |= ((u64)1 << que->msix);
   2656 
   2657 		cpu_id = i;
   2658 		/* Round-robin affinity */
   2659 		kcpuset_zero(affinity);
   2660 		kcpuset_set(affinity, cpu_id % ncpu);
   2661 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2662 		    NULL);
   2663 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2664 		    intrstr);
   2665 		if (error == 0)
   2666 			aprint_normal(", bound queue %d to cpu %d\n",
   2667 			    i, cpu_id % ncpu);
   2668 		else
   2669 			aprint_normal("\n");
   2670 
   2671 #ifndef IXGBE_LEGACY_TX
   2672 		txr->txr_si
   2673 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2674 			ixgbe_deferred_mq_start, txr);
   2675 #endif
   2676 		que->que_si
   2677 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2678 			ixv_handle_que, que);
   2679 		if (que->que_si == NULL) {
   2680 			aprint_error_dev(dev,
   2681 			    "could not establish software interrupt\n");
   2682 		}
   2683 	}
   2684 
   2685 	/* and Mailbox */
   2686 	cpu_id++;
   2687 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2688 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2689 	    sizeof(intrbuf));
   2690 #ifdef IXGBE_MPSAFE
   2691 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2692 	    true);
   2693 #endif
   2694 	/* Set the mbx handler function */
   2695 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2696 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2697 	    intr_xname);
   2698 	if (adapter->osdep.ihs[vector] == NULL) {
   2699 		adapter->res = NULL;
   2700 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2701 		kcpuset_destroy(affinity);
   2702 		return (ENXIO);
   2703 	}
   2704 	/* Round-robin affinity */
   2705 	kcpuset_zero(affinity);
   2706 	kcpuset_set(affinity, cpu_id % ncpu);
   2707 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
        	    NULL);
   2708 
   2709 	aprint_normal_dev(dev,
   2710 	    "for link, interrupting at %s", intrstr);
   2711 	if (error == 0)
   2712 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2713 	else
   2714 		aprint_normal("\n");
   2715 
   2716 	adapter->vector = vector;
   2717 	/* Tasklets for Mailbox */
   2718 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   2719 	    ixv_handle_link, adapter);
   2720 	/*
   2721 	 * Due to a broken design QEMU will fail to properly
   2722 	 * enable the guest for MSI-X unless the vectors in
   2723 	 * the table are all set up, so we must rewrite the
   2724 	 * ENABLE in the MSI-X control register again at this
   2725 	 * point to cause it to successfully initialize us.
   2726 	 */
   2727 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2728 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2729 		rid += PCI_MSIX_CTL;
   2730 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2731 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2732 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2733 	}
   2734 
   2735 	kcpuset_destroy(affinity);
   2736 	return (0);
   2737 } /* ixv_allocate_msix */
   2738 
   2739 /************************************************************************
   2740  * ixv_configure_interrupts - Setup MSI-X resources
   2741  *
   2742  *   Note: The VF device MUST use MSI-X; there is no fallback.
   2743  ************************************************************************/
   2744 static int
   2745 ixv_configure_interrupts(struct adapter *adapter)
   2746 {
   2747 	device_t dev = adapter->dev;
   2748 	int want, queues, msgs;
   2749 
   2750 	/* Must have at least 2 MSI-X vectors */
   2751 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2752 	if (msgs < 2) {
   2753 		aprint_error_dev(dev, "MSIX config error\n");
   2754 		return (ENXIO);
   2755 	}
   2756 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2757 
   2758 	/* Figure out a reasonable auto config value */
   2759 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2760 
   2761 	if (ixv_num_queues != 0)
   2762 		queues = ixv_num_queues;
   2763 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   2764 		queues = IXGBE_VF_MAX_TX_QUEUES;
   2765 
   2766 	/*
   2767 	 * Want vectors for the queues,
   2768 	 * plus an additional for mailbox.
   2769 	 */
   2770 	want = queues + 1;
   2771 	if (msgs >= want)
   2772 		msgs = want;
   2773 	else {
   2774 		aprint_error_dev(dev,
   2775 		    "MSI-X Configuration Problem, "
   2776 		    "%d vectors but %d queues wanted!\n",
   2777 		    msgs, want);
   2778 		return -1;
   2779 	}
   2780 
   2781 	adapter->msix_mem = (void *)1; /* XXX */
   2782 	aprint_normal_dev(dev,
   2783 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2784 	adapter->num_queues = queues;
   2785 
   2786 	return (0);
   2787 } /* ixv_configure_interrupts */
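
        /*
         * Worked example: if the VF exposes three MSI-X vectors and the
         * machine has eight CPUs (with ixv_num_queues left at zero), then
         * queues = min(ncpu, msgs - 1) = 2 and want = 3, so the driver ends
         * up with two queue vectors plus one mailbox vector.
         */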
   2788 
   2789 
   2790 /************************************************************************
   2791  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2792  *
   2793  *   Done outside of interrupt context since the driver might sleep
   2794  ************************************************************************/
   2795 static void
   2796 ixv_handle_link(void *context)
   2797 {
   2798 	struct adapter *adapter = context;
   2799 
   2800 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2801 	    &adapter->link_up, FALSE);
   2802 	ixv_update_link_status(adapter);
   2803 } /* ixv_handle_link */
   2804 
   2805 /************************************************************************
   2806  * ixv_check_link - Used in the local timer to poll for link changes
   2807  ************************************************************************/
   2808 static void
   2809 ixv_check_link(struct adapter *adapter)
   2810 {
   2811 	adapter->hw.mac.get_link_status = TRUE;
   2812 
   2813 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2814 	    &adapter->link_up, FALSE);
   2815 	ixv_update_link_status(adapter);
   2816 } /* ixv_check_link */
   2817