ixv.c revision 1.72
      1 /*$NetBSD: ixv.c,v 1.72 2017/10/18 10:43:32 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
      55  *   Used by probe to select which devices to attach to
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
    148  * FreeBSD Device Interface Entry Points
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
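         /*
          * NetBSD has no FreeBSD-style loader tunables; TUNABLE_INT() is
          * defined away to nothing, presumably so the TUNABLE_INT() lines
          * below can stay in sync with the upstream FreeBSD code.
          */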
    171 #define	TUNABLE_INT(__x, __y)
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    173 
    174 /*
     175  * AIM: Adaptive Interrupt Moderation.
     176  * When enabled, the interrupt rate is varied
     177  * over time based on the traffic seen on
     178  * each interrupt vector.
    179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    190 
    191 /*
    192  * Number of TX descriptors per ring,
     193  * set higher than RX as this seems to be
     194  * the better-performing choice.
    195  */
    196 static int ixv_txd = PERFORM_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = PERFORM_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
    207 #ifdef NET_MPSAFE
    208 #define IXGBE_MPSAFE		1
    209 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    210 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    211 #else
    212 #define IXGBE_CALLOUT_FLAGS	0
    213 #define IXGBE_SOFTINFT_FLAGS	0
    214 #endif
    215 
    216 #if 0
    217 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    218 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    219 #endif
    220 
    221 /************************************************************************
    222  * ixv_probe - Device identification routine
    223  *
     224  *   Determines if the driver should be loaded on the
     225  *   adapter based on its PCI vendor/device ID.
    226  *
     227  *   return 1 if the device is supported, 0 otherwise
    228  ************************************************************************/
    229 static int
    230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    231 {
    232 #ifdef __HAVE_PCI_MSI_MSIX
    233 	const struct pci_attach_args *pa = aux;
    234 
    235 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    236 #else
    237 	return 0;
    238 #endif
    239 } /* ixv_probe */
    240 
    241 static ixgbe_vendor_info_t *
    242 ixv_lookup(const struct pci_attach_args *pa)
    243 {
    244 	ixgbe_vendor_info_t *ent;
    245 	pcireg_t subid;
    246 
    247 	INIT_DEBUGOUT("ixv_lookup: begin");
    248 
    249 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    250 		return NULL;
    251 
    252 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    253 
    254 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    255 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    256 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    257 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    258 		     (ent->subvendor_id == 0)) &&
    259 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    260 		     (ent->subdevice_id == 0))) {
    261 			return ent;
    262 		}
    263 	}
    264 
    265 	return NULL;
    266 }
    267 
    268 /************************************************************************
    269  * ixv_attach - Device initialization routine
    270  *
    271  *   Called when the driver is being loaded.
    272  *   Identifies the type of hardware, allocates all resources
    273  *   and initializes the hardware.
    274  *
     275  *   Returns nothing; on failure the acquired resources are released.
    276  ************************************************************************/
    277 static void
    278 ixv_attach(device_t parent, device_t dev, void *aux)
    279 {
    280 	struct adapter *adapter;
    281 	struct ixgbe_hw *hw;
    282 	int             error = 0;
    283 	pcireg_t	id, subid;
    284 	ixgbe_vendor_info_t *ent;
    285 	const struct pci_attach_args *pa = aux;
    286 	const char *apivstr;
    287 	const char *str;
    288 	char buf[256];
    289 
    290 	INIT_DEBUGOUT("ixv_attach: begin");
    291 
    292 	/*
     293 	 * Make sure BUSMASTER is set; on a VM under
     294 	 * KVM it may not be, and that will break things.
    295 	 */
    296 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    297 
    298 	/* Allocate, clear, and link in our adapter structure */
    299 	adapter = device_private(dev);
    300 	adapter->dev = dev;
    301 	adapter->hw.back = adapter;
    302 	hw = &adapter->hw;
    303 
    304 	adapter->init_locked = ixv_init_locked;
    305 	adapter->stop_locked = ixv_stop;
    306 
    307 	adapter->osdep.pc = pa->pa_pc;
    308 	adapter->osdep.tag = pa->pa_tag;
    309 	if (pci_dma64_available(pa))
    310 		adapter->osdep.dmat = pa->pa_dmat64;
    311 	else
    312 		adapter->osdep.dmat = pa->pa_dmat;
    313 	adapter->osdep.attached = false;
    314 
    315 	ent = ixv_lookup(pa);
    316 
    317 	KASSERT(ent != NULL);
    318 
    319 	aprint_normal(": %s, Version - %s\n",
    320 	    ixv_strings[ent->index], ixv_driver_version);
    321 
     322 	/* Core Lock Init */
    323 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    324 
    325 	/* Do base PCI setup - map BAR0 */
    326 	if (ixv_allocate_pci_resources(adapter, pa)) {
    327 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    328 		error = ENXIO;
    329 		goto err_out;
    330 	}
    331 
    332 	/* SYSCTL APIs */
    333 	ixv_add_device_sysctls(adapter);
    334 
    335 	/* Set up the timer callout */
    336 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    337 
    338 	/* Save off the information about this board */
    339 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    340 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    341 	hw->vendor_id = PCI_VENDOR(id);
    342 	hw->device_id = PCI_PRODUCT(id);
    343 	hw->revision_id =
    344 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    345 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    346 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    347 
    348 	/* A subset of set_mac_type */
    349 	switch (hw->device_id) {
    350 	case IXGBE_DEV_ID_82599_VF:
    351 		hw->mac.type = ixgbe_mac_82599_vf;
    352 		str = "82599 VF";
    353 		break;
    354 	case IXGBE_DEV_ID_X540_VF:
    355 		hw->mac.type = ixgbe_mac_X540_vf;
    356 		str = "X540 VF";
    357 		break;
    358 	case IXGBE_DEV_ID_X550_VF:
    359 		hw->mac.type = ixgbe_mac_X550_vf;
    360 		str = "X550 VF";
    361 		break;
    362 	case IXGBE_DEV_ID_X550EM_X_VF:
    363 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    364 		str = "X550EM X VF";
    365 		break;
    366 	case IXGBE_DEV_ID_X550EM_A_VF:
    367 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    368 		str = "X550EM A VF";
    369 		break;
    370 	default:
    371 		/* Shouldn't get here since probe succeeded */
    372 		aprint_error_dev(dev, "Unknown device ID!\n");
    373 		error = ENXIO;
    374 		goto err_out;
    375 		break;
    376 	}
    377 	aprint_normal_dev(dev, "device %s\n", str);
    378 
    379 	ixv_init_device_features(adapter);
    380 
    381 	/* Initialize the shared code */
    382 	error = ixgbe_init_ops_vf(hw);
    383 	if (error) {
    384 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    385 		error = EIO;
    386 		goto err_out;
    387 	}
    388 
    389 	/* Setup the mailbox */
    390 	ixgbe_init_mbx_params_vf(hw);
    391 
    392 	/* Set the right number of segments */
    393 	adapter->num_segs = IXGBE_82599_SCATTER;
    394 
    395 	/* Reset mbox api to 1.0 */
    396 	error = hw->mac.ops.reset_hw(hw);
    397 	if (error == IXGBE_ERR_RESET_FAILED)
    398 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    399 	else if (error)
    400 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    401 		    error);
    402 	if (error) {
    403 		error = EIO;
    404 		goto err_out;
    405 	}
    406 
    407 	error = hw->mac.ops.init_hw(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "...init_hw() failed!\n");
    410 		error = EIO;
    411 		goto err_out;
    412 	}
    413 
    414 	/* Negotiate mailbox API version */
    415 	error = ixv_negotiate_api(adapter);
    416 	if (error)
    417 		aprint_normal_dev(dev,
    418 		    "MBX API negotiation failed during attach!\n");
    419 	switch (hw->api_version) {
    420 	case ixgbe_mbox_api_10:
    421 		apivstr = "1.0";
    422 		break;
    423 	case ixgbe_mbox_api_20:
    424 		apivstr = "2.0";
    425 		break;
    426 	case ixgbe_mbox_api_11:
    427 		apivstr = "1.1";
    428 		break;
    429 	case ixgbe_mbox_api_12:
    430 		apivstr = "1.2";
    431 		break;
    432 	case ixgbe_mbox_api_13:
    433 		apivstr = "1.3";
    434 		break;
    435 	default:
    436 		apivstr = "unknown";
    437 		break;
    438 	}
    439 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    440 
    441 	/* If no mac address was assigned, make a random one */
    442 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    443 		u8 addr[ETHER_ADDR_LEN];
    444 		uint64_t rndval = cprng_strong64();
    445 
    446 		memcpy(addr, &rndval, sizeof(addr));
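         		/* Clear the multicast bit and set the locally administered bit */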
    447 		addr[0] &= 0xFE;
    448 		addr[0] |= 0x02;
    449 		bcopy(addr, hw->mac.addr, sizeof(addr));
    450 	}
    451 
    452 	/* Register for VLAN events */
    453 #if 0 /* XXX delete after write? */
    454 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    455 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    456 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    457 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    458 #endif
    459 
    460 	/* Sysctls for limiting the amount of work done in the taskqueues */
    461 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    462 	    "max number of rx packets to process",
    463 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    464 
    465 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    466 	    "max number of tx packets to process",
    467 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    468 
    469 	/* Do descriptor calc and sanity checks */
    470 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    471 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    472 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    473 		adapter->num_tx_desc = DEFAULT_TXD;
    474 	} else
    475 		adapter->num_tx_desc = ixv_txd;
    476 
    477 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    478 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    479 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    480 		adapter->num_rx_desc = DEFAULT_RXD;
    481 	} else
    482 		adapter->num_rx_desc = ixv_rxd;
    483 
    484 	/* Setup MSI-X */
    485 	error = ixv_configure_interrupts(adapter);
    486 	if (error)
    487 		goto err_out;
    488 
    489 	/* Allocate our TX/RX Queues */
    490 	if (ixgbe_allocate_queues(adapter)) {
    491 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    492 		error = ENOMEM;
    493 		goto err_out;
    494 	}
    495 
     496 	/* hw.ixv defaults init */
    497 	adapter->enable_aim = ixv_enable_aim;
    498 
    499 	/* Setup OS specific network interface */
    500 	ixv_setup_interface(dev, adapter);
    501 
    502 	error = ixv_allocate_msix(adapter, pa);
    503 	if (error) {
    504 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    505 		goto err_late;
    506 	}
    507 
    508 	/* Do the stats setup */
    509 	ixv_save_stats(adapter);
    510 	ixv_init_stats(adapter);
    511 	ixv_add_stats_sysctls(adapter);
    512 
    513 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    514 		ixgbe_netmap_attach(adapter);
    515 
    516 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    517 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    518 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    519 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    520 
    521 	INIT_DEBUGOUT("ixv_attach: end");
    522 	adapter->osdep.attached = true;
    523 
    524 	return;
    525 
    526 err_late:
    527 	ixgbe_free_transmit_structures(adapter);
    528 	ixgbe_free_receive_structures(adapter);
    529 	free(adapter->queues, M_DEVBUF);
    530 err_out:
    531 	ixv_free_pci_resources(adapter);
    532 	IXGBE_CORE_LOCK_DESTROY(adapter);
    533 
    534 	return;
    535 } /* ixv_attach */
    536 
    537 /************************************************************************
    538  * ixv_detach - Device removal routine
    539  *
    540  *   Called when the driver is being removed.
    541  *   Stops the adapter and deallocates all the resources
    542  *   that were allocated for driver operation.
    543  *
    544  *   return 0 on success, positive on failure
    545  ************************************************************************/
    546 static int
    547 ixv_detach(device_t dev, int flags)
    548 {
    549 	struct adapter  *adapter = device_private(dev);
    550 	struct ixgbe_hw *hw = &adapter->hw;
    551 	struct ix_queue *que = adapter->queues;
    552 	struct tx_ring *txr = adapter->tx_rings;
    553 	struct rx_ring *rxr = adapter->rx_rings;
    554 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    555 
    556 	INIT_DEBUGOUT("ixv_detach: begin");
    557 	if (adapter->osdep.attached == false)
    558 		return 0;
    559 
     560 	/* Stop the interface. The callout is stopped there, too. */
    561 	ixv_ifstop(adapter->ifp, 1);
    562 
    563 #if NVLAN > 0
    564 	/* Make sure VLANs are not using driver */
    565 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    566 		;	/* nothing to do: no VLANs */
    567 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    568 		vlan_ifdetach(adapter->ifp);
    569 	else {
    570 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    571 		return EBUSY;
    572 	}
    573 #endif
    574 
    575 	IXGBE_CORE_LOCK(adapter);
    576 	ixv_stop(adapter);
    577 	IXGBE_CORE_UNLOCK(adapter);
    578 
    579 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    580 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    581 			softint_disestablish(txr->txr_si);
    582 		softint_disestablish(que->que_si);
    583 	}
    584 
     585 	/* Drain the Mailbox (link) queue */
    586 	softint_disestablish(adapter->link_si);
    587 
    588 	/* Unregister VLAN events */
    589 #if 0 /* XXX msaitoh delete after write? */
    590 	if (adapter->vlan_attach != NULL)
    591 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    592 	if (adapter->vlan_detach != NULL)
    593 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    594 #endif
    595 
    596 	ether_ifdetach(adapter->ifp);
    597 	callout_halt(&adapter->timer, NULL);
    598 
    599 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    600 		netmap_detach(adapter->ifp);
    601 
    602 	ixv_free_pci_resources(adapter);
    603 #if 0 /* XXX the NetBSD port is probably missing something here */
    604 	bus_generic_detach(dev);
    605 #endif
    606 	if_detach(adapter->ifp);
    607 	if_percpuq_destroy(adapter->ipq);
    608 
    609 	sysctl_teardown(&adapter->sysctllog);
    610 	evcnt_detach(&adapter->handleq);
    611 	evcnt_detach(&adapter->req);
    612 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    613 	evcnt_detach(&adapter->mbuf_defrag_failed);
    614 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    615 	evcnt_detach(&adapter->einval_tx_dma_setup);
    616 	evcnt_detach(&adapter->other_tx_dma_setup);
    617 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    618 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    619 	evcnt_detach(&adapter->watchdog_events);
    620 	evcnt_detach(&adapter->tso_err);
    621 	evcnt_detach(&adapter->link_irq);
    622 
    623 	txr = adapter->tx_rings;
    624 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    625 		evcnt_detach(&adapter->queues[i].irqs);
    626 		evcnt_detach(&txr->no_desc_avail);
    627 		evcnt_detach(&txr->total_packets);
    628 		evcnt_detach(&txr->tso_tx);
    629 #ifndef IXGBE_LEGACY_TX
    630 		evcnt_detach(&txr->pcq_drops);
    631 #endif
    632 
    633 		evcnt_detach(&rxr->rx_packets);
    634 		evcnt_detach(&rxr->rx_bytes);
    635 		evcnt_detach(&rxr->rx_copies);
    636 		evcnt_detach(&rxr->no_jmbuf);
    637 		evcnt_detach(&rxr->rx_discarded);
    638 	}
    639 	evcnt_detach(&stats->ipcs);
    640 	evcnt_detach(&stats->l4cs);
    641 	evcnt_detach(&stats->ipcs_bad);
    642 	evcnt_detach(&stats->l4cs_bad);
    643 
    644 	/* Packet Reception Stats */
    645 	evcnt_detach(&stats->vfgorc);
    646 	evcnt_detach(&stats->vfgprc);
    647 	evcnt_detach(&stats->vfmprc);
    648 
    649 	/* Packet Transmission Stats */
    650 	evcnt_detach(&stats->vfgotc);
    651 	evcnt_detach(&stats->vfgptc);
    652 
    653 	/* Mailbox Stats */
    654 	evcnt_detach(&hw->mbx.stats.msgs_tx);
    655 	evcnt_detach(&hw->mbx.stats.msgs_rx);
    656 	evcnt_detach(&hw->mbx.stats.acks);
    657 	evcnt_detach(&hw->mbx.stats.reqs);
    658 	evcnt_detach(&hw->mbx.stats.rsts);
    659 
    660 	ixgbe_free_transmit_structures(adapter);
    661 	ixgbe_free_receive_structures(adapter);
    662 	free(adapter->queues, M_DEVBUF);
    663 
    664 	IXGBE_CORE_LOCK_DESTROY(adapter);
    665 
    666 	return (0);
    667 } /* ixv_detach */
    668 
    669 /************************************************************************
    670  * ixv_init_locked - Init entry point
    671  *
    672  *   Used in two ways: It is used by the stack as an init entry
     673  *   point in the network interface structure. It is also used
    674  *   by the driver as a hw/sw initialization routine to get
    675  *   to a consistent state.
    676  *
     677  *   Returns nothing; on failure the adapter is stopped
    678  ************************************************************************/
    679 static void
    680 ixv_init_locked(struct adapter *adapter)
    681 {
    682 	struct ifnet	*ifp = adapter->ifp;
    683 	device_t 	dev = adapter->dev;
    684 	struct ixgbe_hw *hw = &adapter->hw;
    685 	struct ix_queue	*que = adapter->queues;
    686 	int             error = 0;
    687 	uint32_t mask;
    688 	int i;
    689 
    690 	INIT_DEBUGOUT("ixv_init_locked: begin");
    691 	KASSERT(mutex_owned(&adapter->core_mtx));
    692 	hw->adapter_stopped = FALSE;
    693 	hw->mac.ops.stop_adapter(hw);
    694 	callout_stop(&adapter->timer);
    695 
     696 	/* Reprogram RAR[0] in case the user changed it. */
    697 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    698 
     699 	/* Get the latest MAC address; the user may have set an LAA */
    700 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    701 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    702 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    703 
    704 	/* Prepare transmit descriptors and buffers */
    705 	if (ixgbe_setup_transmit_structures(adapter)) {
    706 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    707 		ixv_stop(adapter);
    708 		return;
    709 	}
    710 
    711 	/* Reset VF and renegotiate mailbox API version */
    712 	hw->mac.ops.reset_hw(hw);
    713 	error = ixv_negotiate_api(adapter);
    714 	if (error)
    715 		device_printf(dev,
    716 		    "Mailbox API negotiation failed in init_locked!\n");
    717 
    718 	ixv_initialize_transmit_units(adapter);
    719 
    720 	/* Setup Multicast table */
    721 	ixv_set_multi(adapter);
    722 
    723 	/*
    724 	 * Determine the correct mbuf pool
    725 	 * for doing jumbo/headersplit
    726 	 */
    727 	if (ifp->if_mtu > ETHERMTU)
    728 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    729 	else
    730 		adapter->rx_mbuf_sz = MCLBYTES;
    731 
    732 	/* Prepare receive descriptors and buffers */
    733 	if (ixgbe_setup_receive_structures(adapter)) {
    734 		device_printf(dev, "Could not setup receive structures\n");
    735 		ixv_stop(adapter);
    736 		return;
    737 	}
    738 
    739 	/* Configure RX settings */
    740 	ixv_initialize_receive_units(adapter);
    741 
    742 #if 0 /* XXX isn't it required? -- msaitoh  */
    743 	/* Set the various hardware offload abilities */
    744 	ifp->if_hwassist = 0;
    745 	if (ifp->if_capenable & IFCAP_TSO4)
    746 		ifp->if_hwassist |= CSUM_TSO;
    747 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    748 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    749 #if __FreeBSD_version >= 800000
    750 		ifp->if_hwassist |= CSUM_SCTP;
    751 #endif
    752 	}
    753 #endif
    754 
    755 	/* Set up VLAN offload and filter */
    756 	ixv_setup_vlan_support(adapter);
    757 
    758 	/* Set up MSI-X routing */
    759 	ixv_configure_ivars(adapter);
    760 
    761 	/* Set up auto-mask */
    762 	mask = (1 << adapter->vector);
    763 	for (i = 0; i < adapter->num_queues; i++, que++)
    764 		mask |= (1 << que->msix);
    765 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
    766 
    767 	/* Set moderation on the Link interrupt */
    768 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    769 
    770 	/* Stats init */
    771 	ixv_init_stats(adapter);
    772 
    773 	/* Config/Enable Link */
    774 	hw->mac.get_link_status = TRUE;
    775 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    776 	    FALSE);
    777 
    778 	/* Start watchdog */
    779 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    780 
    781 	/* And now turn on interrupts */
    782 	ixv_enable_intr(adapter);
    783 
    784 	/* Now inform the stack we're ready */
    785 	ifp->if_flags |= IFF_RUNNING;
    786 	ifp->if_flags &= ~IFF_OACTIVE;
    787 
    788 	return;
    789 } /* ixv_init_locked */
    790 
    791 /*
    792  * MSI-X Interrupt Handlers and Tasklets
    793  */
    794 
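         /*
          * Writing a queue's bit to VTEIMS enables its MSI-X vector,
          * writing it to VTEIMC disables it, and writing it to VTEICS
          * forces the interrupt to fire.
          */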
    795 static inline void
    796 ixv_enable_queue(struct adapter *adapter, u32 vector)
    797 {
    798 	struct ixgbe_hw *hw = &adapter->hw;
    799 	u32             queue = 1 << vector;
    800 	u32             mask;
    801 
    802 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    803 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    804 } /* ixv_enable_queue */
    805 
    806 static inline void
    807 ixv_disable_queue(struct adapter *adapter, u32 vector)
    808 {
    809 	struct ixgbe_hw *hw = &adapter->hw;
     810 	u64             queue = (u64)1 << vector;
    811 	u32             mask;
    812 
    813 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    814 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    815 } /* ixv_disable_queue */
    816 
    817 static inline void
    818 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    819 {
    820 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    821 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    822 } /* ixv_rearm_queues */
    823 
    824 
    825 /************************************************************************
     826  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    827  ************************************************************************/
    828 static int
    829 ixv_msix_que(void *arg)
    830 {
    831 	struct ix_queue	*que = arg;
    832 	struct adapter  *adapter = que->adapter;
    833 	struct tx_ring	*txr = que->txr;
    834 	struct rx_ring	*rxr = que->rxr;
    835 	bool		more;
    836 	u32		newitr = 0;
    837 
    838 	ixv_disable_queue(adapter, que->msix);
    839 	++que->irqs.ev_count;
    840 
    841 #ifdef __NetBSD__
    842 	/* Don't run ixgbe_rxeof in interrupt context */
    843 	more = true;
    844 #else
    845 	more = ixgbe_rxeof(que);
    846 #endif
    847 
    848 	IXGBE_TX_LOCK(txr);
    849 	ixgbe_txeof(txr);
    850 	IXGBE_TX_UNLOCK(txr);
    851 
    852 	/* Do AIM now? */
    853 
    854 	if (adapter->enable_aim == false)
    855 		goto no_calc;
    856 	/*
    857 	 * Do Adaptive Interrupt Moderation:
    858 	 *  - Write out last calculated setting
    859 	 *  - Calculate based on average size over
    860 	 *    the last interval.
    861 	 */
    862 	if (que->eitr_setting)
    863 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    864 		    que->eitr_setting);
    865 
    866 	que->eitr_setting = 0;
    867 
    868 	/* Idle, do nothing */
    869 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    870 		goto no_calc;
    871 
    872 	if ((txr->bytes) && (txr->packets))
    873 		newitr = txr->bytes/txr->packets;
    874 	if ((rxr->bytes) && (rxr->packets))
    875 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    876 	newitr += 24; /* account for hardware frame, crc */
    877 
    878 	/* set an upper boundary */
    879 	newitr = min(newitr, 3000);
    880 
    881 	/* Be nice to the mid range */
    882 	if ((newitr > 300) && (newitr < 1200))
    883 		newitr = (newitr / 3);
    884 	else
    885 		newitr = (newitr / 2);
    886 
    887 	newitr |= newitr << 16;
    888 
    889 	/* save for next interrupt */
    890 	que->eitr_setting = newitr;
    891 
    892 	/* Reset state */
    893 	txr->bytes = 0;
    894 	txr->packets = 0;
    895 	rxr->bytes = 0;
    896 	rxr->packets = 0;
    897 
    898 no_calc:
    899 	if (more)
    900 		softint_schedule(que->que_si);
    901 	else /* Re-enable this interrupt */
    902 		ixv_enable_queue(adapter, que->msix);
    903 
    904 	return 1;
    905 } /* ixv_msix_que */
    906 
    907 /************************************************************************
    908  * ixv_msix_mbx
    909  ************************************************************************/
    910 static int
    911 ixv_msix_mbx(void *arg)
    912 {
    913 	struct adapter	*adapter = arg;
    914 	struct ixgbe_hw *hw = &adapter->hw;
    915 
    916 	++adapter->link_irq.ev_count;
    917 	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
    918 
    919 	/* Link status change */
    920 	hw->mac.get_link_status = TRUE;
    921 	softint_schedule(adapter->link_si);
    922 
    923 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
    924 
    925 	return 1;
    926 } /* ixv_msix_mbx */
    927 
    928 /************************************************************************
    929  * ixv_media_status - Media Ioctl callback
    930  *
    931  *   Called whenever the user queries the status of
    932  *   the interface using ifconfig.
    933  ************************************************************************/
    934 static void
    935 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
    936 {
    937 	struct adapter *adapter = ifp->if_softc;
    938 
    939 	INIT_DEBUGOUT("ixv_media_status: begin");
    940 	IXGBE_CORE_LOCK(adapter);
    941 	ixv_update_link_status(adapter);
    942 
    943 	ifmr->ifm_status = IFM_AVALID;
    944 	ifmr->ifm_active = IFM_ETHER;
    945 
    946 	if (!adapter->link_active) {
    947 		ifmr->ifm_active |= IFM_NONE;
    948 		IXGBE_CORE_UNLOCK(adapter);
    949 		return;
    950 	}
    951 
    952 	ifmr->ifm_status |= IFM_ACTIVE;
    953 
    954 	switch (adapter->link_speed) {
    955 		case IXGBE_LINK_SPEED_10GB_FULL:
    956 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    957 			break;
    958 		case IXGBE_LINK_SPEED_5GB_FULL:
    959 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
    960 			break;
    961 		case IXGBE_LINK_SPEED_2_5GB_FULL:
    962 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
    963 			break;
    964 		case IXGBE_LINK_SPEED_1GB_FULL:
    965 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    966 			break;
    967 		case IXGBE_LINK_SPEED_100_FULL:
    968 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    969 			break;
    970 		case IXGBE_LINK_SPEED_10_FULL:
    971 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    972 			break;
    973 	}
    974 
    975 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
    976 
    977 	IXGBE_CORE_UNLOCK(adapter);
    978 
    979 	return;
    980 } /* ixv_media_status */
    981 
    982 /************************************************************************
    983  * ixv_media_change - Media Ioctl callback
    984  *
    985  *   Called when the user changes speed/duplex using
     986  *   media/mediaopt option with ifconfig.
    987  ************************************************************************/
    988 static int
    989 ixv_media_change(struct ifnet *ifp)
    990 {
    991 	struct adapter *adapter = ifp->if_softc;
    992 	struct ifmedia *ifm = &adapter->media;
    993 
    994 	INIT_DEBUGOUT("ixv_media_change: begin");
    995 
    996 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    997 		return (EINVAL);
    998 
    999 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1000 	case IFM_AUTO:
   1001 		break;
   1002 	default:
   1003 		device_printf(adapter->dev, "Only auto media type\n");
   1004 		return (EINVAL);
   1005 	}
   1006 
   1007 	return (0);
   1008 } /* ixv_media_change */
   1009 
   1010 
   1011 /************************************************************************
   1012  * ixv_negotiate_api
   1013  *
   1014  *   Negotiate the Mailbox API with the PF;
   1015  *   start with the most featured API first.
   1016  ************************************************************************/
   1017 static int
   1018 ixv_negotiate_api(struct adapter *adapter)
   1019 {
   1020 	struct ixgbe_hw *hw = &adapter->hw;
   1021 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1022 	                              ixgbe_mbox_api_10,
   1023 	                              ixgbe_mbox_api_unknown };
   1024 	int             i = 0;
   1025 
   1026 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1027 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1028 			return (0);
   1029 		i++;
   1030 	}
   1031 
   1032 	return (EINVAL);
   1033 } /* ixv_negotiate_api */
   1034 
   1035 
   1036 /************************************************************************
   1037  * ixv_set_multi - Multicast Update
   1038  *
    1039  *   Called whenever the multicast address list is updated.
   1040  ************************************************************************/
   1041 static void
   1042 ixv_set_multi(struct adapter *adapter)
   1043 {
   1044 	struct ether_multi *enm;
   1045 	struct ether_multistep step;
   1046 	struct ethercom *ec = &adapter->osdep.ec;
   1047 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1048 	u8                 *update_ptr;
   1049 	int                mcnt = 0;
   1050 
   1051 	KASSERT(mutex_owned(&adapter->core_mtx));
   1052 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1053 
   1054 	ETHER_LOCK(ec);
   1055 	ETHER_FIRST_MULTI(step, ec, enm);
   1056 	while (enm != NULL) {
   1057 		bcopy(enm->enm_addrlo,
   1058 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1059 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1060 		mcnt++;
   1061 		/* XXX This might be required --msaitoh */
   1062 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1063 			break;
   1064 		ETHER_NEXT_MULTI(step, enm);
   1065 	}
   1066 	ETHER_UNLOCK(ec);
   1067 
   1068 	update_ptr = mta;
   1069 
   1070 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1071 	    ixv_mc_array_itr, TRUE);
   1072 
   1073 	return;
   1074 } /* ixv_set_multi */
   1075 
   1076 /************************************************************************
   1077  * ixv_mc_array_itr
   1078  *
   1079  *   An iterator function needed by the multicast shared code.
   1080  *   It feeds the shared code routine the addresses in the
   1081  *   array of ixv_set_multi() one by one.
   1082  ************************************************************************/
   1083 static u8 *
   1084 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1085 {
   1086 	u8 *addr = *update_ptr;
   1087 	u8 *newptr;
   1088 	*vmdq = 0;
   1089 
   1090 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1091 	*update_ptr = newptr;
   1092 
   1093 	return addr;
   1094 } /* ixv_mc_array_itr */
   1095 
   1096 /************************************************************************
   1097  * ixv_local_timer - Timer routine
   1098  *
   1099  *   Checks for link status, updates statistics,
   1100  *   and runs the watchdog check.
   1101  ************************************************************************/
   1102 static void
   1103 ixv_local_timer(void *arg)
   1104 {
   1105 	struct adapter *adapter = arg;
   1106 
   1107 	IXGBE_CORE_LOCK(adapter);
   1108 	ixv_local_timer_locked(adapter);
   1109 	IXGBE_CORE_UNLOCK(adapter);
   1110 }
   1111 
   1112 static void
   1113 ixv_local_timer_locked(void *arg)
   1114 {
   1115 	struct adapter	*adapter = arg;
   1116 	device_t	dev = adapter->dev;
   1117 	struct ix_queue	*que = adapter->queues;
   1118 	u64		queues = 0;
   1119 	int		hung = 0;
   1120 
   1121 	KASSERT(mutex_owned(&adapter->core_mtx));
   1122 
   1123 	ixv_check_link(adapter);
   1124 
   1125 	/* Stats Update */
   1126 	ixv_update_stats(adapter);
   1127 
   1128 	/*
   1129 	 * Check the TX queues status
   1130 	 *      - mark hung queues so we don't schedule on them
   1131 	 *      - watchdog only if all queues show hung
   1132 	 */
   1133 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1134 		/* Keep track of queues with work for soft irq */
   1135 		if (que->txr->busy)
   1136 			queues |= ((u64)1 << que->me);
   1137 		/*
    1138 		 * Each time txeof runs without cleaning while uncleaned
    1139 		 * descriptors remain, it increments 'busy'. If we reach
    1140 		 * the MAX we declare the queue hung.
   1141 		 */
   1142 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1143 			++hung;
   1144 			/* Mark the queue as inactive */
   1145 			adapter->active_queues &= ~((u64)1 << que->me);
   1146 			continue;
   1147 		} else {
   1148 			/* Check if we've come back from hung */
   1149 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1150 				adapter->active_queues |= ((u64)1 << que->me);
   1151 		}
   1152 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1153 			device_printf(dev,
   1154 			    "Warning queue %d appears to be hung!\n", i);
   1155 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1156 			++hung;
   1157 		}
   1158 	}
   1159 
   1160 	/* Only truly watchdog if all queues show hung */
   1161 	if (hung == adapter->num_queues)
   1162 		goto watchdog;
   1163 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1164 		ixv_rearm_queues(adapter, queues);
   1165 	}
   1166 
   1167 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1168 
   1169 	return;
   1170 
   1171 watchdog:
   1172 
   1173 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1174 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1175 	adapter->watchdog_events.ev_count++;
   1176 	ixv_init_locked(adapter);
   1177 } /* ixv_local_timer */
   1178 
   1179 /************************************************************************
   1180  * ixv_update_link_status - Update OS on link state
   1181  *
   1182  * Note: Only updates the OS on the cached link state.
   1183  *       The real check of the hardware only happens with
   1184  *       a link interrupt.
   1185  ************************************************************************/
   1186 static void
   1187 ixv_update_link_status(struct adapter *adapter)
   1188 {
   1189 	struct ifnet *ifp = adapter->ifp;
   1190 	device_t     dev = adapter->dev;
   1191 
   1192 	if (adapter->link_up) {
   1193 		if (adapter->link_active == FALSE) {
   1194 			if (bootverbose) {
   1195 				const char *bpsmsg;
   1196 
   1197 				switch (adapter->link_speed) {
   1198 				case IXGBE_LINK_SPEED_10GB_FULL:
   1199 					bpsmsg = "10 Gbps";
   1200 					break;
   1201 				case IXGBE_LINK_SPEED_5GB_FULL:
   1202 					bpsmsg = "5 Gbps";
   1203 					break;
   1204 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1205 					bpsmsg = "2.5 Gbps";
   1206 					break;
   1207 				case IXGBE_LINK_SPEED_1GB_FULL:
   1208 					bpsmsg = "1 Gbps";
   1209 					break;
   1210 				case IXGBE_LINK_SPEED_100_FULL:
   1211 					bpsmsg = "100 Mbps";
   1212 					break;
   1213 				case IXGBE_LINK_SPEED_10_FULL:
   1214 					bpsmsg = "10 Mbps";
   1215 					break;
   1216 				default:
   1217 					bpsmsg = "unknown speed";
   1218 					break;
   1219 				}
    1220 				device_printf(dev, "Link is up %s %s\n",
   1221 				    bpsmsg, "Full Duplex");
   1222 			}
   1223 			adapter->link_active = TRUE;
   1224 			if_link_state_change(ifp, LINK_STATE_UP);
   1225 		}
   1226 	} else { /* Link down */
   1227 		if (adapter->link_active == TRUE) {
   1228 			if (bootverbose)
   1229 				device_printf(dev, "Link is Down\n");
   1230 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1231 			adapter->link_active = FALSE;
   1232 		}
   1233 	}
   1234 
   1235 	return;
   1236 } /* ixv_update_link_status */
   1237 
   1238 
   1239 /************************************************************************
   1240  * ixv_stop - Stop the hardware
   1241  *
   1242  *   Disables all traffic on the adapter by issuing a
   1243  *   global reset on the MAC and deallocates TX/RX buffers.
   1244  ************************************************************************/
   1245 static void
   1246 ixv_ifstop(struct ifnet *ifp, int disable)
   1247 {
   1248 	struct adapter *adapter = ifp->if_softc;
   1249 
   1250 	IXGBE_CORE_LOCK(adapter);
   1251 	ixv_stop(adapter);
   1252 	IXGBE_CORE_UNLOCK(adapter);
   1253 }
   1254 
   1255 static void
   1256 ixv_stop(void *arg)
   1257 {
   1258 	struct ifnet    *ifp;
   1259 	struct adapter  *adapter = arg;
   1260 	struct ixgbe_hw *hw = &adapter->hw;
   1261 
   1262 	ifp = adapter->ifp;
   1263 
   1264 	KASSERT(mutex_owned(&adapter->core_mtx));
   1265 
   1266 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1267 	ixv_disable_intr(adapter);
   1268 
   1269 	/* Tell the stack that the interface is no longer active */
   1270 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1271 
   1272 	hw->mac.ops.reset_hw(hw);
   1273 	adapter->hw.adapter_stopped = FALSE;
   1274 	hw->mac.ops.stop_adapter(hw);
   1275 	callout_stop(&adapter->timer);
   1276 
    1277 	/* Reprogram RAR[0] in case the user changed it. */
   1278 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1279 
   1280 	return;
   1281 } /* ixv_stop */
   1282 
   1283 
   1284 /************************************************************************
   1285  * ixv_allocate_pci_resources
   1286  ************************************************************************/
   1287 static int
   1288 ixv_allocate_pci_resources(struct adapter *adapter,
   1289     const struct pci_attach_args *pa)
   1290 {
   1291 	pcireg_t	memtype;
   1292 	device_t        dev = adapter->dev;
   1293 	bus_addr_t addr;
   1294 	int flags;
   1295 
   1296 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1297 	switch (memtype) {
   1298 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1299 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1300 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1301 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1302 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1303 			goto map_err;
   1304 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1305 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1306 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1307 		}
   1308 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1309 		     adapter->osdep.mem_size, flags,
   1310 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1311 map_err:
   1312 			adapter->osdep.mem_size = 0;
   1313 			aprint_error_dev(dev, "unable to map BAR0\n");
   1314 			return ENXIO;
   1315 		}
   1316 		break;
   1317 	default:
   1318 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1319 		return ENXIO;
   1320 	}
   1321 
   1322 	/* Pick up the tuneable queues */
   1323 	adapter->num_queues = ixv_num_queues;
   1324 
   1325 	return (0);
   1326 } /* ixv_allocate_pci_resources */
   1327 
   1328 /************************************************************************
   1329  * ixv_free_pci_resources
   1330  ************************************************************************/
   1331 static void
   1332 ixv_free_pci_resources(struct adapter * adapter)
   1333 {
   1334 	struct 		ix_queue *que = adapter->queues;
   1335 	int		rid;
   1336 
   1337 	/*
   1338 	 *  Release all msix queue resources:
   1339 	 */
   1340 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1341 		if (que->res != NULL)
   1342 			pci_intr_disestablish(adapter->osdep.pc,
   1343 			    adapter->osdep.ihs[i]);
   1344 	}
   1345 
   1346 
   1347 	/* Clean the Mailbox interrupt last */
   1348 	rid = adapter->vector;
   1349 
   1350 	if (adapter->osdep.ihs[rid] != NULL) {
   1351 		pci_intr_disestablish(adapter->osdep.pc,
   1352 		    adapter->osdep.ihs[rid]);
   1353 		adapter->osdep.ihs[rid] = NULL;
   1354 	}
   1355 
   1356 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1357 	    adapter->osdep.nintrs);
   1358 
   1359 	if (adapter->osdep.mem_size != 0) {
   1360 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1361 		    adapter->osdep.mem_bus_space_handle,
   1362 		    adapter->osdep.mem_size);
   1363 	}
   1364 
   1365 	return;
   1366 } /* ixv_free_pci_resources */
   1367 
   1368 /************************************************************************
   1369  * ixv_setup_interface
   1370  *
   1371  *   Setup networking device structure and register an interface.
   1372  ************************************************************************/
   1373 static void
   1374 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1375 {
   1376 	struct ethercom *ec = &adapter->osdep.ec;
   1377 	struct ifnet   *ifp;
   1378 
   1379 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1380 
   1381 	ifp = adapter->ifp = &ec->ec_if;
   1382 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1383 	ifp->if_baudrate = IF_Gbps(10);
   1384 	ifp->if_init = ixv_init;
   1385 	ifp->if_stop = ixv_ifstop;
   1386 	ifp->if_softc = adapter;
   1387 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1388 #ifdef IXGBE_MPSAFE
   1389 	ifp->if_extflags = IFEF_START_MPSAFE;
   1390 #endif
   1391 	ifp->if_ioctl = ixv_ioctl;
   1392 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1393 #if 0
   1394 		ixv_start_locked = ixgbe_legacy_start_locked;
   1395 #endif
   1396 	} else {
   1397 		ifp->if_transmit = ixgbe_mq_start;
   1398 #if 0
   1399 		ixv_start_locked = ixgbe_mq_start_locked;
   1400 #endif
   1401 	}
   1402 	ifp->if_start = ixgbe_legacy_start;
   1403 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1404 	IFQ_SET_READY(&ifp->if_snd);
   1405 
   1406 	if_initialize(ifp);
   1407 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1408 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1409 	/*
   1410 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1411 	 * used.
   1412 	 */
   1413 	if_register(ifp);
   1414 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1415 
   1416 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1417 
   1418 	/*
   1419 	 * Tell the upper layer(s) we support long frames.
   1420 	 */
   1421 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1422 
   1423 	/* Set capability flags */
   1424 	ifp->if_capabilities |= IFCAP_HWCSUM
   1425 	                     |  IFCAP_TSOv4
   1426 	                     |  IFCAP_TSOv6;
   1427 	ifp->if_capenable = 0;
   1428 
   1429 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1430 			    |  ETHERCAP_VLAN_HWCSUM
   1431 			    |  ETHERCAP_JUMBO_MTU
   1432 			    |  ETHERCAP_VLAN_MTU;
   1433 
   1434 	/* Enable the above capabilities by default */
   1435 	ec->ec_capenable = ec->ec_capabilities;
   1436 
   1437 	/* Don't enable LRO by default */
   1438 	ifp->if_capabilities |= IFCAP_LRO;
   1439 #if 0
   1440 	ifp->if_capenable = ifp->if_capabilities;
   1441 #endif
   1442 
   1443 	/*
   1444 	 * Specify the media types supported by this adapter and register
   1445 	 * callbacks to update media and link information
   1446 	 */
   1447 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1448 	    ixv_media_status);
   1449 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1450 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1451 
   1452 	return;
   1453 } /* ixv_setup_interface */
   1454 
   1455 
   1456 /************************************************************************
   1457  * ixv_initialize_transmit_units - Enable transmit unit.
   1458  ************************************************************************/
   1459 static void
   1460 ixv_initialize_transmit_units(struct adapter *adapter)
   1461 {
   1462 	struct tx_ring	*txr = adapter->tx_rings;
   1463 	struct ixgbe_hw	*hw = &adapter->hw;
   1464 
   1465 
   1466 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1467 		u64 tdba = txr->txdma.dma_paddr;
   1468 		u32 txctrl, txdctl;
   1469 
   1470 		/* Set WTHRESH to 8, burst writeback */
   1471 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1472 		txdctl |= (8 << 16);
   1473 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1474 
   1475 		/* Set the HW Tx Head and Tail indices */
   1476 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1477 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1478 
   1479 		/* Set Tx Tail register */
   1480 		txr->tail = IXGBE_VFTDT(i);
   1481 
   1482 		/* Set Ring parameters */
   1483 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1484 		    (tdba & 0x00000000ffffffffULL));
   1485 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1486 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1487 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
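         		/* Descriptor write-back must not use relaxed ordering */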
   1488 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1489 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1490 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1491 
   1492 		/* Now enable */
   1493 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1494 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1495 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1496 	}
   1497 
   1498 	return;
   1499 } /* ixv_initialize_transmit_units */
   1500 
   1501 
   1502 /************************************************************************
   1503  * ixv_initialize_rss_mapping
   1504  ************************************************************************/
   1505 static void
   1506 ixv_initialize_rss_mapping(struct adapter *adapter)
   1507 {
   1508 	struct ixgbe_hw *hw = &adapter->hw;
   1509 	u32             reta = 0, mrqc, rss_key[10];
   1510 	int             queue_id;
   1511 	int             i, j;
   1512 	u32             rss_hash_config;
   1513 
   1514 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1515 		/* Fetch the configured RSS key */
   1516 		rss_getkey((uint8_t *)&rss_key);
   1517 	} else {
   1518 		/* set up random bits */
   1519 		cprng_fast(&rss_key, sizeof(rss_key));
   1520 	}
   1521 
   1522 	/* Now fill out hash function seeds */
   1523 	for (i = 0; i < 10; i++)
   1524 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1525 
   1526 	/* Set up the redirection table */
   1527 	for (i = 0, j = 0; i < 64; i++, j++) {
   1528 		if (j == adapter->num_queues)
   1529 			j = 0;
   1530 
   1531 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1532 			/*
   1533 			 * Fetch the RSS bucket id for the given indirection
   1534 			 * entry. Cap it at the number of configured buckets
   1535 			 * (which is num_queues.)
   1536 			 */
   1537 			queue_id = rss_get_indirection_to_bucket(i);
   1538 			queue_id = queue_id % adapter->num_queues;
   1539 		} else
   1540 			queue_id = j;
   1541 
   1542 		/*
   1543 		 * The low 8 bits are for hash value (n+0);
   1544 		 * The next 8 bits are for hash value (n+1), etc.
   1545 		 */
   1546 		reta >>= 8;
   1547 		reta |= ((uint32_t)queue_id) << 24;
   1548 		if ((i & 3) == 3) {
   1549 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1550 			reta = 0;
   1551 		}
   1552 	}
   1553 
   1554 	/* Perform hash on these packet types */
   1555 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1556 		rss_hash_config = rss_gethashconfig();
   1557 	else {
   1558 		/*
   1559 		 * Disable UDP - IP fragments aren't currently being handled
   1560 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1561 		 * traffic.
   1562 		 */
   1563 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1564 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1565 		                | RSS_HASHTYPE_RSS_IPV6
   1566 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1567 	}
   1568 
   1569 	mrqc = IXGBE_MRQC_RSSEN;
   1570 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1571 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1572 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1573 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1574 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1575 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1576 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1577 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1578 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1579 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1580 		    __func__);
   1581 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1582 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1583 		    __func__);
   1584 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1585 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1586 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   1587 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
   1588 		    __func__);
   1589 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1590 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1591 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1592 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1593 		    __func__);
   1594 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1595 } /* ixv_initialize_rss_mapping */
   1596 
   1597 
   1598 /************************************************************************
   1599  * ixv_initialize_receive_units - Setup receive registers and features.
   1600  ************************************************************************/
   1601 static void
   1602 ixv_initialize_receive_units(struct adapter *adapter)
   1603 {
   1604 	struct	rx_ring	*rxr = adapter->rx_rings;
   1605 	struct ixgbe_hw	*hw = &adapter->hw;
   1606 	struct ifnet	*ifp = adapter->ifp;
   1607 	u32		bufsz, rxcsum, psrtype;
   1608 
   1609 	if (ifp->if_mtu > ETHERMTU)
   1610 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1611 	else
   1612 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1613 
   1614 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1615 	        | IXGBE_PSRTYPE_UDPHDR
   1616 	        | IXGBE_PSRTYPE_IPV4HDR
   1617 	        | IXGBE_PSRTYPE_IPV6HDR
   1618 	        | IXGBE_PSRTYPE_L2HDR;
   1619 
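	/* With more than one queue, also set bit 29 (RQPL) so RSS spreads packets */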
   1620 	if (adapter->num_queues > 1)
   1621 		psrtype |= 1 << 29;
   1622 
   1623 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1624 
   1625 	/* Tell PF our max_frame size */
   1626 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1627 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1628 	}
   1629 
   1630 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1631 		u64 rdba = rxr->rxdma.dma_paddr;
   1632 		u32 reg, rxdctl;
   1633 
   1634 		/* Disable the queue */
   1635 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1636 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1637 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
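		/* Wait up to ~10 ms for the queue to actually stop */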
   1638 		for (int j = 0; j < 10; j++) {
   1639 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1640 			    IXGBE_RXDCTL_ENABLE)
   1641 				msec_delay(1);
   1642 			else
   1643 				break;
   1644 		}
   1645 		wmb();
   1646 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1647 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1648 		    (rdba & 0x00000000ffffffffULL));
   1649 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1650 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1651 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1652 
   1653 		/* Reset the ring indices */
   1654 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1655 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1656 
   1657 		/* Set up the SRRCTL register */
   1658 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1659 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1660 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1661 		reg |= bufsz;
   1662 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1663 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1664 
   1665 		/* Capture Rx Tail index */
   1666 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1667 
   1668 		/* Do the queue enabling last */
   1669 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1670 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
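		/* Wait up to ~10 ms for the queue to start */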
   1671 		for (int k = 0; k < 10; k++) {
   1672 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1673 			    IXGBE_RXDCTL_ENABLE)
   1674 				break;
   1675 			msec_delay(1);
   1676 		}
   1677 		wmb();
   1678 
   1679 		/* Set the Tail Pointer */
   1680 		/*
   1681 		 * In netmap mode, we must preserve the buffers made
   1682 		 * available to userspace before the if_init()
   1683 		 * (this is true by default on the TX side, because
   1684 		 * init makes all buffers available to userspace).
   1685 		 *
   1686 		 * netmap_reset() and the device specific routines
   1687 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1688 		 * buffers at the end of the NIC ring, so here we
   1689 		 * must set the RDT (tail) register to make sure
   1690 		 * they are not overwritten.
   1691 		 *
   1692 		 * In this driver the NIC ring starts at RDH = 0,
   1693 		 * RDT points to the last slot available for reception (?),
   1694 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1695 		 */
   1696 #ifdef DEV_NETMAP
   1697 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1698 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1699 			struct netmap_adapter *na = NA(adapter->ifp);
   1700 			struct netmap_kring *kring = &na->rx_rings[i];
   1701 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1702 
   1703 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1704 		} else
   1705 #endif /* DEV_NETMAP */
   1706 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1707 			    adapter->num_rx_desc - 1);
   1708 	}
   1709 
   1710 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1711 
   1712 	ixv_initialize_rss_mapping(adapter);
   1713 
   1714 	if (adapter->num_queues > 1) {
   1715 		/* RSS and RX IPP Checksum are mutually exclusive */
   1716 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1717 	}
   1718 
   1719 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1720 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1721 
   1722 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1723 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1724 
   1725 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1726 
   1727 	return;
   1728 } /* ixv_initialize_receive_units */
   1729 
   1730 /************************************************************************
   1731  * ixv_setup_vlan_support
   1732  ************************************************************************/
   1733 static void
   1734 ixv_setup_vlan_support(struct adapter *adapter)
   1735 {
   1736 	struct ethercom *ec = &adapter->osdep.ec;
   1737 	struct ixgbe_hw *hw = &adapter->hw;
   1738 	struct rx_ring  *rxr;
   1739 	u32		ctrl, vid, vfta, retry;
   1740 
   1741 	/*
   1742 	 * We get here via init_locked, which means a soft
   1743 	 * reset has already cleared the VFTA and other VLAN
   1744 	 * state, so if no VLANs have been registered there
   1745 	 * is nothing to do.
   1746 	 */
   1747 	if (!VLAN_ATTACHED(ec))
   1748 		return;
   1749 
   1750 	/* Enable the queues */
   1751 	for (int i = 0; i < adapter->num_queues; i++) {
   1752 		rxr = &adapter->rx_rings[i];
   1753 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   1754 		ctrl |= IXGBE_RXDCTL_VME;
   1755 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   1756 		/*
   1757 		 * Let Rx path know that it needs to store VLAN tag
   1758 		 * as part of extra mbuf info.
   1759 		 */
   1760 		rxr->vtag_strip = TRUE;
   1761 	}
   1762 
   1763 #if 1
   1764 	/* XXX dirty hack. Enable all VIDs */
   1765 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
   1766 		adapter->shadow_vfta[i] = 0xffffffff;
   1767 #endif
   1768 	/*
   1769 	 * A soft reset zeroes out the VFTA, so
   1770 	 * we need to repopulate it now.
   1771 	 */
   1772 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1773 		if (adapter->shadow_vfta[i] == 0)
   1774 			continue;
   1775 		vfta = adapter->shadow_vfta[i];
   1776 		/*
   1777 		 * Reconstruct the VLAN IDs from the bits
   1778 		 * set in each 32-bit word of the shadow
   1779 		 * VFTA array.
   1780 		 */
   1781 		for (int j = 0; j < 32; j++) {
   1782 			retry = 0;
   1783 			if ((vfta & (1 << j)) == 0)
   1784 				continue;
   1785 			vid = (i * 32) + j;
   1786 			/* Call the shared code mailbox routine */
   1787 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1788 				if (++retry > 5)
   1789 					break;
   1790 			}
   1791 		}
   1792 	}
   1793 } /* ixv_setup_vlan_support */
   1794 
   1795 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1796 /************************************************************************
   1797  * ixv_register_vlan
   1798  *
   1799  *   Run via a vlan config EVENT, it enables us to use the
   1800  *   HW Filter table since we can get the vlan id. This just
   1801  *   creates the entry in the soft version of the VFTA, init
   1802  *   will repopulate the real table.
   1803  ************************************************************************/
   1804 static void
   1805 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1806 {
   1807 	struct adapter	*adapter = ifp->if_softc;
   1808 	u16		index, bit;
   1809 
   1810 	if (ifp->if_softc != arg) /* Not our event */
   1811 		return;
   1812 
   1813 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1814 		return;
   1815 
   1816 	IXGBE_CORE_LOCK(adapter);
   1817 	index = (vtag >> 5) & 0x7F;
   1818 	bit = vtag & 0x1F;
   1819 	adapter->shadow_vfta[index] |= (1 << bit);
   1820 	/* Re-init to load the changes */
   1821 	ixv_init_locked(adapter);
   1822 	IXGBE_CORE_UNLOCK(adapter);
   1823 } /* ixv_register_vlan */
   1824 
   1825 /************************************************************************
   1826  * ixv_unregister_vlan
   1827  *
   1828  *   Run via a vlan unconfig EVENT, remove our entry
   1829  *   in the soft vfta.
   1830  ************************************************************************/
   1831 static void
   1832 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1833 {
   1834 	struct adapter	*adapter = ifp->if_softc;
   1835 	u16		index, bit;
   1836 
   1837 	if (ifp->if_softc != arg)
   1838 		return;
   1839 
   1840 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1841 		return;
   1842 
   1843 	IXGBE_CORE_LOCK(adapter);
   1844 	index = (vtag >> 5) & 0x7F;
   1845 	bit = vtag & 0x1F;
   1846 	adapter->shadow_vfta[index] &= ~(1 << bit);
   1847 	/* Re-init to load the changes */
   1848 	ixv_init_locked(adapter);
   1849 	IXGBE_CORE_UNLOCK(adapter);
   1850 } /* ixv_unregister_vlan */
   1851 #endif
   1852 
   1853 /************************************************************************
   1854  * ixv_enable_intr
   1855  ************************************************************************/
   1856 static void
   1857 ixv_enable_intr(struct adapter *adapter)
   1858 {
   1859 	struct ixgbe_hw *hw = &adapter->hw;
   1860 	struct ix_queue *que = adapter->queues;
   1861 	u32             mask;
   1862 	int i;
   1863 
   1864 	/* VTEIAC: auto-clear the mailbox vector and all queue vectors */
   1865 	mask = (1 << adapter->vector);
   1866 	for (i = 0; i < adapter->num_queues; i++, que++)
   1867 		mask |= (1 << que->msix);
   1868 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1869 
   1870 	/* VTEIMS: unmask the mailbox vector, then enable each queue */
   1871 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
   1872 	que = adapter->queues;
   1873 	for (i = 0; i < adapter->num_queues; i++, que++)
   1874 		ixv_enable_queue(adapter, que->msix);
   1875 
   1876 	IXGBE_WRITE_FLUSH(hw);
   1877 
   1878 	return;
   1879 } /* ixv_enable_intr */
   1880 
   1881 /************************************************************************
   1882  * ixv_disable_intr
   1883  ************************************************************************/
   1884 static void
   1885 ixv_disable_intr(struct adapter *adapter)
   1886 {
   1887 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1888 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1889 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1890 
   1891 	return;
   1892 } /* ixv_disable_intr */
   1893 
   1894 /************************************************************************
   1895  * ixv_set_ivar
   1896  *
   1897  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1898  *    - entry is the register array entry
   1899  *    - vector is the MSI-X vector for this queue
   1900  *    - type is RX/TX/MISC
   1901  ************************************************************************/
   1902 static void
   1903 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1904 {
   1905 	struct ixgbe_hw *hw = &adapter->hw;
   1906 	u32             ivar, index;
   1907 
   1908 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1909 
   1910 	if (type == -1) { /* MISC IVAR */
   1911 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1912 		ivar &= ~0xFF;
   1913 		ivar |= vector;
   1914 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1915 	} else {          /* RX/TX IVARS */
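		/*
		 * Each VTIVAR register packs the RX (type 0) and TX (type 1)
		 * vectors for two queues, one byte per entry.
		 */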
   1916 		index = (16 * (entry & 1)) + (8 * type);
   1917 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1918 		ivar &= ~(0xFF << index);
   1919 		ivar |= (vector << index);
   1920 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1921 	}
   1922 } /* ixv_set_ivar */
   1923 
   1924 /************************************************************************
   1925  * ixv_configure_ivars
   1926  ************************************************************************/
   1927 static void
   1928 ixv_configure_ivars(struct adapter *adapter)
   1929 {
   1930 	struct ix_queue *que = adapter->queues;
   1931 
   1932 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1933 		/* First the RX queue entry */
   1934 		ixv_set_ivar(adapter, i, que->msix, 0);
   1935 		/* ... and the TX */
   1936 		ixv_set_ivar(adapter, i, que->msix, 1);
   1937 		/* Set an initial value in EITR */
   1938 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1939 		    IXGBE_EITR_DEFAULT);
   1940 	}
   1941 
   1942 	/* For the mailbox interrupt */
   1943 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1944 } /* ixv_configure_ivars */
   1945 
   1946 
   1947 /************************************************************************
   1948  * ixv_save_stats
   1949  *
   1950  *   The VF stats registers never have a truly virgin
   1951  *   starting point, so this routine tries to make an
   1952  *   artificial one, marking ground zero on attach as
   1953  *   it were.
   1954  ************************************************************************/
   1955 static void
   1956 ixv_save_stats(struct adapter *adapter)
   1957 {
   1958 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1959 
   1960 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1961 		stats->saved_reset_vfgprc +=
   1962 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1963 		stats->saved_reset_vfgptc +=
   1964 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1965 		stats->saved_reset_vfgorc +=
   1966 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1967 		stats->saved_reset_vfgotc +=
   1968 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1969 		stats->saved_reset_vfmprc +=
   1970 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1971 	}
   1972 } /* ixv_save_stats */
   1973 
   1974 /************************************************************************
   1975  * ixv_init_stats
   1976  ************************************************************************/
   1977 static void
   1978 ixv_init_stats(struct adapter *adapter)
   1979 {
   1980 	struct ixgbe_hw *hw = &adapter->hw;
   1981 
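	/*
	 * Capture the current hardware counters; they serve both as the
	 * rollover reference ('last') and the attach-time baseline ('base').
	 */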
   1982 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1983 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1984 	adapter->stats.vf.last_vfgorc |=
   1985 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1986 
   1987 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1988 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1989 	adapter->stats.vf.last_vfgotc |=
   1990 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1991 
   1992 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1993 
   1994 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1995 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1996 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1997 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1998 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1999 } /* ixv_init_stats */
   2000 
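/*
 * The VF statistics registers are only 32 (or 36) bits wide and wrap
 * silently, so these helpers fold each reading into a 64-bit event
 * counter, adding the register's full range whenever a wrap
 * (current < last) is detected.
 */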
   2001 #define UPDATE_STAT_32(reg, last, count)		\
   2002 {                                                       \
   2003 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   2004 	if (current < (last))				\
   2005 		count.ev_count += 0x100000000LL;	\
   2006 	(last) = current;				\
   2007 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2008 	count.ev_count |= current;			\
   2009 }
   2010 
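/* Same approach for the 36-bit octet counters split across two registers */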
   2011 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   2012 {                                                       \
   2013 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   2014 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   2015 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   2016 	if (current < (last))				\
   2017 		count.ev_count += 0x1000000000LL;	\
   2018 	(last) = current;				\
   2019 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2020 	count.ev_count |= current;			\
   2021 }
   2022 
   2023 /************************************************************************
   2024  * ixv_update_stats - Update the board statistics counters.
   2025  ************************************************************************/
   2026 void
   2027 ixv_update_stats(struct adapter *adapter)
   2028 {
   2029 	struct ixgbe_hw *hw = &adapter->hw;
   2030 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2031 
   2032 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
   2033 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
   2034 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
   2035 	    stats->vfgorc);
   2036 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
   2037 	    stats->vfgotc);
   2038 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2039 
   2040 	/* Fill out the OS statistics structure */
   2041 	/*
   2042 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   2043 	 * adapter->stats counters; that is required for ifconfig -z
   2044 	 * (SIOCZIFDATA) to work.
   2045 	 */
   2046 } /* ixv_update_stats */
   2047 
   2048 const struct sysctlnode *
   2049 ixv_sysctl_instance(struct adapter *adapter)
   2050 {
   2051 	const char *dvname;
   2052 	struct sysctllog **log;
   2053 	int rc;
   2054 	const struct sysctlnode *rnode;
   2055 
   2056 	log = &adapter->sysctllog;
   2057 	dvname = device_xname(adapter->dev);
   2058 
   2059 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2060 	    0, CTLTYPE_NODE, dvname,
   2061 	    SYSCTL_DESCR("ixv information and settings"),
   2062 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2063 		goto err;
   2064 
   2065 	return rnode;
   2066 err:
   2067 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2068 	return NULL;
   2069 }
   2070 
   2071 static void
   2072 ixv_add_device_sysctls(struct adapter *adapter)
   2073 {
   2074 	struct sysctllog **log;
   2075 	const struct sysctlnode *rnode, *cnode;
   2076 	device_t dev;
   2077 
   2078 	dev = adapter->dev;
   2079 	log = &adapter->sysctllog;
   2080 
   2081 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2082 		aprint_error_dev(dev, "could not create sysctl root\n");
   2083 		return;
   2084 	}
   2085 
   2086 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2087 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2088 	    "debug", SYSCTL_DESCR("Debug Info"),
   2089 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2090 		aprint_error_dev(dev, "could not create sysctl\n");
   2091 
   2092 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2093 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2094 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2095 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2096 		aprint_error_dev(dev, "could not create sysctl\n");
   2097 }
   2098 
   2099 /************************************************************************
   2100  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2101  ************************************************************************/
   2102 static void
   2103 ixv_add_stats_sysctls(struct adapter *adapter)
   2104 {
   2105 	device_t                dev = adapter->dev;
   2106 	struct tx_ring          *txr = adapter->tx_rings;
   2107 	struct rx_ring          *rxr = adapter->rx_rings;
   2108 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2109 	struct ixgbe_hw *hw = &adapter->hw;
   2110 	const struct sysctlnode *rnode;
   2111 	struct sysctllog **log = &adapter->sysctllog;
   2112 	const char *xname = device_xname(dev);
   2113 
   2114 	/* Driver Statistics */
   2115 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2116 	    NULL, xname, "Handled queue in softint");
   2117 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2118 	    NULL, xname, "Requeued in softint");
   2119 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2120 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2121 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2122 	    NULL, xname, "m_defrag() failed");
   2123 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2124 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2125 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2126 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2127 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2128 	    NULL, xname, "Driver tx dma hard fail other");
   2129 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2130 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2131 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2132 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2133 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2134 	    NULL, xname, "Watchdog timeouts");
   2135 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2136 	    NULL, xname, "TSO errors");
   2137 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2138 	    NULL, xname, "Link MSI-X IRQ Handled");
   2139 
   2140 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2141 		snprintf(adapter->queues[i].evnamebuf,
   2142 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2143 		    xname, i);
   2144 		snprintf(adapter->queues[i].namebuf,
   2145 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2146 
   2147 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2148 			aprint_error_dev(dev, "could not create sysctl root\n");
   2149 			break;
   2150 		}
   2151 
   2152 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2153 		    0, CTLTYPE_NODE,
   2154 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2155 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2156 			break;
   2157 
   2158 #if 0 /* not yet */
   2159 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2160 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2161 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2162 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2163 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2164 			break;
   2165 
   2166 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2167 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2168 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2169 			NULL, 0, &(adapter->queues[i].irqs),
   2170 		    0, CTL_CREATE, CTL_EOL) != 0)
   2171 			break;
   2172 
   2173 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2174 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2175 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2176 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2177 		    0, CTL_CREATE, CTL_EOL) != 0)
   2178 			break;
   2179 
   2180 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2181 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2182 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2183 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2184 		    0, CTL_CREATE, CTL_EOL) != 0)
   2185 			break;
   2186 #endif
   2187 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2188 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2189 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2190 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2191 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2192 		    NULL, adapter->queues[i].evnamebuf,
   2193 		    "Queue No Descriptor Available");
   2194 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2195 		    NULL, adapter->queues[i].evnamebuf,
   2196 		    "Queue Packets Transmitted");
   2197 #ifndef IXGBE_LEGACY_TX
   2198 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2199 		    NULL, adapter->queues[i].evnamebuf,
   2200 		    "Packets dropped in pcq");
   2201 #endif
   2202 
   2203 #ifdef LRO
   2204 		struct lro_ctrl *lro = &rxr->lro;
   2205 #endif /* LRO */
   2206 
   2207 #if 0 /* not yet */
   2208 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2209 		    CTLFLAG_READONLY,
   2210 		    CTLTYPE_INT,
   2211 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2212 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2213 		    CTL_CREATE, CTL_EOL) != 0)
   2214 			break;
   2215 
   2216 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2217 		    CTLFLAG_READONLY,
   2218 		    CTLTYPE_INT,
   2219 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2220 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2221 		    CTL_CREATE, CTL_EOL) != 0)
   2222 			break;
   2223 #endif
   2224 
   2225 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2226 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2227 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2228 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2229 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2230 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2231 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2232 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2233 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2234 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2235 #ifdef LRO
   2236 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2237 				CTLFLAG_RD, &lro->lro_queued, 0,
   2238 				"LRO Queued");
   2239 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2240 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2241 				"LRO Flushed");
   2242 #endif /* LRO */
   2243 	}
   2244 
   2245 	/* MAC stats get their own sub node */
   2246 
   2247 	snprintf(stats->namebuf,
   2248 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2249 
   2250 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2251 	    stats->namebuf, "rx csum offload - IP");
   2252 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2253 	    stats->namebuf, "rx csum offload - L4");
   2254 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2255 	    stats->namebuf, "rx csum offload - IP bad");
   2256 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2257 	    stats->namebuf, "rx csum offload - L4 bad");
   2258 
   2259 	/* Packet Reception Stats */
   2260 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2261 	    xname, "Good Packets Received");
   2262 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2263 	    xname, "Good Octets Received");
   2264 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2265 	    xname, "Multicast Packets Received");
   2266 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2267 	    xname, "Good Packets Transmitted");
   2268 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2269 	    xname, "Good Octets Transmitted");
   2270 
   2271 	/* Mailbox Stats */
   2272 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2273 	    xname, "message TXs");
   2274 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2275 	    xname, "message RXs");
   2276 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2277 	    xname, "ACKs");
   2278 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2279 	    xname, "REQs");
   2280 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2281 	    xname, "RSTs");
   2282 
   2283 } /* ixv_add_stats_sysctls */
   2284 
   2285 /************************************************************************
   2286  * ixv_set_sysctl_value
   2287  ************************************************************************/
   2288 static void
   2289 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2290 	const char *description, int *limit, int value)
   2291 {
   2292 	device_t dev =  adapter->dev;
   2293 	struct sysctllog **log;
   2294 	const struct sysctlnode *rnode, *cnode;
   2295 
   2296 	log = &adapter->sysctllog;
   2297 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2298 		aprint_error_dev(dev, "could not create sysctl root\n");
   2299 		return;
   2300 	}
   2301 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2302 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2303 	    name, SYSCTL_DESCR(description),
   2304 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2305 		aprint_error_dev(dev, "could not create sysctl\n");
   2306 	*limit = value;
   2307 } /* ixv_set_sysctl_value */
   2308 
   2309 /************************************************************************
   2310  * ixv_print_debug_info
   2311  *
   2312  *   Called via the 'debug' sysctl (ixv_sysctl_debug).
   2313  *   Provides a way to take a look at important statistics
   2314  *   maintained by the driver and hardware.
   2315  ************************************************************************/
   2316 static void
   2317 ixv_print_debug_info(struct adapter *adapter)
   2318 {
   2319 	device_t	dev = adapter->dev;
   2320 	struct ixgbe_hw *hw = &adapter->hw;
   2321 	struct ix_queue *que = adapter->queues;
   2322 	struct rx_ring	*rxr;
   2323 	struct tx_ring	*txr;
   2324 #ifdef LRO
   2325 	struct lro_ctrl *lro;
   2326 #endif /* LRO */
   2327 
   2328 	device_printf(dev, "Error Byte Count = %u \n",
   2329 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2330 
   2331 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2332 		txr = que->txr;
   2333 		rxr = que->rxr;
   2334 #ifdef LRO
   2335 		lro = &rxr->lro;
   2336 #endif /* LRO */
   2337 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2338 		    que->msix, (long)que->irqs.ev_count);
   2339 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2340 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2341 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2342 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2343 #ifdef LRO
   2344 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2345 		    rxr->me, (long long)lro->lro_queued);
   2346 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2347 		    rxr->me, (long long)lro->lro_flushed);
   2348 #endif /* LRO */
   2349 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2350 		    txr->me, (long)txr->total_packets.ev_count);
   2351 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2352 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2353 	}
   2354 
   2355 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2356 	    (long)adapter->link_irq.ev_count);
   2357 } /* ixv_print_debug_info */
   2358 
   2359 /************************************************************************
   2360  * ixv_sysctl_debug
   2361  ************************************************************************/
   2362 static int
   2363 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2364 {
   2365 	struct sysctlnode node;
   2366 	struct adapter *adapter;
   2367 	int            error, result = 0;
   2368 
   2369 	node = *rnode;
   2370 	/* Grab the softc before sysctl_data is repointed at 'result' */
   2371 	adapter = (struct adapter *)node.sysctl_data;
   2372 	node.sysctl_data = &result;
   2373 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2374 
   2375 	if (error || newp == NULL)
   2376 		return error;
   2377 
   2378 	if (result == 1)
   2379 		ixv_print_debug_info(adapter);
   2380 
   2381 	return 0;
   2382 } /* ixv_sysctl_debug */
   2383 
   2384 /************************************************************************
   2385  * ixv_init_device_features
   2386  ************************************************************************/
   2387 static void
   2388 ixv_init_device_features(struct adapter *adapter)
   2389 {
   2390 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2391 	                  | IXGBE_FEATURE_VF
   2392 	                  | IXGBE_FEATURE_RSS
   2393 	                  | IXGBE_FEATURE_LEGACY_TX;
   2394 
   2395 	/* A tad short on feature flags for VFs, atm. */
   2396 	switch (adapter->hw.mac.type) {
   2397 	case ixgbe_mac_82599_vf:
   2398 		break;
   2399 	case ixgbe_mac_X540_vf:
   2400 		break;
   2401 	case ixgbe_mac_X550_vf:
   2402 	case ixgbe_mac_X550EM_x_vf:
   2403 	case ixgbe_mac_X550EM_a_vf:
   2404 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2405 		break;
   2406 	default:
   2407 		break;
   2408 	}
   2409 
   2410 	/* Enabled by default... */
   2411 	/* Is a virtual function (VF) */
   2412 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2413 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2414 	/* Netmap */
   2415 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2416 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2417 	/* Receive-Side Scaling (RSS) */
   2418 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2419 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2420 	/* Needs advanced context descriptor regardless of offloads req'd */
   2421 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2422 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2423 
   2424 	/* Enabled via sysctl... */
   2425 	/* Legacy (single queue) transmit */
   2426 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2427 	    ixv_enable_legacy_tx)
   2428 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2429 } /* ixv_init_device_features */
   2430 
   2431 /************************************************************************
   2432  * ixv_shutdown - Shutdown entry point
   2433  ************************************************************************/
   2434 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2435 static int
   2436 ixv_shutdown(device_t dev)
   2437 {
   2438 	struct adapter *adapter = device_private(dev);
   2439 	IXGBE_CORE_LOCK(adapter);
   2440 	ixv_stop(adapter);
   2441 	IXGBE_CORE_UNLOCK(adapter);
   2442 
   2443 	return (0);
   2444 } /* ixv_shutdown */
   2445 #endif
   2446 
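/************************************************************************
 * ixv_ifflags_cb - Handle changes to interface flags
 ************************************************************************/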
   2447 static int
   2448 ixv_ifflags_cb(struct ethercom *ec)
   2449 {
   2450 	struct ifnet *ifp = &ec->ec_if;
   2451 	struct adapter *adapter = ifp->if_softc;
   2452 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2453 
   2454 	IXGBE_CORE_LOCK(adapter);
   2455 
   2456 	if (change != 0)
   2457 		adapter->if_flags = ifp->if_flags;
   2458 
   2459 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2460 		rc = ENETRESET;
   2461 
   2462 	/* Set up VLAN support and filter */
   2463 	ixv_setup_vlan_support(adapter);
   2464 
   2465 	IXGBE_CORE_UNLOCK(adapter);
   2466 
   2467 	return rc;
   2468 }
   2469 
   2470 
   2471 /************************************************************************
   2472  * ixv_ioctl - Ioctl entry point
   2473  *
   2474  *   Called when the user wants to configure the interface.
   2475  *
   2476  *   return 0 on success, positive on failure
   2477  ************************************************************************/
   2478 static int
   2479 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2480 {
   2481 	struct adapter	*adapter = ifp->if_softc;
   2482 	struct ifcapreq *ifcr = data;
   2483 	struct ifreq	*ifr = data;
   2484 	int             error = 0;
   2485 	int l4csum_en;
   2486 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2487 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2488 
   2489 	switch (command) {
   2490 	case SIOCSIFFLAGS:
   2491 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2492 		break;
   2493 	case SIOCADDMULTI:
   2494 	case SIOCDELMULTI:
   2495 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2496 		break;
   2497 	case SIOCSIFMEDIA:
   2498 	case SIOCGIFMEDIA:
   2499 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2500 		break;
   2501 	case SIOCSIFCAP:
   2502 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2503 		break;
   2504 	case SIOCSIFMTU:
   2505 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2506 		break;
   2507 	default:
   2508 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2509 		break;
   2510 	}
   2511 
   2512 	switch (command) {
   2513 	case SIOCSIFMEDIA:
   2514 	case SIOCGIFMEDIA:
   2515 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2516 	case SIOCSIFCAP:
   2517 		/* Layer-4 Rx checksum offload has to be turned on and
   2518 		 * off as a unit.
   2519 		 */
   2520 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2521 		if (l4csum_en != l4csum && l4csum_en != 0)
   2522 			return EINVAL;
   2523 		/*FALLTHROUGH*/
   2524 	case SIOCADDMULTI:
   2525 	case SIOCDELMULTI:
   2526 	case SIOCSIFFLAGS:
   2527 	case SIOCSIFMTU:
   2528 	default:
   2529 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2530 			return error;
   2531 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2532 			;
   2533 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2534 			IXGBE_CORE_LOCK(adapter);
   2535 			ixv_init_locked(adapter);
   2536 			IXGBE_CORE_UNLOCK(adapter);
   2537 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2538 			/*
   2539 			 * Multicast list has changed; set the hardware filter
   2540 			 * accordingly.
   2541 			 */
   2542 			IXGBE_CORE_LOCK(adapter);
   2543 			ixv_disable_intr(adapter);
   2544 			ixv_set_multi(adapter);
   2545 			ixv_enable_intr(adapter);
   2546 			IXGBE_CORE_UNLOCK(adapter);
   2547 		}
   2548 		return 0;
   2549 	}
   2550 } /* ixv_ioctl */
   2551 
   2552 /************************************************************************
   2553  * ixv_init
   2554  ************************************************************************/
   2555 static int
   2556 ixv_init(struct ifnet *ifp)
   2557 {
   2558 	struct adapter *adapter = ifp->if_softc;
   2559 
   2560 	IXGBE_CORE_LOCK(adapter);
   2561 	ixv_init_locked(adapter);
   2562 	IXGBE_CORE_UNLOCK(adapter);
   2563 
   2564 	return 0;
   2565 } /* ixv_init */
   2566 
   2567 
   2568 /************************************************************************
   2569  * ixv_handle_que
   2570  ************************************************************************/
   2571 static void
   2572 ixv_handle_que(void *context)
   2573 {
   2574 	struct ix_queue *que = context;
   2575 	struct adapter  *adapter = que->adapter;
   2576 	struct tx_ring	*txr = que->txr;
   2577 	struct ifnet    *ifp = adapter->ifp;
   2578 	bool		more;
   2579 
   2580 	adapter->handleq.ev_count++;
   2581 
   2582 	if (ifp->if_flags & IFF_RUNNING) {
   2583 		more = ixgbe_rxeof(que);
   2584 		IXGBE_TX_LOCK(txr);
   2585 		ixgbe_txeof(txr);
   2586 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2587 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2588 				ixgbe_mq_start_locked(ifp, txr);
   2589 		/* Only for queue 0 */
   2590 		/* NetBSD still needs this for CBQ */
   2591 		if ((&adapter->queues[0] == que)
   2592 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2593 			ixgbe_legacy_start_locked(ifp, txr);
   2594 		IXGBE_TX_UNLOCK(txr);
   2595 		if (more) {
   2596 			adapter->req.ev_count++;
   2597 			softint_schedule(que->que_si);
   2598 			return;
   2599 		}
   2600 	}
   2601 
   2602 	/* Re-enable this interrupt */
   2603 	ixv_enable_queue(adapter, que->msix);
   2604 
   2605 	return;
   2606 } /* ixv_handle_que */
   2607 
   2608 /************************************************************************
   2609  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2610  ************************************************************************/
   2611 static int
   2612 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2613 {
   2614 	device_t	dev = adapter->dev;
   2615 	struct ix_queue *que = adapter->queues;
   2616 	struct tx_ring	*txr = adapter->tx_rings;
   2617 	int		error, msix_ctrl, rid, vector = 0;
   2618 	pci_chipset_tag_t pc;
   2619 	pcitag_t	tag;
   2620 	char		intrbuf[PCI_INTRSTR_LEN];
   2621 	char		intr_xname[32];
   2622 	const char	*intrstr = NULL;
   2623 	kcpuset_t	*affinity;
   2624 	int		cpu_id = 0;
   2625 
   2626 	pc = adapter->osdep.pc;
   2627 	tag = adapter->osdep.tag;
   2628 
   2629 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2630 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2631 	    adapter->osdep.nintrs) != 0) {
   2632 		aprint_error_dev(dev,
   2633 		    "failed to allocate MSI-X interrupt\n");
   2634 		return (ENXIO);
   2635 	}
   2636 
   2637 	kcpuset_create(&affinity, false);
   2638 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2639 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2640 		    device_xname(dev), i);
   2641 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2642 		    sizeof(intrbuf));
   2643 #ifdef IXGBE_MPSAFE
   2644 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2645 		    true);
   2646 #endif
   2647 		/* Set the handler function */
   2648 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2649 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2650 		    intr_xname);
   2651 		if (que->res == NULL) {
   2652 			pci_intr_release(pc, adapter->osdep.intrs,
   2653 			    adapter->osdep.nintrs);
   2654 			aprint_error_dev(dev,
   2655 			    "Failed to register QUE handler\n");
   2656 			kcpuset_destroy(affinity);
   2657 			return (ENXIO);
   2658 		}
   2659 		que->msix = vector;
   2660 		adapter->active_queues |= (u64)1 << que->msix;
   2661 
   2662 		cpu_id = i;
   2663 		/* Round-robin affinity */
   2664 		kcpuset_zero(affinity);
   2665 		kcpuset_set(affinity, cpu_id % ncpu);
   2666 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2667 		    NULL);
   2668 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2669 		    intrstr);
   2670 		if (error == 0)
   2671 			aprint_normal(", bound queue %d to cpu %d\n",
   2672 			    i, cpu_id % ncpu);
   2673 		else
   2674 			aprint_normal("\n");
   2675 
   2676 #ifndef IXGBE_LEGACY_TX
   2677 		txr->txr_si
   2678 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2679 			ixgbe_deferred_mq_start, txr);
   2680 #endif
   2681 		que->que_si
   2682 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2683 			ixv_handle_que, que);
   2684 		if (que->que_si == NULL) {
   2685 			aprint_error_dev(dev,
   2686 			    "could not establish software interrupt\n");
   2687 		}
   2688 	}
   2689 
   2690 	/* and Mailbox */
   2691 	cpu_id++;
   2692 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2693 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2694 	    sizeof(intrbuf));
   2695 #ifdef IXGBE_MPSAFE
   2696 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2697 	    true);
   2698 #endif
   2699 	/* Set the mbx handler function */
   2700 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2701 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2702 	    intr_xname);
   2703 	if (adapter->osdep.ihs[vector] == NULL) {
   2704 		adapter->res = NULL;
   2705 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2706 		kcpuset_destroy(affinity);
   2707 		return (ENXIO);
   2708 	}
   2709 	/* Round-robin affinity */
   2710 	kcpuset_zero(affinity);
   2711 	kcpuset_set(affinity, cpu_id % ncpu);
   2712 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
   2713 
   2714 	aprint_normal_dev(dev,
   2715 	    "for link, interrupting at %s", intrstr);
   2716 	if (error == 0)
   2717 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2718 	else
   2719 		aprint_normal("\n");
   2720 
   2721 	adapter->vector = vector;
   2722 	/* Tasklets for Mailbox */
   2723 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   2724 	    ixv_handle_link, adapter);
   2725 	/*
   2726 	 * Due to a broken design, QEMU will fail to properly
   2727 	 * enable the guest for MSI-X unless the vectors in
   2728 	 * the table are all set up, so we must rewrite the
   2729 	 * ENABLE in the MSI-X control register again at this
   2730 	 * point to cause it to successfully initialize us.
   2731 	 */
   2732 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2733 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2734 		rid += PCI_MSIX_CTL;
   2735 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2736 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2737 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2738 	}
   2739 
   2740 	kcpuset_destroy(affinity);
   2741 	return (0);
   2742 } /* ixv_allocate_msix */
   2743 
   2744 /************************************************************************
   2745  * ixv_configure_interrupts - Setup MSI-X resources
   2746  *
   2747  *   Note: The VF device MUST use MSI-X; there is no fallback.
   2748  ************************************************************************/
   2749 static int
   2750 ixv_configure_interrupts(struct adapter *adapter)
   2751 {
   2752 	device_t dev = adapter->dev;
   2753 	int want, queues, msgs;
   2754 
   2755 	/* Must have at least 2 MSI-X vectors */
   2756 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2757 	if (msgs < 2) {
   2758 		aprint_error_dev(dev, "MSIX config error\n");
   2759 		return (ENXIO);
   2760 	}
   2761 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2762 
   2763 	/* Figure out a reasonable auto config value */
   2764 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2765 
   2766 	if (ixv_num_queues != 0)
   2767 		queues = ixv_num_queues;
   2768 	else if (queues > IXGBE_VF_MAX_TX_QUEUES)
   2769 		queues = IXGBE_VF_MAX_TX_QUEUES;
   2770 
   2771 	/*
   2772 	 * Want vectors for the queues,
   2773 	 * plus an additional for mailbox.
   2774 	 */
   2775 	want = queues + 1;
   2776 	if (msgs >= want)
   2777 		msgs = want;
   2778 	else {
   2779 		aprint_error_dev(dev,
   2780 		    "MSI-X Configuration Problem, "
   2781 		    "%d vectors available but %d wanted!\n",
   2782 		    msgs, want);
   2783 		return -1;
   2784 	}
   2785 
   2786 	adapter->msix_mem = (void *)1; /* XXX */
   2787 	aprint_normal_dev(dev,
   2788 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2789 	adapter->num_queues = queues;
   2790 
   2791 	return (0);
   2792 } /* ixv_configure_interrupts */
   2793 
   2794 
   2795 /************************************************************************
   2796  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2797  *
   2798  *   Done outside of interrupt context since the driver might sleep
   2799  ************************************************************************/
   2800 static void
   2801 ixv_handle_link(void *context)
   2802 {
   2803 	struct adapter *adapter = context;
   2804 
   2805 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2806 	    &adapter->link_up, FALSE);
   2807 	ixv_update_link_status(adapter);
   2808 } /* ixv_handle_link */
   2809 
   2810 /************************************************************************
   2811  * ixv_check_link - Used in the local timer to poll for link changes
   2812  ************************************************************************/
   2813 static void
   2814 ixv_check_link(struct adapter *adapter)
   2815 {
   2816 	adapter->hw.mac.get_link_status = TRUE;
   2817 
   2818 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2819 	    &adapter->link_up, FALSE);
   2820 	ixv_update_link_status(adapter);
   2821 } /* ixv_check_link */
   2822