ixv.c revision 1.52
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.52 2017/02/13 10:13:54 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixgbe.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.4.6-k";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
     50  *  Used by probe to select devices to load on
     51  *  Last field stores an index into ixv_strings
     52  *  Last entry must be all 0s
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     63 	/* required last entry */
     64 	{0, 0, 0, 0, 0}
     65 };
     66 
     67 /*********************************************************************
     68  *  Table of branding strings
     69  *********************************************************************/
     70 
     71 static const char    *ixv_strings[] = {
     72 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     73 };
     74 
     75 /*********************************************************************
     76  *  Function prototypes
     77  *********************************************************************/
     78 static int      ixv_probe(device_t, cfdata_t, void *);
     79 static void	ixv_attach(device_t, device_t, void *);
     80 static int      ixv_detach(device_t, int);
     81 #if 0
     82 static int      ixv_shutdown(device_t);
     83 #endif
     84 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     85 static int	ixv_init(struct ifnet *);
     86 static void	ixv_init_locked(struct adapter *);
     87 static void     ixv_stop(void *);
     88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     89 static int      ixv_media_change(struct ifnet *);
     90 static void     ixv_identify_hardware(struct adapter *);
     91 static int      ixv_allocate_pci_resources(struct adapter *,
     92 		    const struct pci_attach_args *);
     93 static int      ixv_allocate_msix(struct adapter *,
     94 		    const struct pci_attach_args *);
     95 static int	ixv_setup_msix(struct adapter *);
     96 static void	ixv_free_pci_resources(struct adapter *);
     97 static void     ixv_local_timer(void *);
     98 static void     ixv_local_timer_locked(void *);
     99 static void     ixv_setup_interface(device_t, struct adapter *);
    100 static void     ixv_config_link(struct adapter *);
    101 
    102 static void     ixv_initialize_transmit_units(struct adapter *);
    103 static void     ixv_initialize_receive_units(struct adapter *);
    104 
    105 static void     ixv_enable_intr(struct adapter *);
    106 static void     ixv_disable_intr(struct adapter *);
    107 static void     ixv_set_multi(struct adapter *);
    108 static void     ixv_update_link_status(struct adapter *);
    109 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    110 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    111 static void	ixv_configure_ivars(struct adapter *);
    112 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    113 
    114 static void	ixv_setup_vlan_support(struct adapter *);
    115 #if 0
    116 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    117 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    118 #endif
    119 
    120 static void	ixv_add_device_sysctls(struct adapter *);
    121 static void	ixv_save_stats(struct adapter *);
    122 static void	ixv_init_stats(struct adapter *);
    123 static void	ixv_update_stats(struct adapter *);
    124 static void	ixv_add_stats_sysctls(struct adapter *);
    125 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    126 		    const char *, int *, int);
    127 
    128 /* The MSI/X Interrupt handlers */
    129 static int	ixv_msix_que(void *);
    130 static int	ixv_msix_mbx(void *);
    131 
    132 /* Deferred interrupt tasklets */
    133 static void	ixv_handle_que(void *);
    134 static void	ixv_handle_mbx(void *);
    135 
    136 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    137 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    138 
    139 #ifdef DEV_NETMAP
    140 /*
    141  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    142  * if_ix.c.
    143  */
    144 extern void ixgbe_netmap_attach(struct adapter *adapter);
    145 
    146 #include <net/netmap.h>
    147 #include <sys/selinfo.h>
    148 #include <dev/netmap/netmap_kern.h>
    149 #endif /* DEV_NETMAP */
    150 
    151 /*********************************************************************
    152  *  FreeBSD Device Interface Entry Points
    153  *********************************************************************/
    154 
    155 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    156     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    157     DVF_DETACH_SHUTDOWN);
    158 
     159 #if 0
    160 static device_method_t ixv_methods[] = {
    161 	/* Device interface */
    162 	DEVMETHOD(device_probe, ixv_probe),
    163 	DEVMETHOD(device_attach, ixv_attach),
    164 	DEVMETHOD(device_detach, ixv_detach),
    165 	DEVMETHOD(device_shutdown, ixv_shutdown),
    166 	DEVMETHOD_END
    167 };
    168 #endif
    169 
    170 #if 0
    171 static driver_t ixv_driver = {
    172 	"ixv", ixv_methods, sizeof(struct adapter),
    173 };
    174 
    175 devclass_t ixv_devclass;
    176 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    177 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    178 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    179 #ifdef DEV_NETMAP
    180 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    181 #endif /* DEV_NETMAP */
    182 /* XXX depend on 'ix' ? */
    183 #endif
    184 
    185 /*
    186 ** TUNEABLE PARAMETERS:
    187 */
    188 
    189 /* Number of Queues - do not exceed MSIX vectors - 1 */
    190 static int ixv_num_queues = 0;
    191 #define	TUNABLE_INT(__x, __y)
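         /*
         ** Note: TUNABLE_INT() is FreeBSD's loader-tunable hook; the empty
         ** define above turns it into a no-op on NetBSD, so the values below
         ** are compile-time defaults (some are re-exported as sysctls during
         ** attach).
         */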
    192 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    193 
    194 /*
    195 ** AIM: Adaptive Interrupt Moderation
    196 ** which means that the interrupt rate
    197 ** is varied over time based on the
    198 ** traffic for that interrupt vector
    199 */
    200 static bool ixv_enable_aim = false;
    201 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    202 
    203 /* How many packets rxeof tries to clean at a time */
    204 static int ixv_rx_process_limit = 256;
    205 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    206 
    207 /* How many packets txeof tries to clean at a time */
    208 static int ixv_tx_process_limit = 256;
    209 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    210 
    211 /*
    212 ** Number of TX descriptors per ring,
    213 ** setting higher than RX as this seems
    214 ** the better performing choice.
    215 */
    216 static int ixv_txd = DEFAULT_TXD;
    217 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    218 
    219 /* Number of RX descriptors per ring */
    220 static int ixv_rxd = DEFAULT_RXD;
    221 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    222 
    223 /*
     224 ** Shadow VFTA table; this is needed because
    225 ** the real filter table gets cleared during
    226 ** a soft reset and we need to repopulate it.
    227 */
    228 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    229 
    230 /*********************************************************************
    231  *  Device identification routine
    232  *
     233  *  ixv_probe determines whether the driver should be loaded on the
     234  *  adapter, based on the PCI vendor/device ID of the adapter.
    235  *
    236  *  return 1 on success, 0 on failure
    237  *********************************************************************/
    238 
    239 static int
    240 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    241 {
    242 #ifdef __HAVE_PCI_MSI_MSIX
    243 	const struct pci_attach_args *pa = aux;
    244 
    245 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    246 #else
    247 	return 0;
    248 #endif
    249 }
    250 
    251 static ixgbe_vendor_info_t *
    252 ixv_lookup(const struct pci_attach_args *pa)
    253 {
    254 	pcireg_t subid;
    255 	ixgbe_vendor_info_t *ent;
    256 
    257 	INIT_DEBUGOUT("ixv_lookup: begin");
    258 
    259 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    260 		return NULL;
    261 
    262 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    263 
    264 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    265 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    266 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    267 
    268 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    269 		     (ent->subvendor_id == 0)) &&
    270 
    271 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    272 		     (ent->subdevice_id == 0))) {
    273 			return ent;
    274 		}
    275 	}
    276 	return NULL;
    277 }
    278 
    279 
    280 /*********************************************************************
    281  *  Device initialization routine
    282  *
    283  *  The attach entry point is called when the driver is being loaded.
    284  *  This routine identifies the type of hardware, allocates all resources
    285  *  and initializes the hardware.
    286  *
     287  *  Note: in this NetBSD port the attach routine returns no value.
    288  *********************************************************************/
    289 
    290 static void
    291 ixv_attach(device_t parent, device_t dev, void *aux)
    292 {
    293 	struct adapter *adapter;
    294 	struct ixgbe_hw *hw;
    295 	int             error = 0;
    296 	ixgbe_vendor_info_t *ent;
    297 	const struct pci_attach_args *pa = aux;
    298 
    299 	INIT_DEBUGOUT("ixv_attach: begin");
    300 
    301 	/* Allocate, clear, and link in our adapter structure */
    302 	adapter = device_private(dev);
    303 	adapter->dev = dev;
    304 	hw = &adapter->hw;
    305 
    306 #ifdef DEV_NETMAP
    307 	adapter->init_locked = ixv_init_locked;
    308 	adapter->stop_locked = ixv_stop;
    309 #endif
    310 
    311 	adapter->osdep.pc = pa->pa_pc;
    312 	adapter->osdep.tag = pa->pa_tag;
    313 	if (pci_dma64_available(pa))
    314 		adapter->osdep.dmat = pa->pa_dmat64;
    315 	else
    316 		adapter->osdep.dmat = pa->pa_dmat;
    317 	adapter->osdep.attached = false;
    318 
    319 	ent = ixv_lookup(pa);
    320 
    321 	KASSERT(ent != NULL);
    322 
    323 	aprint_normal(": %s, Version - %s\n",
    324 	    ixv_strings[ent->index], ixv_driver_version);
    325 
    326 	/* Core Lock Init*/
    327 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    328 
    329 	/* Set up the timer callout */
    330 	callout_init(&adapter->timer, 0);
    331 
    332 	/* Determine hardware revision */
    333 	ixv_identify_hardware(adapter);
    334 
    335 	/* Do base PCI setup - map BAR0 */
    336 	if (ixv_allocate_pci_resources(adapter, pa)) {
    337 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    338 		error = ENXIO;
    339 		goto err_out;
    340 	}
    341 
    342 	/* Sysctls for limiting the amount of work done in the taskqueues */
    343 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    344 	    "max number of rx packets to process",
    345 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    346 
    347 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    348 	    "max number of tx packets to process",
    349 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    350 
    351 	/* Do descriptor calc and sanity checks */
    352 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    353 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    354 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    355 		adapter->num_tx_desc = DEFAULT_TXD;
    356 	} else
    357 		adapter->num_tx_desc = ixv_txd;
    358 
    359 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    360 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    361 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    362 		adapter->num_rx_desc = DEFAULT_RXD;
    363 	} else
    364 		adapter->num_rx_desc = ixv_rxd;
    365 
    366 	/* Allocate our TX/RX Queues */
    367 	if (ixgbe_allocate_queues(adapter)) {
    368 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    369 		error = ENOMEM;
    370 		goto err_out;
    371 	}
    372 
    373 	/*
     374 	** Initialize the shared code: it's at
     375 	** this point that the mac type is set.
    376 	*/
    377 	error = ixgbe_init_shared_code(hw);
    378 	if (error) {
    379 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    380 		error = EIO;
    381 		goto err_late;
    382 	}
    383 
    384 	/* Setup the mailbox */
    385 	ixgbe_init_mbx_params_vf(hw);
    386 
    387 	/* Reset mbox api to 1.0 */
    388 	error = ixgbe_reset_hw(hw);
    389 	if (error == IXGBE_ERR_RESET_FAILED)
    390 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    391 	else if (error)
    392 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    393 	if (error) {
    394 		error = EIO;
    395 		goto err_late;
    396 	}
    397 
    398 	/* Negotiate mailbox API version */
    399 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    400 	if (error)
    401 		aprint_debug_dev(dev,
    402 		    "MBX API 1.1 negotiation failed! Error %d\n", error);
    403 
    404 	error = ixgbe_init_hw(hw);
    405 	if (error) {
    406 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    407 		error = EIO;
    408 		goto err_late;
    409 	}
    410 
    411 	error = ixv_allocate_msix(adapter, pa);
    412 	if (error) {
    413 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    414 		goto err_late;
    415 	}
    416 
    417 	/* If no mac address was assigned, make a random one */
    418 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    419 		u8 addr[ETHER_ADDR_LEN];
    420 		uint64_t rndval = cprng_fast64();
    421 
    422 		memcpy(addr, &rndval, sizeof(addr));
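         		/* clear the multicast bit and set the locally-administered bit */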
    423 		addr[0] &= 0xFE;
    424 		addr[0] |= 0x02;
    425 		bcopy(addr, hw->mac.addr, sizeof(addr));
    426 	}
    427 
    428 	/* hw.ix defaults init */
     429 	/* hw.ixv defaults init */
    430 
    431 	/* Setup OS specific network interface */
    432 	ixv_setup_interface(dev, adapter);
    433 
    434 	/* Do the stats setup */
    435 	ixv_save_stats(adapter);
    436 	ixv_init_stats(adapter);
    437 
    438 	/* Register for VLAN events */
    439 #if 0 /* XXX delete after write? */
    440 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    441 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    442 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    443 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    444 #endif
    445 
    446 	/* Add sysctls */
    447 	ixv_add_device_sysctls(adapter);
    448 	ixv_add_stats_sysctls(adapter);
    449 
    450 #ifdef DEV_NETMAP
    451 	ixgbe_netmap_attach(adapter);
    452 #endif /* DEV_NETMAP */
    453 	INIT_DEBUGOUT("ixv_attach: end");
    454 	adapter->osdep.attached = true;
    455 	return;
    456 
    457 err_late:
    458 	ixgbe_free_transmit_structures(adapter);
    459 	ixgbe_free_receive_structures(adapter);
    460 err_out:
    461 	ixv_free_pci_resources(adapter);
    462 	return;
    463 
    464 }
    465 
    466 /*********************************************************************
    467  *  Device removal routine
    468  *
    469  *  The detach entry point is called when the driver is being removed.
    470  *  This routine stops the adapter and deallocates all the resources
    471  *  that were allocated for driver operation.
    472  *
    473  *  return 0 on success, positive on failure
    474  *********************************************************************/
    475 
    476 static int
    477 ixv_detach(device_t dev, int flags)
    478 {
    479 	struct adapter *adapter = device_private(dev);
    480 	struct ix_queue *que = adapter->queues;
    481 	struct tx_ring *txr = adapter->tx_rings;
    482 	struct rx_ring *rxr = adapter->rx_rings;
    483 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    484 
    485 	INIT_DEBUGOUT("ixv_detach: begin");
    486 	if (adapter->osdep.attached == false)
    487 		return 0;
    488 
    489 #if NVLAN > 0
    490 	/* Make sure VLANS are not using driver */
    491 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    492 		;	/* nothing to do: no VLANs */
    493 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    494 		vlan_ifdetach(adapter->ifp);
    495 	else {
    496 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    497 		return EBUSY;
    498 	}
    499 #endif
    500 
    501 	IXGBE_CORE_LOCK(adapter);
    502 	ixv_stop(adapter);
    503 	IXGBE_CORE_UNLOCK(adapter);
    504 
    505 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    506 #ifndef IXGBE_LEGACY_TX
    507 		softint_disestablish(txr->txr_si);
    508 #endif
    509 		softint_disestablish(que->que_si);
    510 	}
    511 
    512 	/* Drain the Mailbox(link) queue */
    513 	softint_disestablish(adapter->link_si);
    514 
    515 	/* Unregister VLAN events */
    516 #if 0 /* XXX msaitoh delete after write? */
    517 	if (adapter->vlan_attach != NULL)
    518 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    519 	if (adapter->vlan_detach != NULL)
    520 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    521 #endif
    522 
    523 	ether_ifdetach(adapter->ifp);
    524 	callout_halt(&adapter->timer, NULL);
    525 #ifdef DEV_NETMAP
    526 	netmap_detach(adapter->ifp);
    527 #endif /* DEV_NETMAP */
    528 	ixv_free_pci_resources(adapter);
    529 #if 0 /* XXX the NetBSD port is probably missing something here */
    530 	bus_generic_detach(dev);
    531 #endif
    532 	if_detach(adapter->ifp);
    533 	if_percpuq_destroy(adapter->ipq);
    534 
    535 	sysctl_teardown(&adapter->sysctllog);
    536 	evcnt_detach(&adapter->handleq);
    537 	evcnt_detach(&adapter->req);
    538 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    539 	evcnt_detach(&adapter->mbuf_defrag_failed);
    540 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    541 	evcnt_detach(&adapter->einval_tx_dma_setup);
    542 	evcnt_detach(&adapter->other_tx_dma_setup);
    543 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    544 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    545 	evcnt_detach(&adapter->watchdog_events);
    546 	evcnt_detach(&adapter->tso_err);
    547 	evcnt_detach(&adapter->link_irq);
    548 
    549 	txr = adapter->tx_rings;
    550 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    551 		evcnt_detach(&adapter->queues[i].irqs);
    552 		evcnt_detach(&txr->no_desc_avail);
    553 		evcnt_detach(&txr->total_packets);
    554 		evcnt_detach(&txr->tso_tx);
    555 #ifndef IXGBE_LEGACY_TX
    556 		evcnt_detach(&txr->pcq_drops);
    557 #endif
    558 
    559 		evcnt_detach(&rxr->rx_packets);
    560 		evcnt_detach(&rxr->rx_bytes);
    561 		evcnt_detach(&rxr->rx_copies);
    562 		evcnt_detach(&rxr->no_jmbuf);
    563 		evcnt_detach(&rxr->rx_discarded);
    564 	}
    565 	evcnt_detach(&stats->ipcs);
    566 	evcnt_detach(&stats->l4cs);
    567 	evcnt_detach(&stats->ipcs_bad);
    568 	evcnt_detach(&stats->l4cs_bad);
    569 
    570 	/* Packet Reception Stats */
    571 	evcnt_detach(&stats->vfgorc);
    572 	evcnt_detach(&stats->vfgprc);
    573 	evcnt_detach(&stats->vfmprc);
    574 
    575 	/* Packet Transmission Stats */
    576 	evcnt_detach(&stats->vfgotc);
    577 	evcnt_detach(&stats->vfgptc);
    578 
    579 	ixgbe_free_transmit_structures(adapter);
    580 	ixgbe_free_receive_structures(adapter);
    581 
    582 	IXGBE_CORE_LOCK_DESTROY(adapter);
    583 	return (0);
    584 }
    585 
    586 /*********************************************************************
    587  *
    588  *  Shutdown entry point
    589  *
    590  **********************************************************************/
    591 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    592 static int
    593 ixv_shutdown(device_t dev)
    594 {
    595 	struct adapter *adapter = device_private(dev);
    596 	IXGBE_CORE_LOCK(adapter);
    597 	ixv_stop(adapter);
    598 	IXGBE_CORE_UNLOCK(adapter);
    599 	return (0);
    600 }
    601 #endif
    602 
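         /*
         ** Interface flags change callback, registered below via
         ** ether_set_ifflags_cb(); returning ENETRESET asks the caller to
         ** reinitialize the interface so the new flags take effect.
         */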
    603 static int
    604 ixv_ifflags_cb(struct ethercom *ec)
    605 {
    606 	struct ifnet *ifp = &ec->ec_if;
    607 	struct adapter *adapter = ifp->if_softc;
    608 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    609 
    610 	IXGBE_CORE_LOCK(adapter);
    611 
    612 	if (change != 0)
    613 		adapter->if_flags = ifp->if_flags;
    614 
    615 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    616 		rc = ENETRESET;
    617 
    618 	IXGBE_CORE_UNLOCK(adapter);
    619 
    620 	return rc;
    621 }
    622 
    623 /*********************************************************************
    624  *  Ioctl entry point
    625  *
    626  *  ixv_ioctl is called when the user wants to configure the
    627  *  interface.
    628  *
    629  *  return 0 on success, positive on failure
    630  **********************************************************************/
    631 
    632 static int
    633 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    634 {
    635 	struct adapter	*adapter = ifp->if_softc;
    636 	struct ifcapreq *ifcr = data;
    637 	struct ifreq	*ifr = (struct ifreq *) data;
    638 	int             error = 0;
    639 	int l4csum_en;
    640 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    641 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    642 
    643 	switch (command) {
    644 	case SIOCSIFFLAGS:
    645 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    646 		break;
    647 	case SIOCADDMULTI:
    648 	case SIOCDELMULTI:
    649 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    650 		break;
    651 	case SIOCSIFMEDIA:
    652 	case SIOCGIFMEDIA:
    653 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    654 		break;
    655 	case SIOCSIFCAP:
    656 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    657 		break;
    658 	case SIOCSIFMTU:
    659 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    660 		break;
    661 	default:
    662 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    663 		break;
    664 	}
    665 
    666 	switch (command) {
    667 	case SIOCSIFMEDIA:
    668 	case SIOCGIFMEDIA:
    669 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    670 	case SIOCSIFCAP:
    671 		/* Layer-4 Rx checksum offload has to be turned on and
    672 		 * off as a unit.
    673 		 */
    674 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    675 		if (l4csum_en != l4csum && l4csum_en != 0)
    676 			return EINVAL;
    677 		/*FALLTHROUGH*/
    678 	case SIOCADDMULTI:
    679 	case SIOCDELMULTI:
    680 	case SIOCSIFFLAGS:
    681 	case SIOCSIFMTU:
    682 	default:
    683 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    684 			return error;
    685 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    686 			;
    687 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    688 			IXGBE_CORE_LOCK(adapter);
    689 			ixv_init_locked(adapter);
    690 			IXGBE_CORE_UNLOCK(adapter);
    691 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    692 			/*
    693 			 * Multicast list has changed; set the hardware filter
    694 			 * accordingly.
    695 			 */
    696 			IXGBE_CORE_LOCK(adapter);
    697 			ixv_disable_intr(adapter);
    698 			ixv_set_multi(adapter);
    699 			ixv_enable_intr(adapter);
    700 			IXGBE_CORE_UNLOCK(adapter);
    701 		}
    702 		return 0;
    703 	}
    704 }
    705 
    706 /*********************************************************************
    707  *  Init entry point
    708  *
    709  *  This routine is used in two ways. It is used by the stack as
     710  *  the init entry point in the network interface structure. It is also used
    711  *  by the driver as a hw/sw initialization routine to get to a
    712  *  consistent state.
    713  *
    714  *  return 0 on success, positive on failure
    715  **********************************************************************/
    716 #define IXGBE_MHADD_MFS_SHIFT 16
    717 
    718 static void
    719 ixv_init_locked(struct adapter *adapter)
    720 {
    721 	struct ifnet	*ifp = adapter->ifp;
    722 	device_t 	dev = adapter->dev;
    723 	struct ixgbe_hw *hw = &adapter->hw;
    724 	int error = 0;
    725 
    726 	INIT_DEBUGOUT("ixv_init_locked: begin");
    727 	KASSERT(mutex_owned(&adapter->core_mtx));
    728 	hw->adapter_stopped = FALSE;
    729 	ixgbe_stop_adapter(hw);
    730         callout_stop(&adapter->timer);
    731 
    732         /* reprogram the RAR[0] in case user changed it. */
    733         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    734 
    735 	/* Get the latest mac address, User can use a LAA */
    736 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    737 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    738         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    739 	hw->addr_ctrl.rar_used_count = 1;
    740 
    741 	/* Prepare transmit descriptors and buffers */
    742 	if (ixgbe_setup_transmit_structures(adapter)) {
    743 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    744 		ixv_stop(adapter);
    745 		return;
    746 	}
    747 
    748 	/* Reset VF and renegotiate mailbox API version */
    749 	ixgbe_reset_hw(hw);
    750 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    751 	if (error)
    752 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    753 
    754 	ixv_initialize_transmit_units(adapter);
    755 
    756 	/* Setup Multicast table */
    757 	ixv_set_multi(adapter);
    758 
    759 	/*
    760 	** Determine the correct mbuf pool
    761 	** for doing jumbo/headersplit
    762 	*/
    763 	if (ifp->if_mtu > ETHERMTU)
    764 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    765 	else
    766 		adapter->rx_mbuf_sz = MCLBYTES;
    767 
    768 	/* Prepare receive descriptors and buffers */
    769 	if (ixgbe_setup_receive_structures(adapter)) {
    770 		device_printf(dev, "Could not setup receive structures\n");
    771 		ixv_stop(adapter);
    772 		return;
    773 	}
    774 
    775 	/* Configure RX settings */
    776 	ixv_initialize_receive_units(adapter);
    777 
    778 #if 0 /* XXX isn't it required? -- msaitoh  */
    779 	/* Set the various hardware offload abilities */
    780 	ifp->if_hwassist = 0;
    781 	if (ifp->if_capenable & IFCAP_TSO4)
    782 		ifp->if_hwassist |= CSUM_TSO;
    783 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    784 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    785 #if __FreeBSD_version >= 800000
    786 		ifp->if_hwassist |= CSUM_SCTP;
    787 #endif
    788 	}
    789 #endif
    790 
    791 	/* Set up VLAN offload and filter */
    792 	ixv_setup_vlan_support(adapter);
    793 
    794 	/* Set up MSI/X routing */
    795 	ixv_configure_ivars(adapter);
    796 
    797 	/* Set up auto-mask */
    798 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    799 
    800         /* Set moderation on the Link interrupt */
    801         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    802 
    803 	/* Stats init */
    804 	ixv_init_stats(adapter);
    805 
    806 	/* Config/Enable Link */
    807 	ixv_config_link(adapter);
    808 	hw->mac.get_link_status = TRUE;
    809 
    810 	/* Start watchdog */
    811 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    812 
    813 	/* And now turn on interrupts */
    814 	ixv_enable_intr(adapter);
    815 
    816 	/* Now inform the stack we're ready */
    817 	ifp->if_flags |= IFF_RUNNING;
    818 	ifp->if_flags &= ~IFF_OACTIVE;
    819 
    820 	return;
    821 }
    822 
    823 static int
    824 ixv_init(struct ifnet *ifp)
    825 {
    826 	struct adapter *adapter = ifp->if_softc;
    827 
    828 	IXGBE_CORE_LOCK(adapter);
    829 	ixv_init_locked(adapter);
    830 	IXGBE_CORE_UNLOCK(adapter);
    831 	return 0;
    832 }
    833 
    834 
    835 /*
    836 **
    837 ** MSIX Interrupt Handlers and Tasklets
    838 **
    839 */
    840 
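         /*
         ** Queue interrupt helpers: writing a vector's bit to VTEIMS enables
         ** that interrupt, writing it to VTEIMC disables it, and writing it
         ** to VTEICS triggers (rearms) the interrupt from software.
         */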
    841 static inline void
    842 ixv_enable_queue(struct adapter *adapter, u32 vector)
    843 {
    844 	struct ixgbe_hw *hw = &adapter->hw;
    845 	u32	queue = 1 << vector;
    846 	u32	mask;
    847 
    848 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    849 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    850 }
    851 
    852 static inline void
    853 ixv_disable_queue(struct adapter *adapter, u32 vector)
    854 {
    855 	struct ixgbe_hw *hw = &adapter->hw;
     856 	u64	queue = (u64)1 << vector;
    857 	u32	mask;
    858 
    859 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    860 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    861 }
    862 
    863 static inline void
    864 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    865 {
    866 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    867 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    868 }
    869 
    870 
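         /*
         ** Deferred (softint) queue handler: finishes the RX/TX cleanup that
         ** ixv_msix_que defers out of hard interrupt context and re-enables
         ** the queue interrupt once no more work is pending.
         */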
    871 static void
    872 ixv_handle_que(void *context)
    873 {
    874 	struct ix_queue *que = context;
    875 	struct adapter  *adapter = que->adapter;
    876 	struct tx_ring	*txr = que->txr;
    877 	struct ifnet    *ifp = adapter->ifp;
    878 	bool		more;
    879 
    880 	adapter->handleq.ev_count++;
    881 
    882 	if (ifp->if_flags & IFF_RUNNING) {
    883 		more = ixgbe_rxeof(que);
    884 		IXGBE_TX_LOCK(txr);
    885 		ixgbe_txeof(txr);
    886 #ifndef IXGBE_LEGACY_TX
    887 		if (pcq_peek(txr->txr_interq) != NULL)
    888 			ixgbe_mq_start_locked(ifp, txr);
    889 #else
    890 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
    891 			ixgbe_start_locked(txr, ifp);
    892 #endif
    893 		IXGBE_TX_UNLOCK(txr);
    894 		if (more) {
    895 			adapter->req.ev_count++;
    896 			softint_schedule(que->que_si);
    897 			return;
    898 		}
    899 	}
    900 
    901 	/* Reenable this interrupt */
    902 	ixv_enable_queue(adapter, que->msix);
    903 	return;
    904 }
    905 
    906 /*********************************************************************
    907  *
     908  *  MSIX Queue Interrupt Service routine
    909  *
    910  **********************************************************************/
    911 int
    912 ixv_msix_que(void *arg)
    913 {
    914 	struct ix_queue	*que = arg;
    915 	struct adapter  *adapter = que->adapter;
    916 #ifdef IXGBE_LEGACY_TX
    917 	struct ifnet    *ifp = adapter->ifp;
    918 #endif
    919 	struct tx_ring	*txr = que->txr;
    920 	struct rx_ring	*rxr = que->rxr;
    921 	bool		more;
    922 	u32		newitr = 0;
    923 
    924 	ixv_disable_queue(adapter, que->msix);
    925 	++que->irqs.ev_count;
    926 
    927 #ifdef __NetBSD__
    928 	/* Don't run ixgbe_rxeof in interrupt context */
    929 	more = true;
    930 #else
    931 	more = ixgbe_rxeof(que);
    932 #endif
    933 
    934 	IXGBE_TX_LOCK(txr);
    935 	ixgbe_txeof(txr);
    936 	/*
    937 	** Make certain that if the stack
    938 	** has anything queued the task gets
    939 	** scheduled to handle it.
    940 	*/
    941 #ifdef IXGBE_LEGACY_TX
    942 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
    943 		ixgbe_start_locked(txr, ifp);
    944 #endif
    945 	IXGBE_TX_UNLOCK(txr);
    946 
    947 	/* Do AIM now? */
    948 
    949 	if (adapter->enable_aim == false)
    950 		goto no_calc;
    951 	/*
    952 	** Do Adaptive Interrupt Moderation:
    953         **  - Write out last calculated setting
    954 	**  - Calculate based on average size over
    955 	**    the last interval.
    956 	*/
    957         if (que->eitr_setting)
    958                 IXGBE_WRITE_REG(&adapter->hw,
    959                     IXGBE_VTEITR(que->msix),
    960 		    que->eitr_setting);
    961 
    962         que->eitr_setting = 0;
    963 
    964         /* Idle, do nothing */
    965         if ((txr->bytes == 0) && (rxr->bytes == 0))
    966                 goto no_calc;
    967 
    968 	if ((txr->bytes) && (txr->packets))
    969                	newitr = txr->bytes/txr->packets;
    970 	if ((rxr->bytes) && (rxr->packets))
    971 		newitr = max(newitr,
    972 		    (rxr->bytes / rxr->packets));
    973 	newitr += 24; /* account for hardware frame, crc */
    974 
    975 	/* set an upper boundary */
    976 	newitr = min(newitr, 3000);
    977 
    978 	/* Be nice to the mid range */
    979 	if ((newitr > 300) && (newitr < 1200))
    980 		newitr = (newitr / 3);
    981 	else
    982 		newitr = (newitr / 2);
    983 
    984 	newitr |= newitr << 16;
    985 
    986         /* save for next interrupt */
    987         que->eitr_setting = newitr;
    988 
    989         /* Reset state */
    990         txr->bytes = 0;
    991         txr->packets = 0;
    992         rxr->bytes = 0;
    993         rxr->packets = 0;
    994 
    995 no_calc:
    996 	if (more)
    997 		softint_schedule(que->que_si);
    998 	else /* Reenable this interrupt */
    999 		ixv_enable_queue(adapter, que->msix);
   1000 	return 1;
   1001 }
   1002 
   1003 static int
   1004 ixv_msix_mbx(void *arg)
   1005 {
   1006 	struct adapter	*adapter = arg;
   1007 	struct ixgbe_hw *hw = &adapter->hw;
   1008 	u32		reg;
   1009 
   1010 	++adapter->link_irq.ev_count;
   1011 
   1012 	/* First get the cause */
   1013 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
   1014 	/* Clear interrupt with write */
   1015 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
   1016 
   1017 	/* Link status change */
   1018 	if (reg & IXGBE_EICR_LSC)
   1019 		softint_schedule(adapter->link_si);
   1020 
   1021 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1022 	return 1;
   1023 }
   1024 
   1025 /*********************************************************************
   1026  *
   1027  *  Media Ioctl callback
   1028  *
   1029  *  This routine is called whenever the user queries the status of
   1030  *  the interface using ifconfig.
   1031  *
   1032  **********************************************************************/
   1033 static void
   1034 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1035 {
   1036 	struct adapter *adapter = ifp->if_softc;
   1037 
   1038 	INIT_DEBUGOUT("ixv_media_status: begin");
   1039 	IXGBE_CORE_LOCK(adapter);
   1040 	ixv_update_link_status(adapter);
   1041 
   1042 	ifmr->ifm_status = IFM_AVALID;
   1043 	ifmr->ifm_active = IFM_ETHER;
   1044 
   1045 	if (!adapter->link_active) {
   1046 		ifmr->ifm_active |= IFM_NONE;
   1047 		IXGBE_CORE_UNLOCK(adapter);
   1048 		return;
   1049 	}
   1050 
   1051 	ifmr->ifm_status |= IFM_ACTIVE;
   1052 
   1053 	switch (adapter->link_speed) {
   1054 		case IXGBE_LINK_SPEED_10GB_FULL:
   1055 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1056 			break;
   1057 		case IXGBE_LINK_SPEED_1GB_FULL:
   1058 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1059 			break;
   1060 		case IXGBE_LINK_SPEED_100_FULL:
   1061 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1062 			break;
   1063 	}
   1064 
   1065 	IXGBE_CORE_UNLOCK(adapter);
   1066 
   1067 	return;
   1068 }
   1069 
   1070 /*********************************************************************
   1071  *
   1072  *  Media Ioctl callback
   1073  *
   1074  *  This routine is called when the user changes speed/duplex using
    1075  *  the media/mediaopt options with ifconfig.
   1076  *
   1077  **********************************************************************/
   1078 static int
   1079 ixv_media_change(struct ifnet * ifp)
   1080 {
   1081 	struct adapter *adapter = ifp->if_softc;
   1082 	struct ifmedia *ifm = &adapter->media;
   1083 
   1084 	INIT_DEBUGOUT("ixv_media_change: begin");
   1085 
   1086 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1087 		return (EINVAL);
   1088 
   1089         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1090         case IFM_AUTO:
   1091                 break;
   1092         default:
   1093                 device_printf(adapter->dev, "Only auto media type\n");
   1094 		return (EINVAL);
   1095         }
   1096 
   1097 	return (0);
   1098 }
   1099 
   1100 
   1101 /*********************************************************************
   1102  *  Multicast Update
   1103  *
   1104  *  This routine is called whenever multicast address list is updated.
   1105  *
   1106  **********************************************************************/
   1107 #define IXGBE_RAR_ENTRIES 16
   1108 
   1109 static void
   1110 ixv_set_multi(struct adapter *adapter)
   1111 {
   1112 	struct ether_multi *enm;
   1113 	struct ether_multistep step;
   1114 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1115 	u8	*update_ptr;
   1116 	int	mcnt = 0;
   1117 	struct ethercom *ec = &adapter->osdep.ec;
   1118 
   1119 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1120 
   1121 	ETHER_FIRST_MULTI(step, ec, enm);
   1122 	while (enm != NULL) {
   1123 		bcopy(enm->enm_addrlo,
   1124 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1125 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1126 		mcnt++;
   1127 		/* XXX This might be required --msaitoh */
   1128 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1129 			break;
   1130 		ETHER_NEXT_MULTI(step, enm);
   1131 	}
   1132 
   1133 	update_ptr = mta;
   1134 
   1135 	ixgbe_update_mc_addr_list(&adapter->hw,
   1136 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1137 
   1138 	return;
   1139 }
   1140 
   1141 /*
   1142  * This is an iterator function now needed by the multicast
   1143  * shared code. It simply feeds the shared code routine the
   1144  * addresses in the array of ixv_set_multi() one by one.
   1145  */
   1146 static u8 *
   1147 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1148 {
   1149 	u8 *addr = *update_ptr;
   1150 	u8 *newptr;
   1151 	*vmdq = 0;
   1152 
   1153 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1154 	*update_ptr = newptr;
   1155 	return addr;
   1156 }
   1157 
   1158 /*********************************************************************
   1159  *  Timer routine
   1160  *
    1161  *  This routine checks for link status, updates statistics,
   1162  *  and runs the watchdog check.
   1163  *
   1164  **********************************************************************/
   1165 
   1166 static void
   1167 ixv_local_timer(void *arg)
   1168 {
   1169 	struct adapter *adapter = arg;
   1170 
   1171 	IXGBE_CORE_LOCK(adapter);
   1172 	ixv_local_timer_locked(adapter);
   1173 	IXGBE_CORE_UNLOCK(adapter);
   1174 }
   1175 
   1176 static void
   1177 ixv_local_timer_locked(void *arg)
   1178 {
   1179 	struct adapter	*adapter = arg;
   1180 	device_t	dev = adapter->dev;
   1181 	struct ix_queue	*que = adapter->queues;
   1182 	u64		queues = 0;
   1183 	int		hung = 0;
   1184 
   1185 	KASSERT(mutex_owned(&adapter->core_mtx));
   1186 
   1187 	ixv_update_link_status(adapter);
   1188 
   1189 	/* Stats Update */
   1190 	ixv_update_stats(adapter);
   1191 
   1192 	/*
   1193 	** Check the TX queues status
   1194 	**      - mark hung queues so we don't schedule on them
   1195 	**      - watchdog only if all queues show hung
   1196 	*/
   1197 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1198 		/* Keep track of queues with work for soft irq */
   1199 		if (que->txr->busy)
   1200 			queues |= ((u64)1 << que->me);
   1201 		/*
    1202 		** Each time txeof runs without cleaning but there
    1203 		** are uncleaned descriptors, it increments busy. If
   1204 		** we get to the MAX we declare it hung.
   1205 		*/
   1206 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1207 			++hung;
   1208 			/* Mark the queue as inactive */
   1209 			adapter->active_queues &= ~((u64)1 << que->me);
   1210 			continue;
   1211 		} else {
   1212 			/* Check if we've come back from hung */
   1213 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1214                                 adapter->active_queues |= ((u64)1 << que->me);
   1215 		}
   1216 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1217 			device_printf(dev,"Warning queue %d "
   1218 			    "appears to be hung!\n", i);
   1219 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1220 			++hung;
   1221 		}
   1222 
   1223 	}
   1224 
   1225 	/* Only truly watchdog if all queues show hung */
   1226 	if (hung == adapter->num_queues)
   1227 		goto watchdog;
   1228 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1229 		ixv_rearm_queues(adapter, queues);
   1230 	}
   1231 
   1232 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1233 	return;
   1234 
   1235 watchdog:
   1236 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1237 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1238 	adapter->watchdog_events.ev_count++;
   1239 	ixv_init_locked(adapter);
   1240 }
   1241 
   1242 /*
    1243 ** Note: this routine updates the OS on the link state;
   1244 **	the real check of the hardware only happens with
   1245 **	a link interrupt.
   1246 */
   1247 static void
   1248 ixv_update_link_status(struct adapter *adapter)
   1249 {
   1250 	struct ifnet	*ifp = adapter->ifp;
   1251 	device_t dev = adapter->dev;
   1252 
   1253 	if (adapter->link_up){
   1254 		if (adapter->link_active == FALSE) {
   1255 			if (bootverbose) {
   1256 				const char *bpsmsg;
   1257 
   1258 				switch (adapter->link_speed) {
   1259 				case IXGBE_LINK_SPEED_10GB_FULL:
   1260 					bpsmsg = "10 Gbps";
   1261 					break;
   1262 				case IXGBE_LINK_SPEED_1GB_FULL:
   1263 					bpsmsg = "1 Gbps";
   1264 					break;
   1265 				case IXGBE_LINK_SPEED_100_FULL:
   1266 					bpsmsg = "100 Mbps";
   1267 					break;
   1268 				default:
   1269 					bpsmsg = "unknown speed";
   1270 					break;
   1271 				}
   1272 				device_printf(dev,"Link is up %s %s \n",
   1273 				    bpsmsg, "Full Duplex");
   1274 			}
   1275 			adapter->link_active = TRUE;
   1276 			if_link_state_change(ifp, LINK_STATE_UP);
   1277 		}
   1278 	} else { /* Link down */
   1279 		if (adapter->link_active == TRUE) {
   1280 			if (bootverbose)
   1281 				device_printf(dev,"Link is Down\n");
   1282 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1283 			adapter->link_active = FALSE;
   1284 		}
   1285 	}
   1286 
   1287 	return;
   1288 }
   1289 
   1290 
   1291 static void
   1292 ixv_ifstop(struct ifnet *ifp, int disable)
   1293 {
   1294 	struct adapter *adapter = ifp->if_softc;
   1295 
   1296 	IXGBE_CORE_LOCK(adapter);
   1297 	ixv_stop(adapter);
   1298 	IXGBE_CORE_UNLOCK(adapter);
   1299 }
   1300 
   1301 /*********************************************************************
   1302  *
   1303  *  This routine disables all traffic on the adapter by issuing a
   1304  *  global reset on the MAC and deallocates TX/RX buffers.
   1305  *
   1306  **********************************************************************/
   1307 
   1308 static void
   1309 ixv_stop(void *arg)
   1310 {
   1311 	struct ifnet   *ifp;
   1312 	struct adapter *adapter = arg;
   1313 	struct ixgbe_hw *hw = &adapter->hw;
   1314 	ifp = adapter->ifp;
   1315 
   1316 	KASSERT(mutex_owned(&adapter->core_mtx));
   1317 
   1318 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1319 	ixv_disable_intr(adapter);
   1320 
   1321 	/* Tell the stack that the interface is no longer active */
   1322 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1323 
   1324 	ixgbe_reset_hw(hw);
   1325 	adapter->hw.adapter_stopped = FALSE;
   1326 	ixgbe_stop_adapter(hw);
   1327 	callout_stop(&adapter->timer);
   1328 
   1329 	/* reprogram the RAR[0] in case user changed it. */
   1330 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1331 
   1332 	return;
   1333 }
   1334 
   1335 
   1336 /*********************************************************************
   1337  *
   1338  *  Determine hardware revision.
   1339  *
   1340  **********************************************************************/
   1341 static void
   1342 ixv_identify_hardware(struct adapter *adapter)
   1343 {
   1344 	pcitag_t tag;
   1345 	pci_chipset_tag_t pc;
   1346 	pcireg_t subid, id;
   1347 	struct ixgbe_hw *hw = &adapter->hw;
   1348 
   1349 	pc = adapter->osdep.pc;
   1350 	tag = adapter->osdep.tag;
   1351 
   1352 	/*
    1353 	** Make sure BUSMASTER is set; on a VM under
    1354 	** KVM it may not be, which will break things.
   1355 	*/
   1356 	ixgbe_pci_enable_busmaster(pc, tag);
   1357 
   1358 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1359 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1360 
   1361 	/* Save off the information about this board */
   1362 	hw->vendor_id = PCI_VENDOR(id);
   1363 	hw->device_id = PCI_PRODUCT(id);
   1364 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1365 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1366 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1367 
   1368 	/* We need this to determine device-specific things */
   1369 	ixgbe_set_mac_type(hw);
   1370 
   1371 	/* Set the right number of segments */
   1372 	adapter->num_segs = IXGBE_82599_SCATTER;
   1373 
   1374 	return;
   1375 }
   1376 
   1377 /*********************************************************************
   1378  *
   1379  *  Setup MSIX Interrupt resources and handlers
   1380  *
   1381  **********************************************************************/
   1382 static int
   1383 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1384 {
   1385 	device_t	dev = adapter->dev;
   1386 	struct ix_queue *que = adapter->queues;
   1387 	struct		tx_ring *txr = adapter->tx_rings;
   1388 	int 		error, rid, vector = 0;
   1389 	pci_chipset_tag_t pc;
   1390 	pcitag_t	tag;
   1391 	char		intrbuf[PCI_INTRSTR_LEN];
   1392 	char		intr_xname[32];
   1393 	const char	*intrstr = NULL;
   1394 	kcpuset_t	*affinity;
   1395 	int		cpu_id = 0;
   1396 
   1397 	pc = adapter->osdep.pc;
   1398 	tag = adapter->osdep.tag;
   1399 
   1400 	adapter->osdep.nintrs = adapter->num_queues + 1;
   1401 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   1402 	    adapter->osdep.nintrs) != 0) {
   1403 		aprint_error_dev(dev,
   1404 		    "failed to allocate MSI-X interrupt\n");
   1405 		return (ENXIO);
   1406 	}
   1407 
   1408 	kcpuset_create(&affinity, false);
   1409 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1410 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   1411 		    device_xname(dev), i);
   1412 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1413 		    sizeof(intrbuf));
   1414 #ifdef IXV_MPSAFE
   1415 		pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1416 		    true);
   1417 #endif
   1418 		/* Set the handler function */
   1419 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   1420 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   1421 		    intr_xname);
   1422 		if (que->res == NULL) {
   1423 			pci_intr_release(pc, adapter->osdep.intrs,
   1424 			    adapter->osdep.nintrs);
   1425 			aprint_error_dev(dev,
   1426 			    "Failed to register QUE handler\n");
   1427 			kcpuset_destroy(affinity);
   1428 			return (ENXIO);
   1429 		}
   1430 		que->msix = vector;
    1431 		adapter->active_queues |= (u64)1 << que->msix;
   1432 
   1433 		cpu_id = i;
   1434 		/* Round-robin affinity */
   1435 		kcpuset_zero(affinity);
   1436 		kcpuset_set(affinity, cpu_id % ncpu);
   1437 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1438 		    NULL);
   1439 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1440 		    intrstr);
   1441 		if (error == 0)
   1442 			aprint_normal(", bound queue %d to cpu %d\n",
   1443 			    i, cpu_id % ncpu);
   1444 		else
   1445 			aprint_normal("\n");
   1446 
   1447 #ifndef IXGBE_LEGACY_TX
   1448 		txr->txr_si = softint_establish(SOFTINT_NET,
   1449 		    ixgbe_deferred_mq_start, txr);
   1450 #endif
   1451 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1452 		    que);
   1453 		if (que->que_si == NULL) {
   1454 			aprint_error_dev(dev,
   1455 			    "could not establish software interrupt\n");
   1456 		}
   1457 	}
   1458 
   1459 	/* and Mailbox */
   1460 	cpu_id++;
   1461 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   1462 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1463 	    sizeof(intrbuf));
   1464 #ifdef IXG_MPSAFE
   1465 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   1466 	    true);
   1467 #endif
   1468 	/* Set the mbx handler function */
   1469 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   1470 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   1471 	    intr_xname);
   1472 	if (adapter->osdep.ihs[vector] == NULL) {
   1473 		adapter->res = NULL;
   1474 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1475 		kcpuset_destroy(affinity);
   1476 		return (ENXIO);
   1477 	}
   1478 	/* Round-robin affinity */
   1479 	kcpuset_zero(affinity);
   1480 	kcpuset_set(affinity, cpu_id % ncpu);
   1481 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1482 
   1483 	aprint_normal_dev(dev,
   1484 	    "for link, interrupting at %s", intrstr);
   1485 	if (error == 0)
   1486 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   1487 	else
   1488 		aprint_normal("\n");
   1489 
   1490 	adapter->vector = vector;
   1491 	/* Tasklets for Mailbox */
   1492 	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1493 	    adapter);
   1494 	/*
   1495 	** Due to a broken design QEMU will fail to properly
   1496 	** enable the guest for MSIX unless the vectors in
   1497 	** the table are all set up, so we must rewrite the
   1498 	** ENABLE in the MSIX control register again at this
   1499 	** point to cause it to successfully initialize us.
   1500 	*/
   1501 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1502 		int msix_ctrl;
   1503 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1504 		rid += PCI_MSIX_CTL;
   1505 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1506 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1507 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1508 	}
   1509 
   1510 	kcpuset_destroy(affinity);
   1511 	return (0);
   1512 }
   1513 
   1514 /*
    1515  * Set up MSIX resources; note that the VF
    1516  * device MUST use MSIX, there is no fallback.
   1517  */
   1518 static int
   1519 ixv_setup_msix(struct adapter *adapter)
   1520 {
   1521 	device_t dev = adapter->dev;
   1522 	int want, queues, msgs;
   1523 
   1524 	/* Must have at least 2 MSIX vectors */
   1525 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1526 	if (msgs < 2) {
   1527 		aprint_error_dev(dev,"MSIX config error\n");
   1528 		return (ENXIO);
   1529 	}
   1530 	msgs = MIN(msgs, IXG_MAX_NINTR);
   1531 
   1532 	/* Figure out a reasonable auto config value */
   1533 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   1534 
   1535 	if (ixv_num_queues != 0)
   1536 		queues = ixv_num_queues;
   1537 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   1538 		queues = IXGBE_VF_MAX_TX_QUEUES;
   1539 
   1540 	/*
   1541 	** Want vectors for the queues,
    1542 	** plus an additional one for the mailbox.
   1543 	*/
   1544 	want = queues + 1;
   1545 	if (msgs >= want)
   1546 		msgs = want;
   1547 	else {
   1548                	aprint_error_dev(dev,
   1549 		    "MSIX Configuration Problem, "
   1550 		    "%d vectors but %d queues wanted!\n",
   1551 		    msgs, want);
   1552 		return -1;
   1553 	}
   1554 
   1555 	adapter->msix_mem = (void *)1; /* XXX */
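         	/* XXX presumably a non-NULL placeholder: the NetBSD MSI-X API
         	 * manages the vector table itself, so no separate BAR mapping
         	 * is kept here. */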
   1556 	aprint_normal_dev(dev,
   1557 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1558 	adapter->num_queues = queues;
   1559 	return (msgs);
   1560 }
   1561 
   1562 
   1563 static int
   1564 ixv_allocate_pci_resources(struct adapter *adapter,
   1565     const struct pci_attach_args *pa)
   1566 {
   1567 	pcireg_t	memtype;
   1568 	device_t        dev = adapter->dev;
   1569 	bus_addr_t addr;
   1570 	int flags;
   1571 
   1572 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1573 	switch (memtype) {
   1574 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1575 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1576 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1577 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1578 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1579 			goto map_err;
   1580 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1581 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1582 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1583 		}
   1584 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1585 		     adapter->osdep.mem_size, flags,
   1586 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1587 map_err:
   1588 			adapter->osdep.mem_size = 0;
   1589 			aprint_error_dev(dev, "unable to map BAR0\n");
   1590 			return ENXIO;
   1591 		}
   1592 		break;
   1593 	default:
   1594 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1595 		return ENXIO;
   1596 	}
   1597 	adapter->hw.back = adapter;
   1598 
   1599 	/* Pick up the tuneable queues */
   1600 	adapter->num_queues = ixv_num_queues;
   1601 
   1602 	/*
    1603 	** Now set up MSI/X; this should
    1604 	** return the number of
    1605 	** configured vectors.
   1606 	*/
   1607 	adapter->msix = ixv_setup_msix(adapter);
   1608 	if (adapter->msix == ENXIO)
   1609 		return (ENXIO);
   1610 	else
   1611 		return (0);
   1612 }
   1613 
   1614 static void
   1615 ixv_free_pci_resources(struct adapter * adapter)
   1616 {
    1617 	struct ix_queue	*que = adapter->queues;
   1618 	int		rid;
   1619 
   1620 	/*
   1621 	**  Release all msix queue resources:
   1622 	*/
   1623 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1624 		if (que->res != NULL)
   1625 			pci_intr_disestablish(adapter->osdep.pc,
   1626 			    adapter->osdep.ihs[i]);
   1627 	}
   1628 
   1629 
   1630 	/* Clean the Link interrupt last */
   1631 	rid = adapter->vector;
   1632 
   1633 	if (adapter->osdep.ihs[rid] != NULL) {
   1634 		pci_intr_disestablish(adapter->osdep.pc,
   1635 		    adapter->osdep.ihs[rid]);
   1636 		adapter->osdep.ihs[rid] = NULL;
   1637 	}
   1638 
   1639 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1640 	    adapter->osdep.nintrs);
   1641 
   1642 	if (adapter->osdep.mem_size != 0) {
   1643 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1644 		    adapter->osdep.mem_bus_space_handle,
   1645 		    adapter->osdep.mem_size);
   1646 	}
   1647 
   1648 	return;
   1649 }
   1650 
   1651 /*********************************************************************
   1652  *
   1653  *  Setup networking device structure and register an interface.
   1654  *
   1655  **********************************************************************/
   1656 static void
   1657 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1658 {
   1659 	struct ethercom *ec = &adapter->osdep.ec;
   1660 	struct ifnet   *ifp;
   1661 
   1662 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1663 
   1664 	ifp = adapter->ifp = &ec->ec_if;
   1665 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1666 	ifp->if_baudrate = IF_Gbps(10);
   1667 	ifp->if_init = ixv_init;
   1668 	ifp->if_stop = ixv_ifstop;
   1669 	ifp->if_softc = adapter;
   1670 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1671 	ifp->if_ioctl = ixv_ioctl;
   1672 #ifndef IXGBE_LEGACY_TX
   1673 	ifp->if_transmit = ixgbe_mq_start;
   1674 #endif
   1675 	ifp->if_start = ixgbe_start;
   1676 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1677 	IFQ_SET_READY(&ifp->if_snd);
   1678 
   1679 	if_initialize(ifp);
   1680 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1681 	ether_ifattach(ifp, adapter->hw.mac.addr);
    1682 	/*
    1683 	 * We use a per-TX-queue softint, so if_deferred_start_init()
    1684 	 * isn't used.
    1685 	 */
   1686 	if_register(ifp);
   1687 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1688 
   1689 	adapter->max_frame_size =
   1690 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
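	/*
	 * IXGBE_MTU_HDR_VLAN accounts for the Ethernet header, CRC and a
	 * VLAN tag, so max_frame_size is the full on-wire frame size for
	 * the current MTU.
	 */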
   1691 
   1692 	/*
   1693 	 * Tell the upper layer(s) we support long frames.
   1694 	 */
   1695 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1696 
   1697 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1698 	ifp->if_capenable = 0;
   1699 
   1700 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1701 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
    1702 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
    1703 	    | ETHERCAP_VLAN_MTU;
   1704 	ec->ec_capenable = ec->ec_capabilities;
   1705 
   1706 	/* Don't enable LRO by default */
   1707 	ifp->if_capabilities |= IFCAP_LRO;
   1708 #if 0
   1709 	ifp->if_capenable = ifp->if_capabilities;
   1710 #endif
   1711 
    1712 	/*
    1713 	** Don't turn this on by default; if VLANs are
    1714 	** created on another pseudo device (e.g. lagg)
    1715 	** then VLAN events are not passed through, breaking
    1716 	** operation, but with HW FILTER off it works. If
    1717 	** you use VLANs directly on this driver you can
    1718 	** enable this and get full hardware tag filtering.
    1719 	*/
   1720 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1721 
   1722 	/*
   1723 	 * Specify the media types supported by this adapter and register
   1724 	 * callbacks to update media and link information
   1725 	 */
   1726 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1727 		     ixv_media_status);
   1728 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1729 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1730 
   1731 	return;
   1732 }
   1733 
   1734 static void
   1735 ixv_config_link(struct adapter *adapter)
   1736 {
   1737 	struct ixgbe_hw *hw = &adapter->hw;
   1738 
   1739 	if (hw->mac.ops.check_link)
   1740 		hw->mac.ops.check_link(hw, &adapter->link_speed,
   1741 		    &adapter->link_up, FALSE);
   1742 }
   1743 
   1744 
   1745 /*********************************************************************
   1746  *
   1747  *  Enable transmit unit.
   1748  *
   1749  **********************************************************************/
   1750 static void
   1751 ixv_initialize_transmit_units(struct adapter *adapter)
   1752 {
   1753 	struct tx_ring	*txr = adapter->tx_rings;
   1754 	struct ixgbe_hw	*hw = &adapter->hw;
   1755 
   1756 
   1757 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1758 		u64	tdba = txr->txdma.dma_paddr;
   1759 		u32	txctrl, txdctl;
   1760 
    1761 		/* Set WTHRESH (TXDCTL bits 22:16) to 8 for burst writeback */
   1762 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1763 		txdctl |= (8 << 16);
   1764 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1765 
   1766 		/* Set the HW Tx Head and Tail indices */
    1767 		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
    1768 		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
   1769 
   1770 		/* Set Tx Tail register */
   1771 		txr->tail = IXGBE_VFTDT(i);
   1772 
   1773 		/* Set Ring parameters */
   1774 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1775 		       (tdba & 0x00000000ffffffffULL));
   1776 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1777 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1778 		    adapter->num_tx_desc *
   1779 		    sizeof(struct ixgbe_legacy_tx_desc));
   1780 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1781 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1782 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1783 
   1784 		/* Now enable */
   1785 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1786 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1787 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1788 	}
   1789 
   1790 	return;
   1791 }
   1792 
   1793 
   1794 /*********************************************************************
   1795  *
   1796  *  Setup receive registers and features.
   1797  *
   1798  **********************************************************************/
   1799 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1800 
   1801 static void
   1802 ixv_initialize_receive_units(struct adapter *adapter)
   1803 {
   1804 	struct	rx_ring	*rxr = adapter->rx_rings;
   1805 	struct ixgbe_hw	*hw = &adapter->hw;
   1806 	struct ifnet	*ifp = adapter->ifp;
   1807 	u32		bufsz, rxcsum, psrtype;
   1808 
   1809 	if (ifp->if_mtu > ETHERMTU)
   1810 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1811 	else
   1812 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
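	/*
	 * The SRRCTL BSIZEPKT field is programmed in 1 KB units, so the
	 * shift above turns the 4 KB/2 KB buffer sizes into 4 and 2.
	 */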
   1813 
   1814 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1815 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1816 	    IXGBE_PSRTYPE_L2HDR;
   1817 
   1818 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1819 
   1820 	/* Tell PF our max_frame size */
   1821 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1822 
   1823 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1824 		u64 rdba = rxr->rxdma.dma_paddr;
   1825 		u32 reg, rxdctl;
   1826 
   1827 		/* Disable the queue */
   1828 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1829 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1830 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1831 		for (int j = 0; j < 10; j++) {
   1832 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1833 			    IXGBE_RXDCTL_ENABLE)
   1834 				msec_delay(1);
   1835 			else
   1836 				break;
   1837 		}
   1838 		wmb();
   1839 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1840 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1841 		    (rdba & 0x00000000ffffffffULL));
   1842 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1843 		    (rdba >> 32));
   1844 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1845 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1846 
   1847 		/* Reset the ring indices */
   1848 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1849 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1850 
   1851 		/* Set up the SRRCTL register */
   1852 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1853 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1854 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1855 		reg |= bufsz;
   1856 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1857 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1858 
   1859 		/* Capture Rx Tail index */
   1860 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1861 
   1862 		/* Do the queue enabling last */
   1863 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1864 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1865 		for (int k = 0; k < 10; k++) {
   1866 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1867 			    IXGBE_RXDCTL_ENABLE)
   1868 				break;
   1869 			else
   1870 				msec_delay(1);
   1871 		}
   1872 		wmb();
   1873 
   1874 		/* Set the Tail Pointer */
   1875 #ifdef DEV_NETMAP
   1876 		/*
   1877 		 * In netmap mode, we must preserve the buffers made
   1878 		 * available to userspace before the if_init()
   1879 		 * (this is true by default on the TX side, because
   1880 		 * init makes all buffers available to userspace).
   1881 		 *
   1882 		 * netmap_reset() and the device specific routines
   1883 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1884 		 * buffers at the end of the NIC ring, so here we
   1885 		 * must set the RDT (tail) register to make sure
   1886 		 * they are not overwritten.
   1887 		 *
   1888 		 * In this driver the NIC ring starts at RDH = 0,
   1889 		 * RDT points to the last slot available for reception (?),
   1890 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1891 		 */
   1892 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1893 			struct netmap_adapter *na = NA(adapter->ifp);
   1894 			struct netmap_kring *kring = &na->rx_rings[i];
   1895 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1896 
   1897 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1898 		} else
   1899 #endif /* DEV_NETMAP */
   1900 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1901 			    adapter->num_rx_desc - 1);
   1902 	}
   1903 
   1904 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1905 
   1906 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1907 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1908 
   1909 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1910 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1911 
   1912 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1913 
   1914 	return;
   1915 }
   1916 
   1917 static void
   1918 ixv_setup_vlan_support(struct adapter *adapter)
   1919 {
   1920 	struct ixgbe_hw *hw = &adapter->hw;
   1921 	u32		ctrl, vid, vfta, retry;
   1922 	struct rx_ring	*rxr;
   1923 
    1924 	/*
    1925 	** We get here through init_locked, meaning
    1926 	** a soft reset; this has already cleared
    1927 	** the VFTA and other state, so if no
    1928 	** VLANs have been registered, do nothing.
    1929 	*/
   1930 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1931 		return;
   1932 
   1933 	/* Enable the queues */
   1934 	for (int i = 0; i < adapter->num_queues; i++) {
   1935 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1936 		ctrl |= IXGBE_RXDCTL_VME;
   1937 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1938 		/*
   1939 		 * Let Rx path know that it needs to store VLAN tag
   1940 		 * as part of extra mbuf info.
   1941 		 */
   1942 		rxr = &adapter->rx_rings[i];
   1943 		rxr->vtag_strip = TRUE;
   1944 	}
   1945 
   1946 	/*
    1947 	** A soft reset zeroes out the VFTA, so
   1948 	** we need to repopulate it now.
   1949 	*/
   1950 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1951 		if (ixv_shadow_vfta[i] == 0)
   1952 			continue;
   1953 		vfta = ixv_shadow_vfta[i];
    1954 		/*
    1955 		** Reconstruct the VLAN IDs
    1956 		** based on the bits set in each
    1957 		** of the 32-bit array words.
    1958 		*/
   1959 		for (int j = 0; j < 32; j++) {
   1960 			retry = 0;
   1961 			if ((vfta & (1 << j)) == 0)
   1962 				continue;
   1963 			vid = (i * 32) + j;
   1964 			/* Call the shared code mailbox routine */
   1965 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1966 				if (++retry > 5)
   1967 					break;
   1968 			}
   1969 		}
   1970 	}
   1971 }
   1972 
   1973 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
    1974 /*
    1975 ** This routine is run via a vlan config EVENT;
    1976 ** it enables us to use the HW Filter table since
    1977 ** we can get the vlan id. This just creates the
    1978 ** entry in the soft version of the VFTA; init will
    1979 ** repopulate the real table.
    1980 */
   1981 static void
   1982 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1983 {
   1984 	struct adapter	*adapter = ifp->if_softc;
   1985 	u16		index, bit;
   1986 
   1987 	if (ifp->if_softc != arg) /* Not our event */
   1988 		return;
   1989 
   1990 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1991 		return;
   1992 
   1993 	IXGBE_CORE_LOCK(adapter);
   1994 	index = (vtag >> 5) & 0x7F;
   1995 	bit = vtag & 0x1F;
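	/* e.g. vtag 100 -> shadow word 3, bit 4 (3 * 32 + 4 == 100) */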
   1996 	ixv_shadow_vfta[index] |= (1 << bit);
   1997 	/* Re-init to load the changes */
   1998 	ixv_init_locked(adapter);
   1999 	IXGBE_CORE_UNLOCK(adapter);
   2000 }
   2001 
    2002 /*
    2003 ** This routine is run via a vlan
    2004 ** unconfig EVENT; it removes our entry
    2005 ** from the soft VFTA.
    2006 */
   2007 static void
   2008 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2009 {
   2010 	struct adapter	*adapter = ifp->if_softc;
   2011 	u16		index, bit;
   2012 
   2013 	if (ifp->if_softc !=  arg)
   2014 		return;
   2015 
   2016 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2017 		return;
   2018 
   2019 	IXGBE_CORE_LOCK(adapter);
   2020 	index = (vtag >> 5) & 0x7F;
   2021 	bit = vtag & 0x1F;
   2022 	ixv_shadow_vfta[index] &= ~(1 << bit);
   2023 	/* Re-init to load the changes */
   2024 	ixv_init_locked(adapter);
   2025 	IXGBE_CORE_UNLOCK(adapter);
   2026 }
   2027 #endif
   2028 
   2029 static void
   2030 ixv_enable_intr(struct adapter *adapter)
   2031 {
   2032 	struct ixgbe_hw *hw = &adapter->hw;
   2033 	struct ix_queue *que = adapter->queues;
   2034 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   2035 
   2036 
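	/*
	 * Unmask everything except the per-queue causes here; the queues
	 * are enabled one by one below. The VTEIAC write selects which
	 * causes auto-clear (assumed to mirror the PF's EIAC semantics),
	 * leaving the mailbox/link cause to be cleared by its handler.
	 */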
   2037 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   2038 
   2039 	mask = IXGBE_EIMS_ENABLE_MASK;
   2040 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   2041 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   2042 
    2043 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2044 		ixv_enable_queue(adapter, que->msix);
   2045 
   2046 	IXGBE_WRITE_FLUSH(hw);
   2047 
   2048 	return;
   2049 }
   2050 
   2051 static void
   2052 ixv_disable_intr(struct adapter *adapter)
   2053 {
   2054 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2055 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   2056 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2057 	return;
   2058 }
   2059 
   2060 /*
   2061 ** Setup the correct IVAR register for a particular MSIX interrupt
   2062 **  - entry is the register array entry
   2063 **  - vector is the MSIX vector for this queue
   2064 **  - type is RX/TX/MISC
   2065 */
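/*
 * Each 32-bit VTIVAR register packs four 8-bit entries, the Rx and Tx
 * vectors for two queues: entry >> 1 selects the register, while
 * (16 * (entry & 1)) + (8 * type) selects the byte within it, and
 * IXGBE_IVAR_ALLOC_VAL marks the entry as valid.
 */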
   2066 static void
   2067 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2068 {
   2069 	struct ixgbe_hw *hw = &adapter->hw;
   2070 	u32 ivar, index;
   2071 
   2072 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2073 
   2074 	if (type == -1) { /* MISC IVAR */
   2075 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2076 		ivar &= ~0xFF;
   2077 		ivar |= vector;
   2078 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2079 	} else {	/* RX/TX IVARS */
   2080 		index = (16 * (entry & 1)) + (8 * type);
   2081 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2082 		ivar &= ~(0xFF << index);
   2083 		ivar |= (vector << index);
   2084 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2085 	}
   2086 }
   2087 
   2088 static void
   2089 ixv_configure_ivars(struct adapter *adapter)
   2090 {
    2091 	struct ix_queue	*que = adapter->queues;
    2092 
    2093 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    2094 		/* First the RX queue entry */
    2095 		ixv_set_ivar(adapter, i, que->msix, 0);
    2096 		/* ... and the TX */
    2097 		ixv_set_ivar(adapter, i, que->msix, 1);
    2098 		/* Set an initial value in EITR */
    2099 		IXGBE_WRITE_REG(&adapter->hw,
    2100 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
    2101 	}
    2102 
    2103 	/* For the mailbox interrupt */
    2104 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2105 }
   2106 
   2107 
    2108 /*
    2109 ** Softint handler for MSI-X mailbox (MBX) interrupts
    2110 **  - run outside interrupt context since it might sleep
    2111 */
   2112 static void
   2113 ixv_handle_mbx(void *context)
   2114 {
   2115 	struct adapter  *adapter = context;
   2116 
   2117 	ixgbe_check_link(&adapter->hw,
   2118 	    &adapter->link_speed, &adapter->link_up, 0);
   2119 	ixv_update_link_status(adapter);
   2120 }
   2121 
   2122 /*
   2123 ** The VF stats registers never have a truly virgin
   2124 ** starting point, so this routine tries to make an
   2125 ** artificial one, marking ground zero on attach as
   2126 ** it were.
   2127 */
   2128 static void
   2129 ixv_save_stats(struct adapter *adapter)
   2130 {
   2131 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2132 
   2133 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2134 		stats->saved_reset_vfgprc +=
   2135 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2136 		stats->saved_reset_vfgptc +=
   2137 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2138 		stats->saved_reset_vfgorc +=
   2139 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2140 		stats->saved_reset_vfgotc +=
   2141 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2142 		stats->saved_reset_vfmprc +=
   2143 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2144 	}
   2145 }
   2146 
   2147 static void
   2148 ixv_init_stats(struct adapter *adapter)
   2149 {
   2150 	struct ixgbe_hw *hw = &adapter->hw;
   2151 
   2152 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2153 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2154 	adapter->stats.vf.last_vfgorc |=
   2155 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2156 
   2157 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2158 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2159 	adapter->stats.vf.last_vfgotc |=
   2160 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2161 
   2162 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2163 
   2164 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2165 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2166 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2167 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2168 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2169 }
   2170 
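/*
 * The VF counters are 32 bits wide (36 bits for the octet counters) and
 * wrap around; these macros fold each fresh hardware reading into the
 * 64-bit event counters, adding a carry whenever the register value is
 * seen to go backwards between updates.
 */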
   2171 #define UPDATE_STAT_32(reg, last, count)		\
   2172 {							\
   2173 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2174 	if (current < last)				\
   2175 		count.ev_count += 0x100000000LL;	\
   2176 	last = current;					\
   2177 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2178 	count.ev_count |= current;			\
   2179 }
   2180 
   2181 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2182 {							\
   2183 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2184 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2185 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2186 	if (current < last)				\
   2187 		count.ev_count += 0x1000000000LL;	\
   2188 	last = current;					\
   2189 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2190 	count.ev_count |= current;			\
   2191 }
   2192 
   2193 /*
   2194 ** ixv_update_stats - Update the board statistics counters.
   2195 */
   2196 void
   2197 ixv_update_stats(struct adapter *adapter)
   2198 {
    2199 	struct ixgbe_hw *hw = &adapter->hw;
    2200 
    2201 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
    2202 	    adapter->stats.vf.vfgprc);
    2203 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
    2204 	    adapter->stats.vf.vfgptc);
    2205 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
    2206 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
    2207 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
    2208 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
    2209 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
    2210 	    adapter->stats.vf.vfmprc);
   2211 }
   2212 
    2213 /**********************************************************************
    2214  *
    2215  *  This routine is called only when the "debug" sysctl is set to 1.
    2216  *  It provides a way to take a look at important statistics
    2217  *  maintained by the driver and hardware.
    2218  *
    2219  **********************************************************************/
   2220 static void
   2221 ixv_print_debug_info(struct adapter *adapter)
   2222 {
    2223 	device_t dev = adapter->dev;
    2224 	struct ixgbe_hw	*hw = &adapter->hw;
    2225 	struct ix_queue	*que = adapter->queues;
    2226 	struct rx_ring	*rxr;
    2227 	struct tx_ring	*txr;
    2228 #ifdef LRO
    2229 	struct lro_ctrl	*lro;
    2230 #endif /* LRO */
    2231 
    2232 	device_printf(dev, "Error Byte Count = %u\n",
    2233 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
    2234 
    2235 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    2236 		txr = que->txr;
    2237 		rxr = que->rxr;
    2238 #ifdef LRO
    2239 		lro = &rxr->lro;
    2240 #endif /* LRO */
    2241 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
    2242 		    que->msix, (unsigned long)que->irqs.ev_count);
    2243 		device_printf(dev, "RX(%d) Packets Received: %llu\n",
    2244 		    rxr->me, (unsigned long long)rxr->rx_packets.ev_count);
    2245 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
    2246 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
    2247 #ifdef LRO
    2248 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
    2249 		    rxr->me, (long long)lro->lro_queued);
    2250 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
    2251 		    rxr->me, (long long)lro->lro_flushed);
    2252 #endif /* LRO */
    2253 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
    2254 		    txr->me, (unsigned long)txr->total_packets.ev_count);
    2255 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
    2256 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
    2257 	}
    2258 
    2259 	device_printf(dev, "MBX IRQ Handled: %lu\n",
    2260 	    (unsigned long)adapter->link_irq.ev_count);
    2261 	return;
   2262 }
   2263 
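/*
 * Handler for the per-device "debug" sysctl; writing 1 to it (for
 * example "sysctl -w hw.ixv0.debug=1", assuming a first unit named
 * ixv0) dumps the statistics above to the console.
 */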
   2264 static int
   2265 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2266 {
   2267 	struct sysctlnode node;
   2268 	int error, result;
   2269 	struct adapter *adapter;
   2270 
   2271 	node = *rnode;
   2272 	adapter = (struct adapter *)node.sysctl_data;
   2273 	node.sysctl_data = &result;
   2274 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2275 
   2276 	if (error)
   2277 		return error;
   2278 
   2279 	if (result == 1)
   2280 		ixv_print_debug_info(adapter);
   2281 
   2282 	return 0;
   2283 }
   2284 
   2285 const struct sysctlnode *
   2286 ixv_sysctl_instance(struct adapter *adapter)
   2287 {
   2288 	const char *dvname;
   2289 	struct sysctllog **log;
   2290 	int rc;
   2291 	const struct sysctlnode *rnode;
   2292 
   2293 	log = &adapter->sysctllog;
   2294 	dvname = device_xname(adapter->dev);
   2295 
   2296 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2297 	    0, CTLTYPE_NODE, dvname,
   2298 	    SYSCTL_DESCR("ixv information and settings"),
   2299 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2300 		goto err;
   2301 
   2302 	return rnode;
   2303 err:
   2304 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2305 	return NULL;
   2306 }
   2307 
   2308 static void
   2309 ixv_add_device_sysctls(struct adapter *adapter)
   2310 {
   2311 	struct sysctllog **log;
   2312 	const struct sysctlnode *rnode, *cnode;
   2313 	device_t dev;
   2314 
   2315 	dev = adapter->dev;
   2316 	log = &adapter->sysctllog;
   2317 
   2318 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2319 		aprint_error_dev(dev, "could not create sysctl root\n");
   2320 		return;
   2321 	}
   2322 
   2323 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2324 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2325 	    "debug", SYSCTL_DESCR("Debug Info"),
   2326 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2327 		aprint_error_dev(dev, "could not create sysctl\n");
   2328 
   2329 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2330 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2331 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2332 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2333 		aprint_error_dev(dev, "could not create sysctl\n");
   2334 }
   2335 
   2336 /*
   2337  * Add statistic sysctls for the VF.
   2338  */
   2339 static void
   2340 ixv_add_stats_sysctls(struct adapter *adapter)
   2341 {
   2342 	device_t dev = adapter->dev;
   2343 	const struct sysctlnode *rnode;
   2344 	struct sysctllog **log = &adapter->sysctllog;
   2345 	struct ix_queue *que = &adapter->queues[0];
   2346 	struct tx_ring *txr = que->txr;
   2347 	struct rx_ring *rxr = que->rxr;
   2348 
   2349 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2350 	const char *xname = device_xname(dev);
   2351 
   2352 	/* Driver Statistics */
   2353 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2354 	    NULL, xname, "Handled queue in softint");
   2355 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2356 	    NULL, xname, "Requeued in softint");
   2357 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2358 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2359 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2360 	    NULL, xname, "m_defrag() failed");
   2361 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2362 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2363 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2364 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2365 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2366 	    NULL, xname, "Driver tx dma hard fail other");
   2367 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2368 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2369 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2370 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2371 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2372 	    NULL, xname, "Watchdog timeouts");
   2373 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2374 	    NULL, xname, "TSO errors");
   2375 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2376 	    NULL, xname, "Link MSIX IRQ Handled");
   2377 
   2378 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2379 		snprintf(adapter->queues[i].evnamebuf,
   2380 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2381 		    xname, i);
   2382 		snprintf(adapter->queues[i].namebuf,
   2383 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2384 
   2385 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2386 			aprint_error_dev(dev, "could not create sysctl root\n");
   2387 			break;
   2388 		}
   2389 
   2390 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2391 		    0, CTLTYPE_NODE,
   2392 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2393 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2394 			break;
   2395 
   2396 #if 0 /* not yet */
   2397 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2398 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2399 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2400 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2401 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2402 			break;
   2403 
   2404 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2405 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2406 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2407 			NULL, 0, &(adapter->queues[i].irqs),
   2408 		    0, CTL_CREATE, CTL_EOL) != 0)
   2409 			break;
   2410 
   2411 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2412 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2413 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2414 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2415 		    0, CTL_CREATE, CTL_EOL) != 0)
   2416 			break;
   2417 
   2418 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2419 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2420 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2421 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2422 		    0, CTL_CREATE, CTL_EOL) != 0)
   2423 			break;
   2424 #endif
   2425 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2426 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2427 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2428 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2429 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2430 		    NULL, adapter->queues[i].evnamebuf,
   2431 		    "Queue No Descriptor Available");
   2432 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2433 		    NULL, adapter->queues[i].evnamebuf,
   2434 		    "Queue Packets Transmitted");
   2435 #ifndef IXGBE_LEGACY_TX
   2436 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2437 		    NULL, adapter->queues[i].evnamebuf,
   2438 		    "Packets dropped in pcq");
   2439 #endif
   2440 
   2441 #ifdef LRO
   2442 		struct lro_ctrl *lro = &rxr->lro;
   2443 #endif /* LRO */
   2444 
   2445 #if 0 /* not yet */
   2446 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2447 		    CTLFLAG_READONLY,
   2448 		    CTLTYPE_INT,
   2449 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2450 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2451 		    CTL_CREATE, CTL_EOL) != 0)
   2452 			break;
   2453 
   2454 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2455 		    CTLFLAG_READONLY,
   2456 		    CTLTYPE_INT,
   2457 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2458 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2459 		    CTL_CREATE, CTL_EOL) != 0)
   2460 			break;
   2461 #endif
   2462 
   2463 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2464 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2465 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2466 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2467 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2468 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2469 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2470 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2471 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2472 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2473 #ifdef LRO
   2474 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2475 				CTLFLAG_RD, &lro->lro_queued, 0,
   2476 				"LRO Queued");
   2477 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2478 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2479 				"LRO Flushed");
   2480 #endif /* LRO */
   2481 	}
   2482 
    2483 	/* MAC stats get their own sub node */
   2484 
   2485 	snprintf(stats->namebuf,
   2486 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2487 
   2488 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2489 	    stats->namebuf, "rx csum offload - IP");
   2490 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2491 	    stats->namebuf, "rx csum offload - L4");
   2492 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2493 	    stats->namebuf, "rx csum offload - IP bad");
   2494 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2495 	    stats->namebuf, "rx csum offload - L4 bad");
   2496 
   2497 	/* Packet Reception Stats */
   2498 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2499 	    xname, "Good Packets Received");
   2500 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2501 	    xname, "Good Octets Received");
   2502 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2503 	    xname, "Multicast Packets Received");
   2504 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2505 	    xname, "Good Packets Transmitted");
   2506 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2507 	    xname, "Good Octets Transmitted");
   2508 }
   2509 
   2510 static void
   2511 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2512 	const char *description, int *limit, int value)
   2513 {
    2514 	device_t dev = adapter->dev;
   2515 	struct sysctllog **log;
   2516 	const struct sysctlnode *rnode, *cnode;
   2517 
   2518 	log = &adapter->sysctllog;
   2519 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2520 		aprint_error_dev(dev, "could not create sysctl root\n");
   2521 		return;
   2522 	}
   2523 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2524 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2525 	    name, SYSCTL_DESCR(description),
   2526 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2527 		aprint_error_dev(dev, "could not create sysctl\n");
   2528 	*limit = value;
   2529 }
   2530