ixv.c revision 1.41
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.41 2017/02/08 04:14:05 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixgbe.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.4.6-k";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
     50  *  Used by probe to select devices to load on
     51  *  Last field stores an index into ixv_strings
     52  *  Last entry must be all 0s
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     63 	/* required last entry */
     64 	{0, 0, 0, 0, 0}
     65 };
     66 
     67 /*********************************************************************
     68  *  Table of branding strings
     69  *********************************************************************/
     70 
     71 static const char    *ixv_strings[] = {
     72 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     73 };
     74 
     75 /*********************************************************************
     76  *  Function prototypes
     77  *********************************************************************/
     78 static int      ixv_probe(device_t, cfdata_t, void *);
     79 static void	ixv_attach(device_t, device_t, void *);
     80 static int      ixv_detach(device_t, int);
     81 #if 0
     82 static int      ixv_shutdown(device_t);
     83 #endif
     84 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     85 static int	ixv_init(struct ifnet *);
     86 static void	ixv_init_locked(struct adapter *);
     87 static void     ixv_stop(void *);
     88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     89 static int      ixv_media_change(struct ifnet *);
     90 static void     ixv_identify_hardware(struct adapter *);
     91 static int      ixv_allocate_pci_resources(struct adapter *,
     92 		    const struct pci_attach_args *);
     93 static int      ixv_allocate_msix(struct adapter *,
     94 		    const struct pci_attach_args *);
     95 static int	ixv_setup_msix(struct adapter *);
     96 static void	ixv_free_pci_resources(struct adapter *);
     97 static void     ixv_local_timer(void *);
     98 static void     ixv_local_timer_locked(void *);
     99 static void     ixv_setup_interface(device_t, struct adapter *);
    100 static void     ixv_config_link(struct adapter *);
    101 
    102 static void     ixv_initialize_transmit_units(struct adapter *);
    103 static void     ixv_initialize_receive_units(struct adapter *);
    104 
    105 static void     ixv_enable_intr(struct adapter *);
    106 static void     ixv_disable_intr(struct adapter *);
    107 static void     ixv_set_multi(struct adapter *);
    108 static void     ixv_update_link_status(struct adapter *);
    109 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    110 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    111 static void	ixv_configure_ivars(struct adapter *);
    112 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    113 
    114 static void	ixv_setup_vlan_support(struct adapter *);
    115 #if 0
    116 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    117 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    118 #endif
    119 
    120 static void	ixv_save_stats(struct adapter *);
    121 static void	ixv_init_stats(struct adapter *);
    122 static void	ixv_update_stats(struct adapter *);
    123 static void	ixv_add_stats_sysctls(struct adapter *);
    124 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    125 		    const char *, int *, int);
    126 
    127 /* The MSI/X Interrupt handlers */
    128 static int	ixv_msix_que(void *);
    129 static int	ixv_msix_mbx(void *);
    130 
    131 /* Deferred interrupt tasklets */
    132 static void	ixv_handle_que(void *);
    133 static void	ixv_handle_mbx(void *);
    134 
    135 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    136 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    137 
    138 #ifdef DEV_NETMAP
    139 /*
    140  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    141  * if_ix.c.
    142  */
    143 extern void ixgbe_netmap_attach(struct adapter *adapter);
    144 
    145 #include <net/netmap.h>
    146 #include <sys/selinfo.h>
    147 #include <dev/netmap/netmap_kern.h>
    148 #endif /* DEV_NETMAP */
    149 
    150 /*********************************************************************
    151  *  FreeBSD Device Interface Entry Points
    152  *********************************************************************/
    153 
    154 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    155     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    156     DVF_DETACH_SHUTDOWN);
    157 
    158 # if 0
    159 static device_method_t ixv_methods[] = {
    160 	/* Device interface */
    161 	DEVMETHOD(device_probe, ixv_probe),
    162 	DEVMETHOD(device_attach, ixv_attach),
    163 	DEVMETHOD(device_detach, ixv_detach),
    164 	DEVMETHOD(device_shutdown, ixv_shutdown),
    165 	DEVMETHOD_END
    166 };
    167 #endif
    168 
    169 #if 0
    170 static driver_t ixv_driver = {
    171 	"ixv", ixv_methods, sizeof(struct adapter),
    172 };
    173 
    174 devclass_t ixv_devclass;
    175 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    176 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    177 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    178 #ifdef DEV_NETMAP
    179 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    180 #endif /* DEV_NETMAP */
    181 /* XXX depend on 'ix' ? */
    182 #endif
    183 
    184 /*
    185 ** TUNEABLE PARAMETERS:
    186 */
    187 
    188 /* Number of Queues - do not exceed MSIX vectors - 1 */
    189 static int ixv_num_queues = 1;
    190 #define	TUNABLE_INT(__x, __y)
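         /*
         ** The empty TUNABLE_INT() definition above makes the TUNABLE_INT()
         ** calls below no-ops on this port; the compiled-in defaults apply.
         */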
    191 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    192 
    193 /*
    194 ** AIM: Adaptive Interrupt Moderation
    195 ** which means that the interrupt rate
    196 ** is varied over time based on the
    197 ** traffic for that interrupt vector
    198 */
    199 static int ixv_enable_aim = FALSE;
    200 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    201 
    202 /* How many packets rxeof tries to clean at a time */
    203 static int ixv_rx_process_limit = 256;
    204 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    205 
    206 /* How many packets txeof tries to clean at a time */
    207 static int ixv_tx_process_limit = 256;
    208 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    209 
    210 /*
    211 ** Number of TX descriptors per ring,
     212 ** set higher than RX as this seems to be
     213 ** the better-performing choice.
    214 */
    215 static int ixv_txd = DEFAULT_TXD;
    216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    217 
    218 /* Number of RX descriptors per ring */
    219 static int ixv_rxd = DEFAULT_RXD;
    220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    221 
    222 /*
     223 ** Shadow VFTA table; this is needed because
    224 ** the real filter table gets cleared during
    225 ** a soft reset and we need to repopulate it.
    226 */
    227 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    228 
    229 /*********************************************************************
    230  *  Device identification routine
    231  *
     232  *  ixv_probe determines whether the driver should be loaded for
     233  *  this adapter, based on its PCI vendor/device ID.
    234  *
    235  *  return 1 on success, 0 on failure
    236  *********************************************************************/
    237 
    238 static int
    239 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    240 {
    241 #ifdef __HAVE_PCI_MSI_MSIX
    242 	const struct pci_attach_args *pa = aux;
    243 
    244 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    245 #else
    246 	return 0;
    247 #endif
    248 }
    249 
    250 static ixgbe_vendor_info_t *
    251 ixv_lookup(const struct pci_attach_args *pa)
    252 {
    253 	pcireg_t subid;
    254 	ixgbe_vendor_info_t *ent;
    255 
    256 	INIT_DEBUGOUT("ixv_lookup: begin");
    257 
    258 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    259 		return NULL;
    260 
    261 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    262 
    263 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    264 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    265 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    266 
    267 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    268 		     (ent->subvendor_id == 0)) &&
    269 
    270 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    271 		     (ent->subdevice_id == 0))) {
    272 			return ent;
    273 		}
    274 	}
    275 	return NULL;
    276 }
    277 
    278 
    279 static void
    280 ixv_sysctl_attach(struct adapter *adapter)
    281 {
    282 	struct sysctllog **log;
    283 	const struct sysctlnode *rnode, *cnode;
    284 	device_t dev;
    285 
    286 	dev = adapter->dev;
    287 	log = &adapter->sysctllog;
    288 
    289 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    290 		aprint_error_dev(dev, "could not create sysctl root\n");
    291 		return;
    292 	}
    293 
    294 	if (sysctl_createv(log, 0, &rnode, &cnode,
    295 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    296 	    "debug", SYSCTL_DESCR("Debug Info"),
    297 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    298 		aprint_error_dev(dev, "could not create sysctl\n");
    299 
    300 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    301 	 * XXX It's that way in the FreeBSD driver that this derives from.
    302 	 */
    303 	if (sysctl_createv(log, 0, &rnode, &cnode,
    304 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    305 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    306 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    307 		aprint_error_dev(dev, "could not create sysctl\n");
    308 }
    309 
    310 /*********************************************************************
    311  *  Device initialization routine
    312  *
    313  *  The attach entry point is called when the driver is being loaded.
    314  *  This routine identifies the type of hardware, allocates all resources
    315  *  and initializes the hardware.
    316  *
    317  *  return 0 on success, positive on failure
    318  *********************************************************************/
    319 
    320 static void
    321 ixv_attach(device_t parent, device_t dev, void *aux)
    322 {
    323 	struct adapter *adapter;
    324 	struct ixgbe_hw *hw;
    325 	int             error = 0;
    326 	ixgbe_vendor_info_t *ent;
    327 	const struct pci_attach_args *pa = aux;
    328 
    329 	INIT_DEBUGOUT("ixv_attach: begin");
    330 
    331 	/* Allocate, clear, and link in our adapter structure */
    332 	adapter = device_private(dev);
    333 	adapter->dev = dev;
    334 	hw = &adapter->hw;
    335 
    336 #ifdef DEV_NETMAP
    337 	adapter->init_locked = ixv_init_locked;
    338 	adapter->stop_locked = ixv_stop;
    339 #endif
    340 
    341 	adapter->osdep.pc = pa->pa_pc;
    342 	adapter->osdep.tag = pa->pa_tag;
    343 	adapter->osdep.dmat = pa->pa_dmat;
    344 	adapter->osdep.attached = false;
    345 
    346 	ent = ixv_lookup(pa);
    347 
    348 	KASSERT(ent != NULL);
    349 
    350 	aprint_normal(": %s, Version - %s\n",
    351 	    ixv_strings[ent->index], ixv_driver_version);
    352 
    353 	/* Core Lock Init*/
    354 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    355 
    356 	/* SYSCTL APIs */
    357 	ixv_sysctl_attach(adapter);
    358 
    359 	/* Set up the timer callout */
    360 	callout_init(&adapter->timer, 0);
    361 
    362 	/* Determine hardware revision */
    363 	ixv_identify_hardware(adapter);
    364 
    365 	/* Do base PCI setup - map BAR0 */
    366 	if (ixv_allocate_pci_resources(adapter, pa)) {
    367 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    368 		error = ENXIO;
    369 		goto err_out;
    370 	}
    371 
    372 	/* Sysctls for limiting the amount of work done in the taskqueues */
    373 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    374 	    "max number of rx packets to process",
    375 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    376 
    377 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    378 	    "max number of tx packets to process",
    379 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    380 
    381 	/* Do descriptor calc and sanity checks */
    382 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    383 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    384 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    385 		adapter->num_tx_desc = DEFAULT_TXD;
    386 	} else
    387 		adapter->num_tx_desc = ixv_txd;
    388 
    389 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    390 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    391 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    392 		adapter->num_rx_desc = DEFAULT_RXD;
    393 	} else
    394 		adapter->num_rx_desc = ixv_rxd;
    395 
    396 	/* Allocate our TX/RX Queues */
    397 	if (ixgbe_allocate_queues(adapter)) {
    398 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    399 		error = ENOMEM;
    400 		goto err_out;
    401 	}
    402 
    403 	/*
     404 	** Initialize the shared code; at this
     405 	** point the MAC type is set.
    406 	*/
    407 	error = ixgbe_init_shared_code(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    410 		error = EIO;
    411 		goto err_late;
    412 	}
    413 
    414 	/* Setup the mailbox */
    415 	ixgbe_init_mbx_params_vf(hw);
    416 
    417 	/* Reset mbox api to 1.0 */
    418 	error = ixgbe_reset_hw(hw);
    419 	if (error == IXGBE_ERR_RESET_FAILED)
    420 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    421 	else if (error)
    422 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    423 	if (error) {
    424 		error = EIO;
    425 		goto err_late;
    426 	}
    427 
    428 	/* Negotiate mailbox API version */
    429 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    430 	if (error)
    431 		aprint_debug_dev(dev,
    432 		    "MBX API 1.1 negotiation failed! Error %d\n", error);
    433 
    434 	error = ixgbe_init_hw(hw);
    435 	if (error) {
    436 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    437 		error = EIO;
    438 		goto err_late;
    439 	}
    440 
    441 	error = ixv_allocate_msix(adapter, pa);
    442 	if (error) {
    443 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    444 		goto err_late;
    445 	}
    446 
    447 	/* If no mac address was assigned, make a random one */
    448 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    449 		u8 addr[ETHER_ADDR_LEN];
    450 		uint64_t rndval = cprng_fast64();
    451 
    452 		memcpy(addr, &rndval, sizeof(addr));
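         		/*
         		 * Clear the multicast bit and set the locally
         		 * administered bit so this is a valid unicast LAA.
         		 */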
    453 		addr[0] &= 0xFE;
    454 		addr[0] |= 0x02;
    455 		bcopy(addr, hw->mac.addr, sizeof(addr));
    456 	}
    457 
    458 	/* Setup OS specific network interface */
    459 	ixv_setup_interface(dev, adapter);
    460 
    461 	/* Do the stats setup */
    462 	ixv_save_stats(adapter);
    463 	ixv_init_stats(adapter);
    464 	ixv_add_stats_sysctls(adapter);
    465 
    466 	/* Register for VLAN events */
    467 #if 0 /* XXX delete after write? */
    468 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    469 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    470 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    471 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    472 #endif
    473 
    474 #ifdef DEV_NETMAP
    475 	ixgbe_netmap_attach(adapter);
    476 #endif /* DEV_NETMAP */
    477 	INIT_DEBUGOUT("ixv_attach: end");
    478 	adapter->osdep.attached = true;
    479 	return;
    480 
    481 err_late:
    482 	ixgbe_free_transmit_structures(adapter);
    483 	ixgbe_free_receive_structures(adapter);
    484 err_out:
    485 	ixv_free_pci_resources(adapter);
    486 	return;
    487 
    488 }
    489 
    490 /*********************************************************************
    491  *  Device removal routine
    492  *
    493  *  The detach entry point is called when the driver is being removed.
    494  *  This routine stops the adapter and deallocates all the resources
    495  *  that were allocated for driver operation.
    496  *
    497  *  return 0 on success, positive on failure
    498  *********************************************************************/
    499 
    500 static int
    501 ixv_detach(device_t dev, int flags)
    502 {
    503 	struct adapter *adapter = device_private(dev);
    504 	struct ix_queue *que = adapter->queues;
    505 	struct tx_ring *txr = adapter->tx_rings;
    506 
    507 	INIT_DEBUGOUT("ixv_detach: begin");
    508 	if (adapter->osdep.attached == false)
    509 		return 0;
    510 
    511 #if NVLAN > 0
    512 	/* Make sure VLANS are not using driver */
    513 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    514 		;	/* nothing to do: no VLANs */
    515 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    516 		vlan_ifdetach(adapter->ifp);
    517 	else {
    518 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    519 		return EBUSY;
    520 	}
    521 #endif
    522 
    523 	IXGBE_CORE_LOCK(adapter);
    524 	ixv_stop(adapter);
    525 	IXGBE_CORE_UNLOCK(adapter);
    526 
    527 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    528 #ifndef IXGBE_LEGACY_TX
    529 		softint_disestablish(txr->txr_si);
    530 #endif
    531 		softint_disestablish(que->que_si);
    532 	}
    533 
    534 	/* Drain the Mailbox(link) queue */
    535 	softint_disestablish(adapter->link_si);
    536 
    537 	/* Unregister VLAN events */
    538 #if 0 /* XXX msaitoh delete after write? */
    539 	if (adapter->vlan_attach != NULL)
    540 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    541 	if (adapter->vlan_detach != NULL)
    542 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    543 #endif
    544 
    545 	ether_ifdetach(adapter->ifp);
    546 	callout_halt(&adapter->timer, NULL);
    547 #ifdef DEV_NETMAP
    548 	netmap_detach(adapter->ifp);
    549 #endif /* DEV_NETMAP */
    550 	ixv_free_pci_resources(adapter);
    551 #if 0 /* XXX the NetBSD port is probably missing something here */
    552 	bus_generic_detach(dev);
    553 #endif
    554 	if_detach(adapter->ifp);
    555 
    556 	sysctl_teardown(&adapter->sysctllog);
    557 
    558 	ixgbe_free_transmit_structures(adapter);
    559 	ixgbe_free_receive_structures(adapter);
    560 
    561 	IXGBE_CORE_LOCK_DESTROY(adapter);
    562 	return (0);
    563 }
    564 
    565 /*********************************************************************
    566  *
    567  *  Shutdown entry point
    568  *
    569  **********************************************************************/
    570 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    571 static int
    572 ixv_shutdown(device_t dev)
    573 {
    574 	struct adapter *adapter = device_private(dev);
    575 	IXGBE_CORE_LOCK(adapter);
    576 	ixv_stop(adapter);
    577 	IXGBE_CORE_UNLOCK(adapter);
    578 	return (0);
    579 }
    580 #endif
    581 
    582 static int
    583 ixv_ifflags_cb(struct ethercom *ec)
    584 {
    585 	struct ifnet *ifp = &ec->ec_if;
    586 	struct adapter *adapter = ifp->if_softc;
    587 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    588 
    589 	IXGBE_CORE_LOCK(adapter);
    590 
    591 	if (change != 0)
    592 		adapter->if_flags = ifp->if_flags;
    593 
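         	/*
         	 * Any flag change outside IFF_CANTCHANGE and IFF_DEBUG is
         	 * reported back as ENETRESET.
         	 */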
    594 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    595 		rc = ENETRESET;
    596 
    597 	IXGBE_CORE_UNLOCK(adapter);
    598 
    599 	return rc;
    600 }
    601 
    602 /*********************************************************************
    603  *  Ioctl entry point
    604  *
    605  *  ixv_ioctl is called when the user wants to configure the
    606  *  interface.
    607  *
    608  *  return 0 on success, positive on failure
    609  **********************************************************************/
    610 
    611 static int
    612 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    613 {
    614 	struct adapter	*adapter = ifp->if_softc;
    615 	struct ifcapreq *ifcr = data;
    616 	struct ifreq	*ifr = (struct ifreq *) data;
    617 	int             error = 0;
    618 	int l4csum_en;
    619 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    620 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    621 
    622 	switch (command) {
    623 	case SIOCSIFFLAGS:
    624 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    625 		break;
    626 	case SIOCADDMULTI:
    627 	case SIOCDELMULTI:
    628 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    629 		break;
    630 	case SIOCSIFMEDIA:
    631 	case SIOCGIFMEDIA:
    632 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    633 		break;
    634 	case SIOCSIFCAP:
    635 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    636 		break;
    637 	case SIOCSIFMTU:
    638 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    639 		break;
    640 	default:
    641 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    642 		break;
    643 	}
    644 
    645 	switch (command) {
    646 	case SIOCSIFMEDIA:
    647 	case SIOCGIFMEDIA:
    648 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    649 	case SIOCSIFCAP:
    650 		/* Layer-4 Rx checksum offload has to be turned on and
    651 		 * off as a unit.
    652 		 */
    653 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    654 		if (l4csum_en != l4csum && l4csum_en != 0)
    655 			return EINVAL;
    656 		/*FALLTHROUGH*/
    657 	case SIOCADDMULTI:
    658 	case SIOCDELMULTI:
    659 	case SIOCSIFFLAGS:
    660 	case SIOCSIFMTU:
    661 	default:
    662 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    663 			return error;
    664 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    665 			;
    666 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    667 			IXGBE_CORE_LOCK(adapter);
    668 			ixv_init_locked(adapter);
    669 			IXGBE_CORE_UNLOCK(adapter);
    670 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    671 			/*
    672 			 * Multicast list has changed; set the hardware filter
    673 			 * accordingly.
    674 			 */
    675 			IXGBE_CORE_LOCK(adapter);
    676 			ixv_disable_intr(adapter);
    677 			ixv_set_multi(adapter);
    678 			ixv_enable_intr(adapter);
    679 			IXGBE_CORE_UNLOCK(adapter);
    680 		}
    681 		return 0;
    682 	}
    683 }
    684 
    685 /*********************************************************************
    686  *  Init entry point
    687  *
    688  *  This routine is used in two ways. It is used by the stack as
     689  *  the init entry point in the network interface structure. It is also used
    690  *  by the driver as a hw/sw initialization routine to get to a
    691  *  consistent state.
    692  *
    693  *  return 0 on success, positive on failure
    694  **********************************************************************/
    695 #define IXGBE_MHADD_MFS_SHIFT 16
    696 
    697 static void
    698 ixv_init_locked(struct adapter *adapter)
    699 {
    700 	struct ifnet	*ifp = adapter->ifp;
    701 	device_t 	dev = adapter->dev;
    702 	struct ixgbe_hw *hw = &adapter->hw;
    703 	int error = 0;
    704 
    705 	INIT_DEBUGOUT("ixv_init_locked: begin");
    706 	KASSERT(mutex_owned(&adapter->core_mtx));
    707 	hw->adapter_stopped = FALSE;
    708 	ixgbe_stop_adapter(hw);
    709         callout_stop(&adapter->timer);
    710 
    711         /* reprogram the RAR[0] in case user changed it. */
    712         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    713 
     714 	/* Get the latest MAC address; the user may have set an LAA */
    715 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    716 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    717         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    718 	hw->addr_ctrl.rar_used_count = 1;
    719 
    720 	/* Prepare transmit descriptors and buffers */
    721 	if (ixgbe_setup_transmit_structures(adapter)) {
    722 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    723 		ixv_stop(adapter);
    724 		return;
    725 	}
    726 
    727 	/* Reset VF and renegotiate mailbox API version */
    728 	ixgbe_reset_hw(hw);
    729 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    730 	if (error)
    731 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    732 
    733 	ixv_initialize_transmit_units(adapter);
    734 
    735 	/* Setup Multicast table */
    736 	ixv_set_multi(adapter);
    737 
    738 	/*
    739 	** Determine the correct mbuf pool
    740 	** for doing jumbo/headersplit
    741 	*/
    742 	if (ifp->if_mtu > ETHERMTU)
    743 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    744 	else
    745 		adapter->rx_mbuf_sz = MCLBYTES;
    746 
    747 	/* Prepare receive descriptors and buffers */
    748 	if (ixgbe_setup_receive_structures(adapter)) {
    749 		device_printf(dev, "Could not setup receive structures\n");
    750 		ixv_stop(adapter);
    751 		return;
    752 	}
    753 
    754 	/* Configure RX settings */
    755 	ixv_initialize_receive_units(adapter);
    756 
    757 #if 0 /* XXX isn't it required? -- msaitoh  */
    758 	/* Set the various hardware offload abilities */
    759 	ifp->if_hwassist = 0;
    760 	if (ifp->if_capenable & IFCAP_TSO4)
    761 		ifp->if_hwassist |= CSUM_TSO;
    762 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    763 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    764 #if __FreeBSD_version >= 800000
    765 		ifp->if_hwassist |= CSUM_SCTP;
    766 #endif
    767 	}
    768 #endif
    769 
    770 	/* Set up VLAN offload and filter */
    771 	ixv_setup_vlan_support(adapter);
    772 
    773 	/* Set up MSI/X routing */
    774 	ixv_configure_ivars(adapter);
    775 
    776 	/* Set up auto-mask */
    777 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    778 
    779         /* Set moderation on the Link interrupt */
    780         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    781 
    782 	/* Stats init */
    783 	ixv_init_stats(adapter);
    784 
    785 	/* Config/Enable Link */
    786 	ixv_config_link(adapter);
    787 	hw->mac.get_link_status = TRUE;
    788 
    789 	/* Start watchdog */
    790 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    791 
    792 	/* And now turn on interrupts */
    793 	ixv_enable_intr(adapter);
    794 
    795 	/* Now inform the stack we're ready */
    796 	ifp->if_flags |= IFF_RUNNING;
    797 	ifp->if_flags &= ~IFF_OACTIVE;
    798 
    799 	return;
    800 }
    801 
    802 static int
    803 ixv_init(struct ifnet *ifp)
    804 {
    805 	struct adapter *adapter = ifp->if_softc;
    806 
    807 	IXGBE_CORE_LOCK(adapter);
    808 	ixv_init_locked(adapter);
    809 	IXGBE_CORE_UNLOCK(adapter);
    810 	return 0;
    811 }
    812 
    813 
    814 /*
    815 **
    816 ** MSIX Interrupt Handlers and Tasklets
    817 **
    818 */
    819 
    820 static inline void
    821 ixv_enable_queue(struct adapter *adapter, u32 vector)
    822 {
    823 	struct ixgbe_hw *hw = &adapter->hw;
    824 	u32	queue = 1 << vector;
    825 	u32	mask;
    826 
    827 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    828 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    829 }
    830 
    831 static inline void
    832 ixv_disable_queue(struct adapter *adapter, u32 vector)
    833 {
    834 	struct ixgbe_hw *hw = &adapter->hw;
     835 	u64	queue = (u64)1 << vector;
    836 	u32	mask;
    837 
    838 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    839 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    840 }
    841 
    842 static inline void
    843 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    844 {
    845 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    846 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    847 }
    848 
    849 
    850 static void
    851 ixv_handle_que(void *context)
    852 {
    853 	struct ix_queue *que = context;
    854 	struct adapter  *adapter = que->adapter;
    855 	struct tx_ring	*txr = que->txr;
    856 	struct ifnet    *ifp = adapter->ifp;
    857 	bool		more;
    858 
    859 	if (ifp->if_flags & IFF_RUNNING) {
    860 		more = ixgbe_rxeof(que);
    861 		IXGBE_TX_LOCK(txr);
    862 		ixgbe_txeof(txr);
    863 #ifndef IXGBE_LEGACY_TX
    864 		if (pcq_peek(txr->txr_interq) != NULL)
    865 			ixgbe_mq_start_locked(ifp, txr);
    866 #else
    867 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
    868 			ixgbe_start_locked(txr, ifp);
    869 #endif
    870 		IXGBE_TX_UNLOCK(txr);
    871 		if (more) {
    872 			adapter->req.ev_count++;
    873 			softint_schedule(que->que_si);
    874 			return;
    875 		}
    876 	}
    877 
    878 	/* Reenable this interrupt */
    879 	ixv_enable_queue(adapter, que->msix);
    880 	return;
    881 }
    882 
    883 /*********************************************************************
    884  *
     885  *  MSI-X Queue Interrupt Service routine
    886  *
    887  **********************************************************************/
    888 int
    889 ixv_msix_que(void *arg)
    890 {
    891 	struct ix_queue	*que = arg;
    892 	struct adapter  *adapter = que->adapter;
    893 	struct ifnet    *ifp = adapter->ifp;
    894 	struct tx_ring	*txr = que->txr;
    895 	struct rx_ring	*rxr = que->rxr;
    896 	bool		more;
    897 	u32		newitr = 0;
    898 
    899 	ixv_disable_queue(adapter, que->msix);
    900 	++que->irqs.ev_count;
    901 
    902 #ifdef __NetBSD__
    903 	/* Don't run ixgbe_rxeof in interrupt context */
    904 	more = true;
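         	/* Claiming more work forces the que softint, which runs ixgbe_rxeof(). */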
    905 #else
    906 	more = ixgbe_rxeof(que);
    907 #endif
    908 
    909 	IXGBE_TX_LOCK(txr);
    910 	ixgbe_txeof(txr);
    911 	/*
    912 	** Make certain that if the stack
    913 	** has anything queued the task gets
    914 	** scheduled to handle it.
    915 	*/
    916 #ifdef IXGBE_LEGACY_TX
    917 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
    918 		ixgbe_start_locked(txr, ifp);
    919 #else
    920 	if (pcq_peek(txr->txr_interq) != NULL)
    921 		ixgbe_mq_start_locked(ifp, txr);
    922 #endif
    923 	IXGBE_TX_UNLOCK(txr);
    924 
    925 	/* Do AIM now? */
    926 
    927 	if (ixv_enable_aim == FALSE)
    928 		goto no_calc;
    929 	/*
    930 	** Do Adaptive Interrupt Moderation:
    931         **  - Write out last calculated setting
    932 	**  - Calculate based on average size over
    933 	**    the last interval.
    934 	*/
    935         if (que->eitr_setting)
    936                 IXGBE_WRITE_REG(&adapter->hw,
    937                     IXGBE_VTEITR(que->msix),
    938 		    que->eitr_setting);
    939 
    940         que->eitr_setting = 0;
    941 
    942         /* Idle, do nothing */
    943         if ((txr->bytes == 0) && (rxr->bytes == 0))
    944                 goto no_calc;
    945 
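         	/*
         	** newitr approximates the average frame size (bytes/packets)
         	** seen on this vector since the last write; it is padded for
         	** frame overhead, then clamped and scaled below.
         	*/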
    946 	if ((txr->bytes) && (txr->packets))
    947                	newitr = txr->bytes/txr->packets;
    948 	if ((rxr->bytes) && (rxr->packets))
    949 		newitr = max(newitr,
    950 		    (rxr->bytes / rxr->packets));
    951 	newitr += 24; /* account for hardware frame, crc */
    952 
    953 	/* set an upper boundary */
    954 	newitr = min(newitr, 3000);
    955 
    956 	/* Be nice to the mid range */
    957 	if ((newitr > 300) && (newitr < 1200))
    958 		newitr = (newitr / 3);
    959 	else
    960 		newitr = (newitr / 2);
    961 
    962 	newitr |= newitr << 16;
    963 
    964         /* save for next interrupt */
    965         que->eitr_setting = newitr;
    966 
    967         /* Reset state */
    968         txr->bytes = 0;
    969         txr->packets = 0;
    970         rxr->bytes = 0;
    971         rxr->packets = 0;
    972 
    973 no_calc:
    974 	if (more)
    975 		softint_schedule(que->que_si);
    976 	else /* Reenable this interrupt */
    977 		ixv_enable_queue(adapter, que->msix);
    978 	return 1;
    979 }
    980 
    981 static int
    982 ixv_msix_mbx(void *arg)
    983 {
    984 	struct adapter	*adapter = arg;
    985 	struct ixgbe_hw *hw = &adapter->hw;
    986 	u32		reg;
    987 
    988 	++adapter->link_irq.ev_count;
    989 
    990 	/* First get the cause */
    991 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    992 	/* Clear interrupt with write */
    993 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    994 
    995 	/* Link status change */
    996 	if (reg & IXGBE_EICR_LSC)
    997 		softint_schedule(adapter->link_si);
    998 
    999 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1000 	return 1;
   1001 }
   1002 
   1003 /*********************************************************************
   1004  *
   1005  *  Media Ioctl callback
   1006  *
   1007  *  This routine is called whenever the user queries the status of
   1008  *  the interface using ifconfig.
   1009  *
   1010  **********************************************************************/
   1011 static void
   1012 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1013 {
   1014 	struct adapter *adapter = ifp->if_softc;
   1015 
   1016 	INIT_DEBUGOUT("ixv_media_status: begin");
   1017 	IXGBE_CORE_LOCK(adapter);
   1018 	ixv_update_link_status(adapter);
   1019 
   1020 	ifmr->ifm_status = IFM_AVALID;
   1021 	ifmr->ifm_active = IFM_ETHER;
   1022 
   1023 	if (!adapter->link_active) {
   1024 		ifmr->ifm_active |= IFM_NONE;
   1025 		IXGBE_CORE_UNLOCK(adapter);
   1026 		return;
   1027 	}
   1028 
   1029 	ifmr->ifm_status |= IFM_ACTIVE;
   1030 
   1031 	switch (adapter->link_speed) {
   1032 		case IXGBE_LINK_SPEED_1GB_FULL:
   1033 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1034 			break;
   1035 		case IXGBE_LINK_SPEED_10GB_FULL:
   1036 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1037 			break;
   1038 	}
   1039 
   1040 	IXGBE_CORE_UNLOCK(adapter);
   1041 
   1042 	return;
   1043 }
   1044 
   1045 /*********************************************************************
   1046  *
   1047  *  Media Ioctl callback
   1048  *
   1049  *  This routine is called when the user changes speed/duplex using
   1050  *  media/mediopt option with ifconfig.
    1051  *  the media/mediaopt option with ifconfig.
   1052  **********************************************************************/
   1053 static int
   1054 ixv_media_change(struct ifnet * ifp)
   1055 {
   1056 	struct adapter *adapter = ifp->if_softc;
   1057 	struct ifmedia *ifm = &adapter->media;
   1058 
   1059 	INIT_DEBUGOUT("ixv_media_change: begin");
   1060 
   1061 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1062 		return (EINVAL);
   1063 
   1064         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1065         case IFM_AUTO:
   1066                 break;
   1067         default:
   1068                 device_printf(adapter->dev, "Only auto media type\n");
   1069 		return (EINVAL);
   1070         }
   1071 
   1072 	return (0);
   1073 }
   1074 
   1075 
   1076 /*********************************************************************
   1077  *  Multicast Update
   1078  *
   1079  *  This routine is called whenever multicast address list is updated.
   1080  *
   1081  **********************************************************************/
   1082 #define IXGBE_RAR_ENTRIES 16
   1083 
   1084 static void
   1085 ixv_set_multi(struct adapter *adapter)
   1086 {
   1087 	struct ether_multi *enm;
   1088 	struct ether_multistep step;
   1089 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1090 	u8	*update_ptr;
   1091 	int	mcnt = 0;
   1092 	struct ethercom *ec = &adapter->osdep.ec;
   1093 
   1094 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1095 
   1096 	ETHER_FIRST_MULTI(step, ec, enm);
   1097 	while (enm != NULL) {
   1098 		bcopy(enm->enm_addrlo,
   1099 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1100 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1101 		mcnt++;
   1102 		/* XXX This might be required --msaitoh */
   1103 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1104 			break;
   1105 		ETHER_NEXT_MULTI(step, enm);
   1106 	}
   1107 
   1108 	update_ptr = mta;
   1109 
   1110 	ixgbe_update_mc_addr_list(&adapter->hw,
   1111 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1112 
   1113 	return;
   1114 }
   1115 
   1116 /*
    1117  * This is an iterator function needed by the multicast
    1118  * shared code. It simply feeds the shared code routine the
    1119  * addresses from the array built in ixv_set_multi(), one at a time.
   1120  */
   1121 static u8 *
   1122 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1123 {
   1124 	u8 *addr = *update_ptr;
   1125 	u8 *newptr;
   1126 	*vmdq = 0;
   1127 
   1128 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1129 	*update_ptr = newptr;
   1130 	return addr;
   1131 }
   1132 
   1133 /*********************************************************************
   1134  *  Timer routine
   1135  *
    1136  *  This routine checks for link status, updates statistics,
   1137  *  and runs the watchdog check.
   1138  *
   1139  **********************************************************************/
   1140 
   1141 static void
   1142 ixv_local_timer(void *arg)
   1143 {
   1144 	struct adapter *adapter = arg;
   1145 
   1146 	IXGBE_CORE_LOCK(adapter);
   1147 	ixv_local_timer_locked(adapter);
   1148 	IXGBE_CORE_UNLOCK(adapter);
   1149 }
   1150 
   1151 static void
   1152 ixv_local_timer_locked(void *arg)
   1153 {
   1154 	struct adapter	*adapter = arg;
   1155 	device_t	dev = adapter->dev;
   1156 	struct ix_queue	*que = adapter->queues;
   1157 	u64		queues = 0;
   1158 	int		hung = 0;
   1159 
   1160 	KASSERT(mutex_owned(&adapter->core_mtx));
   1161 
   1162 	ixv_update_link_status(adapter);
   1163 
   1164 	/* Stats Update */
   1165 	ixv_update_stats(adapter);
   1166 
   1167 	/*
   1168 	** Check the TX queues status
   1169 	**      - mark hung queues so we don't schedule on them
   1170 	**      - watchdog only if all queues show hung
   1171 	*/
   1172 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1173 		/* Keep track of queues with work for soft irq */
   1174 		if (que->txr->busy)
   1175 			queues |= ((u64)1 << que->me);
   1176 		/*
    1177 		** Each time txeof runs without cleaning while there
    1178 		** are uncleaned descriptors, it increments busy. If
    1179 		** we get to the MAX we declare the queue hung.
   1180 		*/
   1181 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1182 			++hung;
   1183 			/* Mark the queue as inactive */
   1184 			adapter->active_queues &= ~((u64)1 << que->me);
   1185 			continue;
   1186 		} else {
   1187 			/* Check if we've come back from hung */
   1188 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1189                                 adapter->active_queues |= ((u64)1 << que->me);
   1190 		}
   1191 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1192 			device_printf(dev,"Warning queue %d "
   1193 			    "appears to be hung!\n", i);
   1194 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1195 			++hung;
   1196 		}
   1197 
   1198 	}
   1199 
   1200 	/* Only truly watchdog if all queues show hung */
   1201 	if (hung == adapter->num_queues)
   1202 		goto watchdog;
   1203 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1204 		ixv_rearm_queues(adapter, queues);
   1205 	}
   1206 
   1207 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1208 	return;
   1209 
   1210 watchdog:
   1211 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1212 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1213 	adapter->watchdog_events.ev_count++;
   1214 	ixv_init_locked(adapter);
   1215 }
   1216 
   1217 /*
    1218 ** Note: this routine updates the OS on the link state;
   1219 **	the real check of the hardware only happens with
   1220 **	a link interrupt.
   1221 */
   1222 static void
   1223 ixv_update_link_status(struct adapter *adapter)
   1224 {
   1225 	struct ifnet	*ifp = adapter->ifp;
   1226 	device_t dev = adapter->dev;
   1227 
   1228 	if (adapter->link_up){
   1229 		if (adapter->link_active == FALSE) {
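         			/* link_speed 128 == IXGBE_LINK_SPEED_10GB_FULL
         			 * (0x80); anything else is reported as 1 Gbps here. */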
   1230 			if (bootverbose)
   1231 				device_printf(dev,"Link is up %d Gbps %s \n",
   1232 				    ((adapter->link_speed == 128)? 10:1),
   1233 				    "Full Duplex");
   1234 			adapter->link_active = TRUE;
   1235 			if_link_state_change(ifp, LINK_STATE_UP);
   1236 		}
   1237 	} else { /* Link down */
   1238 		if (adapter->link_active == TRUE) {
   1239 			if (bootverbose)
   1240 				device_printf(dev,"Link is Down\n");
   1241 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1242 			adapter->link_active = FALSE;
   1243 		}
   1244 	}
   1245 
   1246 	return;
   1247 }
   1248 
   1249 
   1250 static void
   1251 ixv_ifstop(struct ifnet *ifp, int disable)
   1252 {
   1253 	struct adapter *adapter = ifp->if_softc;
   1254 
   1255 	IXGBE_CORE_LOCK(adapter);
   1256 	ixv_stop(adapter);
   1257 	IXGBE_CORE_UNLOCK(adapter);
   1258 }
   1259 
   1260 /*********************************************************************
   1261  *
   1262  *  This routine disables all traffic on the adapter by issuing a
   1263  *  global reset on the MAC and deallocates TX/RX buffers.
   1264  *
   1265  **********************************************************************/
   1266 
   1267 static void
   1268 ixv_stop(void *arg)
   1269 {
   1270 	struct ifnet   *ifp;
   1271 	struct adapter *adapter = arg;
   1272 	struct ixgbe_hw *hw = &adapter->hw;
   1273 	ifp = adapter->ifp;
   1274 
   1275 	KASSERT(mutex_owned(&adapter->core_mtx));
   1276 
   1277 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1278 	ixv_disable_intr(adapter);
   1279 
   1280 	/* Tell the stack that the interface is no longer active */
   1281 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1282 
   1283 	ixgbe_reset_hw(hw);
   1284 	adapter->hw.adapter_stopped = FALSE;
   1285 	ixgbe_stop_adapter(hw);
   1286 	callout_stop(&adapter->timer);
   1287 
   1288 	/* reprogram the RAR[0] in case user changed it. */
   1289 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1290 
   1291 	return;
   1292 }
   1293 
   1294 
   1295 /*********************************************************************
   1296  *
   1297  *  Determine hardware revision.
   1298  *
   1299  **********************************************************************/
   1300 static void
   1301 ixv_identify_hardware(struct adapter *adapter)
   1302 {
   1303 	pcitag_t tag;
   1304 	pci_chipset_tag_t pc;
   1305 	pcireg_t subid, id;
   1306 	struct ixgbe_hw *hw = &adapter->hw;
   1307 
   1308 	pc = adapter->osdep.pc;
   1309 	tag = adapter->osdep.tag;
   1310 
   1311 	/*
    1312 	** Make sure BUSMASTER is set; on a VM under
    1313 	** KVM it may not be, which will break things.
   1314 	*/
   1315 	ixgbe_pci_enable_busmaster(pc, tag);
   1316 
   1317 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1318 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1319 
   1320 	/* Save off the information about this board */
   1321 	hw->vendor_id = PCI_VENDOR(id);
   1322 	hw->device_id = PCI_PRODUCT(id);
   1323 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1324 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1325 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1326 
   1327 	/* We need this to determine device-specific things */
   1328 	ixgbe_set_mac_type(hw);
   1329 
   1330 	/* Set the right number of segments */
   1331 	adapter->num_segs = IXGBE_82599_SCATTER;
   1332 
   1333 	return;
   1334 }
   1335 
   1336 /*********************************************************************
   1337  *
   1338  *  Setup MSIX Interrupt resources and handlers
   1339  *
   1340  **********************************************************************/
   1341 static int
   1342 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1343 {
   1344 	device_t	dev = adapter->dev;
   1345 	struct ix_queue *que = adapter->queues;
   1346 	struct		tx_ring *txr = adapter->tx_rings;
   1347 	int 		error, rid, vector = 0;
   1348 	pci_chipset_tag_t pc;
   1349 	pcitag_t	tag;
   1350 	char		intrbuf[PCI_INTRSTR_LEN];
   1351 	char		intr_xname[32];
   1352 	const char	*intrstr = NULL;
   1353 	kcpuset_t	*affinity;
   1354 	int		cpu_id = 0;
   1355 
   1356 	pc = adapter->osdep.pc;
   1357 	tag = adapter->osdep.tag;
   1358 
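         	/* One MSI-X vector per queue plus one for the mailbox/link. */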
   1359 	adapter->osdep.nintrs = adapter->num_queues + 1;
   1360 	if (pci_msix_alloc_exact(pa,
   1361 	    &adapter->osdep.intrs, adapter->osdep.nintrs) != 0)
   1362 		return (ENXIO);
   1363 
   1364 	kcpuset_create(&affinity, false);
   1365 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1366 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   1367 		    device_xname(dev), i);
   1368 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1369 		    sizeof(intrbuf));
   1370 #ifdef IXV_MPSAFE
   1371 		pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1372 		    true);
   1373 #endif
   1374 		/* Set the handler function */
   1375 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   1376 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   1377 			intr_xname);
   1378 		if (que->res == NULL) {
   1379 			pci_intr_release(pc, adapter->osdep.intrs,
   1380 			    adapter->osdep.nintrs);
   1381 			aprint_error_dev(dev,
   1382 			    "Failed to register QUE handler");
   1383 			kcpuset_destroy(affinity);
   1384 			return (ENXIO);
   1385 		}
   1386 		que->msix = vector;
    1387 		adapter->active_queues |= (u64)1 << que->msix;
   1388 
   1389 		cpu_id = i;
   1390 		/* Round-robin affinity */
   1391 		kcpuset_zero(affinity);
   1392 		kcpuset_set(affinity, cpu_id % ncpu);
   1393 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1394 		    NULL);
   1395 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1396 		    intrstr);
   1397 		if (error == 0)
   1398 			aprint_normal(", bound queue %d to cpu %d\n",
   1399 			    i, cpu_id);
   1400 		else
   1401 			aprint_normal("\n");
   1402 
   1403 #ifndef IXGBE_LEGACY_TX
   1404 		txr->txr_si = softint_establish(SOFTINT_NET,
   1405 		    ixgbe_deferred_mq_start, txr);
   1406 #endif
   1407 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1408 		    que);
   1409 		if (que->que_si == NULL) {
   1410 			aprint_error_dev(dev,
   1411 			    "could not establish software interrupt\n");
   1412 		}
   1413 	}
   1414 
   1415 	/* and Mailbox */
   1416 	cpu_id++;
   1417 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   1418 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1419 	    sizeof(intrbuf));
   1420 #ifdef IXG_MPSAFE
   1421 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   1422 #endif
   1423 	/* Set the mbx handler function */
   1424 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   1425 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   1426 		intr_xname);
   1427 	if (adapter->osdep.ihs[vector] == NULL) {
   1428 		adapter->res = NULL;
   1429 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1430 		kcpuset_destroy(affinity);
   1431 		return (ENXIO);
   1432 	}
   1433 	/* Round-robin affinity */
   1434 	kcpuset_zero(affinity);
   1435 	kcpuset_set(affinity, cpu_id % ncpu);
   1436 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1437 
   1438 	aprint_normal_dev(dev,
   1439 	    "for link, interrupting at %s, ", intrstr);
   1440 	if (error == 0) {
   1441 		aprint_normal("affinity to cpu %d\n", cpu_id);
   1442 	}
   1443 	adapter->vector = vector;
   1444 	/* Tasklets for Mailbox */
   1445 	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1446 	    adapter);
   1447 	/*
   1448 	** Due to a broken design QEMU will fail to properly
   1449 	** enable the guest for MSIX unless the vectors in
   1450 	** the table are all set up, so we must rewrite the
   1451 	** ENABLE in the MSIX control register again at this
   1452 	** point to cause it to successfully initialize us.
   1453 	*/
   1454 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1455 		int msix_ctrl;
   1456 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1457 		rid += PCI_MSIX_CTL;
   1458 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1459 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1460 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1461 	}
   1462 
   1463 	kcpuset_destroy(affinity);
   1464 	return (0);
   1465 }
   1466 
   1467 /*
    1468  * Set up MSIX resources. Note that the VF
    1469  * device MUST use MSIX; there is no fallback.
   1470  */
   1471 static int
   1472 ixv_setup_msix(struct adapter *adapter)
   1473 {
   1474 	device_t dev = adapter->dev;
   1475 	int want, queues, msgs;
   1476 
   1477 	/* Must have at least 2 MSIX vectors */
   1478 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1479 	if (msgs < 2) {
   1480 		aprint_error_dev(dev,"MSIX config error\n");
   1481 		return (ENXIO);
   1482 	}
   1483 	msgs = MIN(msgs, IXG_MAX_NINTR);
   1484 
   1485 	/* Figure out a reasonable auto config value */
   1486 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
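         	/* One vector is always reserved for the mailbox, so at most
         	 * msgs - 1 vectors are usable for queues (see 'want' below). */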
   1487 
   1488 	if (ixv_num_queues != 0)
   1489 		queues = ixv_num_queues;
   1490 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   1491 		queues = IXGBE_VF_MAX_TX_QUEUES;
   1492 
   1493 	/*
   1494 	** Want vectors for the queues,
    1495 	** plus an additional one for the mailbox.
   1496 	*/
   1497 	want = queues + 1;
   1498 	if (msgs >= want) {
   1499 		msgs = want;
   1500 	} else {
   1501                	aprint_error_dev(dev,
   1502 		    "MSIX Configuration Problem, "
   1503 		    "%d vectors but %d queues wanted!\n",
   1504 		    msgs, want);
   1505 		return -1;
   1506 	}
   1507 
   1508 	adapter->msix_mem = (void *)1; /* XXX */
   1509 	aprint_normal_dev(dev,
   1510 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1511 	adapter->num_queues = queues;
   1512 	return (msgs);
   1513 }
   1514 
   1515 
   1516 static int
   1517 ixv_allocate_pci_resources(struct adapter *adapter,
   1518     const struct pci_attach_args *pa)
   1519 {
   1520 	pcireg_t	memtype;
   1521 	device_t        dev = adapter->dev;
   1522 	bus_addr_t addr;
   1523 	int flags;
   1524 
   1525 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1526 
   1527 	switch (memtype) {
   1528 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1529 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1530 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1531 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1532 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1533 			goto map_err;
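         		/* Device registers should not be mapped prefetchable,
         		 * so strip the hint if the BAR advertises it. */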
   1534 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1535 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1536 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1537 		}
   1538 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1539 		     adapter->osdep.mem_size, flags,
   1540 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1541 map_err:
   1542 			adapter->osdep.mem_size = 0;
   1543 			aprint_error_dev(dev, "unable to map BAR0\n");
   1544 			return ENXIO;
   1545 		}
   1546 		break;
   1547 	default:
   1548 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1549 		return ENXIO;
   1550 	}
   1551 
   1552 	/* Pick up the tuneable queues */
   1553 	adapter->num_queues = ixv_num_queues;
   1554 	adapter->hw.back = adapter;
   1555 
   1556 	/*
    1557 	** Now set up MSI-X; this should
    1558 	** return the number of
    1559 	** configured vectors.
   1560 	*/
   1561 	adapter->msix = ixv_setup_msix(adapter);
   1562 	if (adapter->msix == ENXIO)
   1563 		return (ENXIO);
   1564 	else
   1565 		return (0);
   1566 }
   1567 
   1568 static void
   1569 ixv_free_pci_resources(struct adapter * adapter)
   1570 {
   1571 	struct 		ix_queue *que = adapter->queues;
   1572 	int		rid;
   1573 
   1574 	/*
   1575 	**  Release all msix queue resources:
   1576 	*/
   1577 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1578 		if (que->res != NULL)
   1579 			pci_intr_disestablish(adapter->osdep.pc,
   1580 			    adapter->osdep.ihs[i]);
   1581 	}
   1582 
   1583 
   1584 	/* Clean the Legacy or Link interrupt last */
   1585 	if (adapter->vector) /* we are doing MSIX */
   1586 		rid = adapter->vector;
   1587 	else
   1588 		rid = 0;
   1589 
   1590 	if (adapter->osdep.ihs[rid] != NULL) {
   1591 		pci_intr_disestablish(adapter->osdep.pc,
   1592 		    adapter->osdep.ihs[rid]);
   1593 		adapter->osdep.ihs[rid] = NULL;
   1594 	}
   1595 
   1596 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1597 	    adapter->osdep.nintrs);
   1598 
   1599 	if (adapter->osdep.mem_size != 0) {
   1600 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1601 		    adapter->osdep.mem_bus_space_handle,
   1602 		    adapter->osdep.mem_size);
   1603 	}
   1604 
   1605 	return;
   1606 }
   1607 
   1608 /*********************************************************************
   1609  *
   1610  *  Setup networking device structure and register an interface.
   1611  *
   1612  **********************************************************************/
   1613 static void
   1614 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1615 {
   1616 	struct ethercom *ec = &adapter->osdep.ec;
   1617 	struct ifnet   *ifp;
   1618 
   1619 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1620 
   1621 	ifp = adapter->ifp = &ec->ec_if;
   1622 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1623 	ifp->if_baudrate = 1000000000;
   1624 	ifp->if_init = ixv_init;
   1625 	ifp->if_stop = ixv_ifstop;
   1626 	ifp->if_softc = adapter;
   1627 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1628 	ifp->if_ioctl = ixv_ioctl;
   1629 #ifndef IXGBE_LEGACY_TX
   1630 	ifp->if_transmit = ixgbe_mq_start;
   1631 #endif
   1632 	ifp->if_start = ixgbe_start;
   1633 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1634 
   1635 	if_initialize(ifp);
   1636 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1637 #ifndef IXGBE_LEGACY_TX
   1638 #if 0	/* We use per TX queue softint */
   1639 	if_deferred_start_init(ifp, ixgbe_deferred_mq_start);
   1640 #endif
   1641 #endif
   1642 	if_register(ifp);
   1643 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1644 
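        	/*
        	 * The frame size is the MTU plus the L2 header overhead,
        	 * including the VLAN tag (see IXGBE_MTU_HDR_VLAN).
        	 */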
   1645 	adapter->max_frame_size =
   1646 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
   1647 
   1648 	/*
   1649 	 * Tell the upper layer(s) we support long frames.
   1650 	 */
   1651 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1652 
   1653 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1654 	ifp->if_capenable = 0;
   1655 
   1656 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1657 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1658 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1659 	    		| ETHERCAP_VLAN_MTU;
   1660 	ec->ec_capenable = ec->ec_capabilities;
   1661 
   1662 	/* Don't enable LRO by default */
   1663 	ifp->if_capabilities |= IFCAP_LRO;
   1664 #if 0
   1665 	ifp->if_capenable = ifp->if_capabilities;
   1666 #endif
   1667 
   1668 	/*
   1669 	** Don't enable this by default: if VLANs are
   1670 	** created on another pseudo device (e.g. lagg),
   1671 	** VLAN events are not passed through, breaking
   1672 	** operation, but with HW FILTER off it works. If
   1673 	** using VLANs directly on this driver you can
   1674 	** enable this and get full hardware tag filtering.
   1675 	*/
   1676 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1677 
   1678 	/*
   1679 	 * Specify the media types supported by this adapter and register
   1680 	 * callbacks to update media and link information
   1681 	 */
   1682 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1683 		     ixv_media_status);
   1684 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1685 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1686 
   1687 	return;
   1688 }
   1689 
   1690 static void
   1691 ixv_config_link(struct adapter *adapter)
   1692 {
   1693 	struct ixgbe_hw *hw = &adapter->hw;
   1694 
   1695 	if (hw->mac.ops.check_link)
   1696 		hw->mac.ops.check_link(hw, &adapter->link_speed,
   1697 		    &adapter->link_up, FALSE);
   1698 }
   1699 
   1700 
   1701 /*********************************************************************
   1702  *
   1703  *  Enable transmit unit.
   1704  *
   1705  **********************************************************************/
   1706 static void
   1707 ixv_initialize_transmit_units(struct adapter *adapter)
   1708 {
   1709 	struct tx_ring	*txr = adapter->tx_rings;
   1710 	struct ixgbe_hw	*hw = &adapter->hw;
   1711 
   1712 
   1713 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1714 		u64	tdba = txr->txdma.dma_paddr;
   1715 		u32	txctrl, txdctl;
   1716 
   1717 		/* Set WTHRESH to 8, burst writeback */
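        		/* WTHRESH occupies bits 22:16 of TXDCTL, hence the shift by 16. */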
   1718 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1719 		txdctl |= (8 << 16);
   1720 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1721 
   1722 		/* Set the HW Tx Head and Tail indices */
   1723 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1724 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1725 
   1726 		/* Set Tx Tail register */
   1727 		txr->tail = IXGBE_VFTDT(i);
   1728 
   1729 		/* Set Ring parameters */
   1730 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1731 		       (tdba & 0x00000000ffffffffULL));
   1732 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1733 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1734 		    adapter->num_tx_desc *
   1735 		    sizeof(struct ixgbe_legacy_tx_desc));
   1736 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1737 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1738 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1739 
   1740 		/* Now enable */
   1741 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1742 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1743 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1744 	}
   1745 
   1746 	return;
   1747 }
   1748 
   1749 
   1750 /*********************************************************************
   1751  *
   1752  *  Set up receive registers and features.
   1753  *
   1754  **********************************************************************/
   1755 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1756 
   1757 static void
   1758 ixv_initialize_receive_units(struct adapter *adapter)
   1759 {
   1760 	struct	rx_ring	*rxr = adapter->rx_rings;
   1761 	struct ixgbe_hw	*hw = &adapter->hw;
   1762 	struct ifnet	*ifp = adapter->ifp;
   1763 	u32		bufsz, rxcsum, psrtype;
   1764 
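        	/*
        	 * SRRCTL.BSIZEPKT is expressed in 1 KB units, so the receive
        	 * buffer size (2 KB, or 4 KB for jumbo MTUs) is shifted down
        	 * by IXGBE_SRRCTL_BSIZEPKT_SHIFT before being programmed below.
        	 */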
   1765 	if (ifp->if_mtu > ETHERMTU)
   1766 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1767 	else
   1768 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1769 
   1770 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1771 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1772 	    IXGBE_PSRTYPE_L2HDR;
   1773 
   1774 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1775 
   1776 	/* Tell PF our max_frame size */
   1777 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1778 
   1779 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1780 		u64 rdba = rxr->rxdma.dma_paddr;
   1781 		u32 reg, rxdctl;
   1782 
   1783 		/* Disable the queue */
   1784 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1785 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1786 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
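        		/* Poll up to 10 * 1 ms for the disable to take effect. */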
   1787 		for (int j = 0; j < 10; j++) {
   1788 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1789 			    IXGBE_RXDCTL_ENABLE)
   1790 				msec_delay(1);
   1791 			else
   1792 				break;
   1793 		}
   1794 		wmb();
   1795 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1796 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1797 		    (rdba & 0x00000000ffffffffULL));
   1798 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1799 		    (rdba >> 32));
   1800 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1801 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1802 
   1803 		/* Reset the ring indices */
   1804 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1805 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1806 
   1807 		/* Set up the SRRCTL register */
   1808 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1809 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1810 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1811 		reg |= bufsz;
   1812 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1813 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1814 
   1815 		/* Capture Rx Tail index */
   1816 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1817 
   1818 		/* Do the queue enabling last */
   1819 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1820 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1821 		for (int k = 0; k < 10; k++) {
   1822 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1823 			    IXGBE_RXDCTL_ENABLE)
   1824 				break;
   1825 			else
   1826 				msec_delay(1);
   1827 		}
   1828 		wmb();
   1829 
   1830 		/* Set the Tail Pointer */
   1831 #ifdef DEV_NETMAP
   1832 		/*
   1833 		 * In netmap mode, we must preserve the buffers made
   1834 		 * available to userspace before the if_init()
   1835 		 * (this is true by default on the TX side, because
   1836 		 * init makes all buffers available to userspace).
   1837 		 *
   1838 		 * netmap_reset() and the device specific routines
   1839 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1840 		 * buffers at the end of the NIC ring, so here we
   1841 		 * must set the RDT (tail) register to make sure
   1842 		 * they are not overwritten.
   1843 		 *
   1844 		 * In this driver the NIC ring starts at RDH = 0,
   1845 		 * RDT points to the last slot available for reception (?),
   1846 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1847 		 */
   1848 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1849 			struct netmap_adapter *na = NA(adapter->ifp);
   1850 			struct netmap_kring *kring = &na->rx_rings[i];
   1851 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1852 
   1853 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1854 		} else
   1855 #endif /* DEV_NETMAP */
   1856 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1857 			    adapter->num_rx_desc - 1);
   1858 	}
   1859 
   1860 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1861 
   1862 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1863 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1864 
   1865 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1866 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1867 
   1868 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1869 
   1870 	return;
   1871 }
   1872 
   1873 static void
   1874 ixv_setup_vlan_support(struct adapter *adapter)
   1875 {
   1876 	struct ixgbe_hw *hw = &adapter->hw;
   1877 	u32		ctrl, vid, vfta, retry;
   1878 	struct rx_ring	*rxr;
   1879 
   1880 	/*
   1881 	** We get here through init_locked, meaning
   1882 	** a soft reset; that has already cleared
   1883 	** the VFTA and other state, so if no
   1884 	** VLANs have been registered, do nothing.
   1885 	*/
   1886 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1887 		return;
   1888 
   1889 	/* Enable the queues */
   1890 	for (int i = 0; i < adapter->num_queues; i++) {
   1891 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1892 		ctrl |= IXGBE_RXDCTL_VME;
   1893 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1894 		/*
   1895 		 * Let Rx path know that it needs to store VLAN tag
   1896 		 * as part of extra mbuf info.
   1897 		 */
   1898 		rxr = &adapter->rx_rings[i];
   1899 		rxr->vtag_strip = TRUE;
   1900 	}
   1901 
   1902 	/*
   1903 	** A soft reset zeroes out the VFTA, so
   1904 	** we need to repopulate it now.
   1905 	*/
   1906 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1907 		if (ixv_shadow_vfta[i] == 0)
   1908 			continue;
   1909 		vfta = ixv_shadow_vfta[i];
   1910 		/*
   1911 		** Reconstruct the VLAN IDs
   1912 		** from the bits set in each
   1913 		** 32-bit word of the shadow VFTA.
   1914 		*/
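        		/* For example, bit 3 set in word 2 yields VLAN ID 2 * 32 + 3 = 67. */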
   1915 		for (int j = 0; j < 32; j++) {
   1916 			retry = 0;
   1917 			if ((vfta & (1 << j)) == 0)
   1918 				continue;
   1919 			vid = (i * 32) + j;
   1920 			/* Call the shared code mailbox routine */
   1921 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1922 				if (++retry > 5)
   1923 					break;
   1924 			}
   1925 		}
   1926 	}
   1927 }
   1928 
   1929 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1930 /*
   1931 ** This routine is run via a VLAN config EVENT;
   1932 ** it enables us to use the HW filter table since
   1933 ** we can get the VLAN ID. This just creates the
   1934 ** entry in the soft version of the VFTA; init will
   1935 ** repopulate the real table.
   1936 */
   1937 static void
   1938 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1939 {
   1940 	struct adapter	*adapter = ifp->if_softc;
   1941 	u16		index, bit;
   1942 
   1943 	if (ifp->if_softc != arg) /* Not our event */
   1944 		return;
   1945 
   1946 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1947 		return;
   1948 
   1949 	IXGBE_CORE_LOCK(adapter);
   1950 	index = (vtag >> 5) & 0x7F;
   1951 	bit = vtag & 0x1F;
   1952 	ixv_shadow_vfta[index] |= (1 << bit);
   1953 	/* Re-init to load the changes */
   1954 	ixv_init_locked(adapter);
   1955 	IXGBE_CORE_UNLOCK(adapter);
   1956 }
   1957 
   1958 /*
   1959 ** This routine is run via a VLAN
   1960 ** unconfig EVENT; it removes our entry
   1961 ** from the soft VFTA.
   1962 */
   1963 static void
   1964 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1965 {
   1966 	struct adapter	*adapter = ifp->if_softc;
   1967 	u16		index, bit;
   1968 
   1969 	if (ifp->if_softc !=  arg)
   1970 		return;
   1971 
   1972 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1973 		return;
   1974 
   1975 	IXGBE_CORE_LOCK(adapter);
   1976 	index = (vtag >> 5) & 0x7F;
   1977 	bit = vtag & 0x1F;
   1978 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1979 	/* Re-init to load the changes */
   1980 	ixv_init_locked(adapter);
   1981 	IXGBE_CORE_UNLOCK(adapter);
   1982 }
   1983 #endif
   1984 
   1985 static void
   1986 ixv_enable_intr(struct adapter *adapter)
   1987 {
   1988 	struct ixgbe_hw *hw = &adapter->hw;
   1989 	struct ix_queue *que = adapter->queues;
   1990 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1991 
   1992 
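        	/*
        	 * Enable the non-queue causes here; the per-queue vectors are
        	 * enabled below via ixv_enable_queue().  VTEIAC then marks the
        	 * queue causes as auto-cleared, while the mailbox/link causes
        	 * (OTHER/LSC) must be cleared explicitly.
        	 */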
   1993 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1994 
   1995 	mask = IXGBE_EIMS_ENABLE_MASK;
   1996 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1997 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1998 
   1999 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2000 		ixv_enable_queue(adapter, que->msix);
   2001 
   2002 	IXGBE_WRITE_FLUSH(hw);
   2003 
   2004 	return;
   2005 }
   2006 
   2007 static void
   2008 ixv_disable_intr(struct adapter *adapter)
   2009 {
   2010 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2011 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   2012 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2013 	return;
   2014 }
   2015 
   2016 /*
   2017 ** Set up the correct IVAR register for a particular MSIX interrupt
   2018 **  - entry is the register array entry
   2019 **  - vector is the MSIX vector for this queue
   2020 **  - type is RX/TX/MISC
   2021 */
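        /*
        ** Each VTIVAR register covers two queues, with one byte per RX and
        ** TX vector: index = 16 * (entry & 1) + 8 * type selects the byte
        ** lane and (entry >> 1) selects the register.
        */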
   2022 static void
   2023 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2024 {
   2025 	struct ixgbe_hw *hw = &adapter->hw;
   2026 	u32 ivar, index;
   2027 
   2028 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2029 
   2030 	if (type == -1) { /* MISC IVAR */
   2031 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2032 		ivar &= ~0xFF;
   2033 		ivar |= vector;
   2034 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2035 	} else {	/* RX/TX IVARS */
   2036 		index = (16 * (entry & 1)) + (8 * type);
   2037 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2038 		ivar &= ~(0xFF << index);
   2039 		ivar |= (vector << index);
   2040 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2041 	}
   2042 }
   2043 
   2044 static void
   2045 ixv_configure_ivars(struct adapter *adapter)
   2046 {
   2047 	struct  ix_queue *que = adapter->queues;
   2048 
   2049 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2050 		/* First the RX queue entry */
   2051 		ixv_set_ivar(adapter, i, que->msix, 0);
   2052 		/* ... and the TX */
   2053 		ixv_set_ivar(adapter, i, que->msix, 1);
   2054 		/* Set an initial value in EITR */
   2055 		IXGBE_WRITE_REG(&adapter->hw,
   2056 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   2057 	}
   2058 
   2059 	/* For the mailbox interrupt */
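        	/* type -1 routes this through the misc IVAR (VTIVAR_MISC). */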
   2060 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2061 }
   2062 
   2063 
   2064 /*
   2065 ** Deferred handler for MSIX mailbox (MBX) interrupts
   2066 **  - runs outside hard interrupt context since it might sleep
   2067 */
   2068 static void
   2069 ixv_handle_mbx(void *context)
   2070 {
   2071 	struct adapter  *adapter = context;
   2072 
   2073 	ixgbe_check_link(&adapter->hw,
   2074 	    &adapter->link_speed, &adapter->link_up, 0);
   2075 	ixv_update_link_status(adapter);
   2076 }
   2077 
   2078 /*
   2079 ** The VF stats registers never have a truly virgin
   2080 ** starting point, so this routine tries to make an
   2081 ** artificial one, marking ground zero on attach as
   2082 ** it were.
   2083 */
   2084 static void
   2085 ixv_save_stats(struct adapter *adapter)
   2086 {
   2087 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2088 
   2089 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2090 		stats->saved_reset_vfgprc +=
   2091 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2092 		stats->saved_reset_vfgptc +=
   2093 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2094 		stats->saved_reset_vfgorc +=
   2095 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2096 		stats->saved_reset_vfgotc +=
   2097 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2098 		stats->saved_reset_vfmprc +=
   2099 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2100 	}
   2101 }
   2102 
   2103 static void
   2104 ixv_init_stats(struct adapter *adapter)
   2105 {
   2106 	struct ixgbe_hw *hw = &adapter->hw;
   2107 
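        	/*
        	 * The good octet counters (VFGORC/VFGOTC) are wider than 32 bits
        	 * and split across _LSB/_MSB registers, hence the shift by 32
        	 * when reassembling them below.
        	 */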
   2108 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2109 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2110 	adapter->stats.vf.last_vfgorc |=
   2111 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2112 
   2113 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2114 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2115 	adapter->stats.vf.last_vfgotc |=
   2116 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2117 
   2118 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2119 
   2120 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2121 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2122 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2123 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2124 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2125 }
   2126 
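        /*
        ** UPDATE_STAT_32: the VF hardware counter is only 32 bits wide.  When
        ** it wraps (current < last), a carry of 2^32 is added to the 64-bit
        ** event counter; the low 32 bits always mirror the register.
        */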
   2127 #define UPDATE_STAT_32(reg, last, count)		\
   2128 {							\
   2129 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2130 	if (current < last)				\
   2131 		count.ev_count += 0x100000000LL;	\
   2132 	last = current;					\
   2133 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2134 	count.ev_count |= current;			\
   2135 }
   2136 
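        /*
        ** UPDATE_STAT_36: same idea for the 36-bit octet counters split across
        ** an LSB/MSB register pair; a wrap carries 2^36 into the total.
        */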
   2137 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2138 {							\
   2139 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2140 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2141 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2142 	if (current < last)				\
   2143 		count.ev_count += 0x1000000000LL;	\
   2144 	last = current;					\
   2145 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2146 	count.ev_count |= current;			\
   2147 }
   2148 
   2149 /*
   2150 ** ixv_update_stats - Update the board statistics counters.
   2151 */
   2152 void
   2153 ixv_update_stats(struct adapter *adapter)
   2154 {
   2155 	struct ixgbe_hw *hw = &adapter->hw;
   2156 
   2157 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
   2158 	    adapter->stats.vf.vfgprc);
   2159 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
   2160 	    adapter->stats.vf.vfgptc);
   2161 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
   2162 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
   2163 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
   2164 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
   2165 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
   2166 	    adapter->stats.vf.vfmprc);
   2167 }
   2168 
   2169 /*
   2170  * Add statistic sysctls for the VF.
   2171  */
   2172 static void
   2173 ixv_add_stats_sysctls(struct adapter *adapter)
   2174 {
   2175 	device_t dev = adapter->dev;
   2176 	struct ix_queue *que = &adapter->queues[0];
   2177 	struct tx_ring *txr = que->txr;
   2178 	struct rx_ring *rxr = que->rxr;
   2179 
   2180 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2181 
   2182 	const char *xname = device_xname(dev);
   2183 
   2184 	/* Driver Statistics */
   2185 	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
   2186 	    NULL, xname, "Driver dropped packets");
   2187 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2188 	    NULL, xname, "m_defrag() failed");
   2189 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2190 	    NULL, xname, "Watchdog timeouts");
   2191 
   2192 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2193 	    xname, "Good Packets Received");
   2194 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2195 	    xname, "Good Octets Received");
   2196 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2197 	    xname, "Multicast Packets Received");
   2198 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2199 	    xname, "Good Packets Transmitted");
   2200 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2201 	    xname, "Good Octets Transmitted");
   2202 	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
   2203 	    xname, "IRQs on queue");
   2204 	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
   2205 	    xname, "RX irqs on queue");
   2206 	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
   2207 	    xname, "RX packets");
   2208 	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
   2209 	    xname, "RX bytes");
   2210 	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
   2211 	    xname, "Discarded RX packets");
   2212 	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
   2213 	    xname, "TX Packets");
   2214 	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
   2215 	    xname, "# of times not enough descriptors were available during TX");
   2216 	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
   2217 	    xname, "TX TSO");
   2218 }
   2219 
   2220 static void
   2221 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2222 	const char *description, int *limit, int value)
   2223 {
   2224 	device_t dev =  adapter->dev;
   2225 	struct sysctllog **log;
   2226 	const struct sysctlnode *rnode, *cnode;
   2227 
   2228 	log = &adapter->sysctllog;
   2229 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2230 		aprint_error_dev(dev, "could not create sysctl root\n");
   2231 		return;
   2232 	}
   2233 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2234 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2235 	    name, SYSCTL_DESCR(description),
   2236 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2237 		aprint_error_dev(dev, "could not create sysctl\n");
   2238 	*limit = value;
   2239 }
   2240 
   2241 /**********************************************************************
   2242  *
   2243  *  This routine is called only when the debug sysctl is set.
   2244  *  It provides a way to take a look at important statistics
   2245  *  maintained by the driver and hardware.
   2246  *
   2247  **********************************************************************/
   2248 static void
   2249 ixv_print_debug_info(struct adapter *adapter)
   2250 {
   2251 	device_t dev = adapter->dev;
   2252 	struct ixgbe_hw	*hw = &adapter->hw;
   2253 	struct ix_queue	*que = adapter->queues;
   2254 	struct rx_ring	*rxr;
   2255 	struct tx_ring	*txr;
   2256 #ifdef LRO
   2257 	struct lro_ctrl	*lro;
   2258 #endif /* LRO */
   2259 
   2260 	device_printf(dev, "Error Byte Count = %u\n",
   2261 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2262 
   2263 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2264 		txr = que->txr;
   2265 		rxr = que->rxr;
   2266 #ifdef LRO
   2267 		lro = &rxr->lro;
   2268 #endif /* LRO */
   2269 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2270 		    que->msix, (unsigned long)que->irqs.ev_count);
   2271 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2272 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2273 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2274 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
   2275 #ifdef LRO
   2276 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2277 		    rxr->me, (long long)lro->lro_queued);
   2278 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2279 		    rxr->me, (long long)lro->lro_flushed);
   2280 #endif /* LRO */
   2281 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2282 		    txr->me, (unsigned long)txr->total_packets.ev_count);
   2283 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2284 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
   2285 	}
   2286 
   2287 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2288 	    (unsigned long)adapter->link_irq.ev_count);
   2289 	return;
   2290 }
   2291 
   2292 static int
   2293 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2294 {
   2295 	struct sysctlnode node;
   2296 	int error, result;
   2297 	struct adapter *adapter;
   2298 
   2299 	node = *rnode;
   2300 	adapter = (struct adapter *)node.sysctl_data;
   2301 	node.sysctl_data = &result;
   2302 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2303 
   2304 	if (error)
   2305 		return error;
   2306 
   2307 	if (result == 1)
   2308 		ixv_print_debug_info(adapter);
   2309 
   2310 	return 0;
   2311 }
   2312 
   2313 const struct sysctlnode *
   2314 ixv_sysctl_instance(struct adapter *adapter)
   2315 {
   2316 	const char *dvname;
   2317 	struct sysctllog **log;
   2318 	int rc;
   2319 	const struct sysctlnode *rnode;
   2320 
   2321 	log = &adapter->sysctllog;
   2322 	dvname = device_xname(adapter->dev);
   2323 
   2324 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2325 	    0, CTLTYPE_NODE, dvname,
   2326 	    SYSCTL_DESCR("ixv information and settings"),
   2327 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2328 		goto err;
   2329 
   2330 	return rnode;
   2331 err:
   2332 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2333 	return NULL;
   2334 }
   2335