      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.36 2017/02/07 04:20:59 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixgbe.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.4.6-k";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
     50  *  Used by probe to select devices to load on
     51  *  Last field stores an index into ixv_strings
     52  *  Last entry must be all 0s
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     63 	/* required last entry */
     64 	{0, 0, 0, 0, 0}
     65 };
     66 
     67 /*********************************************************************
     68  *  Table of branding strings
     69  *********************************************************************/
     70 
     71 static const char    *ixv_strings[] = {
     72 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     73 };
     74 
     75 /*********************************************************************
     76  *  Function prototypes
     77  *********************************************************************/
     78 static int      ixv_probe(device_t, cfdata_t, void *);
     79 static void	ixv_attach(device_t, device_t, void *);
     80 static int      ixv_detach(device_t, int);
     81 #if 0
     82 static int      ixv_shutdown(device_t);
     83 #endif
     84 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     85 static int	ixv_init(struct ifnet *);
     86 static void	ixv_init_locked(struct adapter *);
     87 static void     ixv_stop(void *);
     88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     89 static int      ixv_media_change(struct ifnet *);
     90 static void     ixv_identify_hardware(struct adapter *);
     91 static int      ixv_allocate_pci_resources(struct adapter *,
     92 		    const struct pci_attach_args *);
     93 static int      ixv_allocate_msix(struct adapter *,
     94 		    const struct pci_attach_args *);
     95 static int	ixv_setup_msix(struct adapter *);
     96 static void	ixv_free_pci_resources(struct adapter *);
     97 static void     ixv_local_timer(void *);
     98 static void     ixv_local_timer_locked(void *);
     99 static void     ixv_setup_interface(device_t, struct adapter *);
    100 static void     ixv_config_link(struct adapter *);
    101 
    102 static void     ixv_initialize_transmit_units(struct adapter *);
    103 static void     ixv_initialize_receive_units(struct adapter *);
    104 
    105 static void     ixv_enable_intr(struct adapter *);
    106 static void     ixv_disable_intr(struct adapter *);
    107 static void     ixv_set_multi(struct adapter *);
    108 static void     ixv_update_link_status(struct adapter *);
    109 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    110 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    111 static void	ixv_configure_ivars(struct adapter *);
    112 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    113 
    114 static void	ixv_setup_vlan_support(struct adapter *);
    115 #if 0
    116 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    117 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    118 #endif
    119 
    120 static void	ixv_save_stats(struct adapter *);
    121 static void	ixv_init_stats(struct adapter *);
    122 static void	ixv_update_stats(struct adapter *);
    123 static void	ixv_add_stats_sysctls(struct adapter *);
    124 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    125 		    const char *, int *, int);
    126 
    127 /* The MSI/X Interrupt handlers */
    128 static int	ixv_msix_que(void *);
    129 static int	ixv_msix_mbx(void *);
    130 
    131 /* Deferred interrupt tasklets */
    132 static void	ixv_handle_que(void *);
    133 static void	ixv_handle_mbx(void *);
    134 
    135 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    136 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    137 
    138 #ifdef DEV_NETMAP
    139 /*
    140  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    141  * if_ix.c.
    142  */
    143 extern void ixgbe_netmap_attach(struct adapter *adapter);
    144 
    145 #include <net/netmap.h>
    146 #include <sys/selinfo.h>
    147 #include <dev/netmap/netmap_kern.h>
    148 #endif /* DEV_NETMAP */
    149 
    150 /*********************************************************************
     151  *  Device Interface Entry Points
    152  *********************************************************************/
    153 
    154 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    155     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    156     DVF_DETACH_SHUTDOWN);
    157 
    158 # if 0
    159 static device_method_t ixv_methods[] = {
    160 	/* Device interface */
    161 	DEVMETHOD(device_probe, ixv_probe),
    162 	DEVMETHOD(device_attach, ixv_attach),
    163 	DEVMETHOD(device_detach, ixv_detach),
    164 	DEVMETHOD(device_shutdown, ixv_shutdown),
    165 	DEVMETHOD_END
    166 };
    167 #endif
    168 
    169 #if 0
    170 static driver_t ixv_driver = {
    171 	"ixv", ixv_methods, sizeof(struct adapter),
    172 };
    173 
    174 devclass_t ixv_devclass;
    175 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    176 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    177 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    178 #ifdef DEV_NETMAP
    179 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    180 #endif /* DEV_NETMAP */
    181 /* XXX depend on 'ix' ? */
    182 #endif
    183 
    184 /*
    185 ** TUNEABLE PARAMETERS:
    186 */
    187 
    188 /* Number of Queues - do not exceed MSIX vectors - 1 */
    189 static int ixv_num_queues = 1;
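         /*
          * NetBSD has no FreeBSD-style loader tunables; TUNABLE_INT() is
          * defined to nothing so the declarations below are no-ops here.
          */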
    190 #define	TUNABLE_INT(__x, __y)
    191 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    192 
    193 /*
    194 ** AIM: Adaptive Interrupt Moderation
    195 ** which means that the interrupt rate
    196 ** is varied over time based on the
    197 ** traffic for that interrupt vector
    198 */
    199 static int ixv_enable_aim = FALSE;
    200 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    201 
    202 /* How many packets rxeof tries to clean at a time */
    203 static int ixv_rx_process_limit = 256;
    204 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    205 
    206 /* How many packets txeof tries to clean at a time */
    207 static int ixv_tx_process_limit = 256;
    208 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    209 
    210 /*
     211 ** Number of TX descriptors per ring;
     212 ** set higher than RX as this seems to be
     213 ** the better-performing choice.
    214 */
    215 static int ixv_txd = DEFAULT_TXD;
    216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    217 
    218 /* Number of RX descriptors per ring */
    219 static int ixv_rxd = DEFAULT_RXD;
    220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    221 
    222 /*
     223 ** Shadow VFTA table.  This is needed because
    224 ** the real filter table gets cleared during
    225 ** a soft reset and we need to repopulate it.
    226 */
    227 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    228 
    229 /*********************************************************************
    230  *  Device identification routine
    231  *
     232  *  ixv_probe determines if the driver should be loaded on
     233  *  an adapter based on its PCI vendor/device ID.
     234  *
     235  *  return 1 on a match, 0 otherwise
    236  *********************************************************************/
    237 
    238 static int
    239 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    240 {
    241 #ifdef __HAVE_PCI_MSI_MSIX
    242 	const struct pci_attach_args *pa = aux;
    243 
    244 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    245 #else
    246 	return 0;
    247 #endif
    248 }
    249 
    250 static ixgbe_vendor_info_t *
    251 ixv_lookup(const struct pci_attach_args *pa)
    252 {
    253 	pcireg_t subid;
    254 	ixgbe_vendor_info_t *ent;
    255 
    256 	INIT_DEBUGOUT("ixv_lookup: begin");
    257 
    258 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    259 		return NULL;
    260 
    261 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    262 
    263 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    264 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    265 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    266 
    267 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    268 		     (ent->subvendor_id == 0)) &&
    269 
    270 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    271 		     (ent->subdevice_id == 0))) {
    272 			return ent;
    273 		}
    274 	}
    275 	return NULL;
    276 }
    277 
    278 
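         /*
          * Create this device's private sysctl nodes ("debug" and
          * "enable_aim") under the driver's sysctl subtree.
          */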
    279 static void
    280 ixv_sysctl_attach(struct adapter *adapter)
    281 {
    282 	struct sysctllog **log;
    283 	const struct sysctlnode *rnode, *cnode;
    284 	device_t dev;
    285 
    286 	dev = adapter->dev;
    287 	log = &adapter->sysctllog;
    288 
    289 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    290 		aprint_error_dev(dev, "could not create sysctl root\n");
    291 		return;
    292 	}
    293 
    294 	if (sysctl_createv(log, 0, &rnode, &cnode,
    295 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    296 	    "debug", SYSCTL_DESCR("Debug Info"),
    297 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    298 		aprint_error_dev(dev, "could not create sysctl\n");
    299 
    300 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    301 	 * XXX It's that way in the FreeBSD driver that this derives from.
    302 	 */
    303 	if (sysctl_createv(log, 0, &rnode, &cnode,
    304 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    305 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    306 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    307 		aprint_error_dev(dev, "could not create sysctl\n");
    308 }
    309 
    310 /*********************************************************************
    311  *  Device initialization routine
    312  *
    313  *  The attach entry point is called when the driver is being loaded.
    314  *  This routine identifies the type of hardware, allocates all resources
    315  *  and initializes the hardware.
    316  *
     317  *  In this port attach returns void; errors are reported via aprint_error_dev().
    318  *********************************************************************/
    319 
    320 static void
    321 ixv_attach(device_t parent, device_t dev, void *aux)
    322 {
    323 	struct adapter *adapter;
    324 	struct ixgbe_hw *hw;
    325 	int             error = 0;
    326 	ixgbe_vendor_info_t *ent;
    327 	const struct pci_attach_args *pa = aux;
    328 
    329 	INIT_DEBUGOUT("ixv_attach: begin");
    330 
    331 	/* Allocate, clear, and link in our adapter structure */
    332 	adapter = device_private(dev);
    333 	adapter->dev = dev;
    334 	hw = &adapter->hw;
    335 
    336 #ifdef DEV_NETMAP
    337 	adapter->init_locked = ixv_init_locked;
    338 	adapter->stop_locked = ixv_stop;
    339 #endif
    340 
    341 	adapter->osdep.pc = pa->pa_pc;
    342 	adapter->osdep.tag = pa->pa_tag;
    343 	adapter->osdep.dmat = pa->pa_dmat;
    344 	adapter->osdep.attached = false;
    345 
    346 	ent = ixv_lookup(pa);
    347 
    348 	KASSERT(ent != NULL);
    349 
    350 	aprint_normal(": %s, Version - %s\n",
    351 	    ixv_strings[ent->index], ixv_driver_version);
    352 
    353 	/* Core Lock Init*/
    354 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    355 
    356 	/* SYSCTL APIs */
    357 	ixv_sysctl_attach(adapter);
    358 
    359 	/* Set up the timer callout */
    360 	callout_init(&adapter->timer, 0);
    361 
    362 	/* Determine hardware revision */
    363 	ixv_identify_hardware(adapter);
    364 
    365 	/* Do base PCI setup - map BAR0 */
    366 	if (ixv_allocate_pci_resources(adapter, pa)) {
    367 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    368 		error = ENXIO;
    369 		goto err_out;
    370 	}
    371 
    372 	/* Sysctls for limiting the amount of work done in the taskqueues */
    373 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    374 	    "max number of rx packets to process",
    375 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    376 
    377 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    378 	    "max number of tx packets to process",
    379 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    380 
    381 	/* Do descriptor calc and sanity checks */
    382 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    383 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    384 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    385 		adapter->num_tx_desc = DEFAULT_TXD;
    386 	} else
    387 		adapter->num_tx_desc = ixv_txd;
    388 
    389 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    390 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    391 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    392 		adapter->num_rx_desc = DEFAULT_RXD;
    393 	} else
    394 		adapter->num_rx_desc = ixv_rxd;
    395 
    396 	/* Allocate our TX/RX Queues */
    397 	if (ixgbe_allocate_queues(adapter)) {
    398 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    399 		error = ENOMEM;
    400 		goto err_out;
    401 	}
    402 
    403 	/*
     404 	** Initialize the shared code; it is
     405 	** at this point that the mac type is set.
    406 	*/
    407 	error = ixgbe_init_shared_code(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    410 		error = EIO;
    411 		goto err_late;
    412 	}
    413 
    414 	/* Setup the mailbox */
    415 	ixgbe_init_mbx_params_vf(hw);
    416 
    417 	/* Reset mbox api to 1.0 */
    418 	error = ixgbe_reset_hw(hw);
    419 	if (error == IXGBE_ERR_RESET_FAILED)
    420 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    421 	else if (error)
    422 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    423 	if (error) {
    424 		error = EIO;
    425 		goto err_late;
    426 	}
    427 
    428 	/* Negotiate mailbox API version */
    429 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    430 	if (error)
    431 		aprint_debug_dev(dev,
    432 		    "MBX API 1.1 negotiation failed! Error %d\n", error);
    433 
    434 	error = ixgbe_init_hw(hw);
    435 	if (error) {
    436 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    437 		error = EIO;
    438 		goto err_late;
    439 	}
    440 
    441 	error = ixv_allocate_msix(adapter, pa);
    442 	if (error) {
    443 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    444 		goto err_late;
    445 	}
    446 
    447 	/* If no mac address was assigned, make a random one */
    448 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    449 		u8 addr[ETHER_ADDR_LEN];
    450 		uint64_t rndval = cprng_fast64();
    451 
    452 		memcpy(addr, &rndval, sizeof(addr));
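         		/* Clear the multicast bit and set the locally-administered
         		 * bit so the random address is a valid unicast LAA. */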
    453 		addr[0] &= 0xFE;
    454 		addr[0] |= 0x02;
    455 		bcopy(addr, hw->mac.addr, sizeof(addr));
    456 	}
    457 
    458 	/* Setup OS specific network interface */
    459 	ixv_setup_interface(dev, adapter);
    460 
    461 	/* Do the stats setup */
    462 	ixv_save_stats(adapter);
    463 	ixv_init_stats(adapter);
    464 	ixv_add_stats_sysctls(adapter);
    465 
    466 	/* Register for VLAN events */
    467 #if 0 /* XXX delete after write? */
    468 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    469 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    470 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    471 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    472 #endif
    473 
    474 #ifdef DEV_NETMAP
    475 	ixgbe_netmap_attach(adapter);
    476 #endif /* DEV_NETMAP */
    477 	INIT_DEBUGOUT("ixv_attach: end");
    478 	adapter->osdep.attached = true;
    479 	return;
    480 
    481 err_late:
    482 	ixgbe_free_transmit_structures(adapter);
    483 	ixgbe_free_receive_structures(adapter);
    484 err_out:
    485 	ixv_free_pci_resources(adapter);
    486 	return;
    487 
    488 }
    489 
    490 /*********************************************************************
    491  *  Device removal routine
    492  *
    493  *  The detach entry point is called when the driver is being removed.
    494  *  This routine stops the adapter and deallocates all the resources
    495  *  that were allocated for driver operation.
    496  *
    497  *  return 0 on success, positive on failure
    498  *********************************************************************/
    499 
    500 static int
    501 ixv_detach(device_t dev, int flags)
    502 {
    503 	struct adapter *adapter = device_private(dev);
    504 	struct ix_queue *que = adapter->queues;
    505 
    506 	INIT_DEBUGOUT("ixv_detach: begin");
    507 	if (adapter->osdep.attached == false)
    508 		return 0;
    509 
    510 #if NVLAN > 0
     511 	/* Make sure VLANs are not using the driver */
    512 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    513 		;	/* nothing to do: no VLANs */
    514 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    515 		vlan_ifdetach(adapter->ifp);
    516 	else {
    517 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    518 		return EBUSY;
    519 	}
    520 #endif
    521 
    522 	IXGBE_CORE_LOCK(adapter);
    523 	ixv_stop(adapter);
    524 	IXGBE_CORE_UNLOCK(adapter);
    525 
    526 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    527 #ifndef IXGBE_LEGACY_TX
    528 		struct tx_ring *txr = adapter->tx_rings;
    529 
    530 		softint_disestablish(txr->txr_si);
    531 #endif
    532 		softint_disestablish(que->que_si);
    533 	}
    534 
    535 	/* Drain the Mailbox(link) queue */
    536 	softint_disestablish(adapter->link_si);
    537 
    538 	/* Unregister VLAN events */
    539 #if 0 /* XXX msaitoh delete after write? */
    540 	if (adapter->vlan_attach != NULL)
    541 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    542 	if (adapter->vlan_detach != NULL)
    543 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    544 #endif
    545 
    546 	ether_ifdetach(adapter->ifp);
    547 	callout_halt(&adapter->timer, NULL);
    548 #ifdef DEV_NETMAP
    549 	netmap_detach(adapter->ifp);
    550 #endif /* DEV_NETMAP */
    551 	ixv_free_pci_resources(adapter);
    552 #if 0 /* XXX the NetBSD port is probably missing something here */
    553 	bus_generic_detach(dev);
    554 #endif
    555 	if_detach(adapter->ifp);
    556 
    557 	ixgbe_free_transmit_structures(adapter);
    558 	ixgbe_free_receive_structures(adapter);
    559 
    560 	IXGBE_CORE_LOCK_DESTROY(adapter);
    561 	return (0);
    562 }
    563 
    564 /*********************************************************************
    565  *
    566  *  Shutdown entry point
    567  *
    568  **********************************************************************/
    569 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    570 static int
    571 ixv_shutdown(device_t dev)
    572 {
    573 	struct adapter *adapter = device_private(dev);
    574 	IXGBE_CORE_LOCK(adapter);
    575 	ixv_stop(adapter);
    576 	IXGBE_CORE_UNLOCK(adapter);
    577 	return (0);
    578 }
    579 #endif
    580 
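         /*
          * Interface-flags callback, registered via ether_set_ifflags_cb().
          * ether_ioctl() invokes it on SIOCSIFFLAGS; returning ENETRESET
          * tells the caller that the interface must be reinitialized.
          */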
    581 static int
    582 ixv_ifflags_cb(struct ethercom *ec)
    583 {
    584 	struct ifnet *ifp = &ec->ec_if;
    585 	struct adapter *adapter = ifp->if_softc;
    586 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    587 
    588 	IXGBE_CORE_LOCK(adapter);
    589 
    590 	if (change != 0)
    591 		adapter->if_flags = ifp->if_flags;
    592 
    593 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    594 		rc = ENETRESET;
    595 
    596 	IXGBE_CORE_UNLOCK(adapter);
    597 
    598 	return rc;
    599 }
    600 
    601 /*********************************************************************
    602  *  Ioctl entry point
    603  *
    604  *  ixv_ioctl is called when the user wants to configure the
    605  *  interface.
    606  *
    607  *  return 0 on success, positive on failure
    608  **********************************************************************/
    609 
    610 static int
    611 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    612 {
    613 	struct adapter	*adapter = ifp->if_softc;
    614 	struct ifcapreq *ifcr = data;
    615 	struct ifreq	*ifr = (struct ifreq *) data;
    616 	int             error = 0;
    617 	int l4csum_en;
    618 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    619 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    620 
    621 	switch (command) {
    622 	case SIOCSIFFLAGS:
    623 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    624 		break;
    625 	case SIOCADDMULTI:
    626 	case SIOCDELMULTI:
    627 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    628 		break;
    629 	case SIOCSIFMEDIA:
    630 	case SIOCGIFMEDIA:
    631 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    632 		break;
    633 	case SIOCSIFCAP:
    634 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    635 		break;
    636 	case SIOCSIFMTU:
    637 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    638 		break;
    639 	default:
    640 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    641 		break;
    642 	}
    643 
    644 	switch (command) {
    645 	case SIOCSIFMEDIA:
    646 	case SIOCGIFMEDIA:
    647 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    648 	case SIOCSIFCAP:
    649 		/* Layer-4 Rx checksum offload has to be turned on and
    650 		 * off as a unit.
    651 		 */
    652 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    653 		if (l4csum_en != l4csum && l4csum_en != 0)
    654 			return EINVAL;
    655 		/*FALLTHROUGH*/
    656 	case SIOCADDMULTI:
    657 	case SIOCDELMULTI:
    658 	case SIOCSIFFLAGS:
    659 	case SIOCSIFMTU:
    660 	default:
    661 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    662 			return error;
    663 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    664 			;
    665 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    666 			IXGBE_CORE_LOCK(adapter);
    667 			ixv_init_locked(adapter);
    668 			IXGBE_CORE_UNLOCK(adapter);
    669 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    670 			/*
    671 			 * Multicast list has changed; set the hardware filter
    672 			 * accordingly.
    673 			 */
    674 			IXGBE_CORE_LOCK(adapter);
    675 			ixv_disable_intr(adapter);
    676 			ixv_set_multi(adapter);
    677 			ixv_enable_intr(adapter);
    678 			IXGBE_CORE_UNLOCK(adapter);
    679 		}
    680 		return 0;
    681 	}
    682 }
    683 
    684 /*********************************************************************
    685  *  Init entry point
    686  *
     687  *  This routine is used in two ways. It is used by the stack as the
     688  *  init entry point in the network interface structure. It is also used
    689  *  by the driver as a hw/sw initialization routine to get to a
    690  *  consistent state.
    691  *
    692  *  return 0 on success, positive on failure
    693  **********************************************************************/
    694 #define IXGBE_MHADD_MFS_SHIFT 16
    695 
    696 static void
    697 ixv_init_locked(struct adapter *adapter)
    698 {
    699 	struct ifnet	*ifp = adapter->ifp;
    700 	device_t 	dev = adapter->dev;
    701 	struct ixgbe_hw *hw = &adapter->hw;
    702 	int error = 0;
    703 
    704 	INIT_DEBUGOUT("ixv_init_locked: begin");
    705 	KASSERT(mutex_owned(&adapter->core_mtx));
    706 	hw->adapter_stopped = FALSE;
    707 	ixgbe_stop_adapter(hw);
     708 	callout_stop(&adapter->timer);
     709 
     710 	/* Reprogram RAR[0] in case the user changed it. */
     711 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
     712 
     713 	/* Get the latest MAC address; the user may have set an LAA. */
     714 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
     715 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
     716 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    717 	hw->addr_ctrl.rar_used_count = 1;
    718 
    719 	/* Prepare transmit descriptors and buffers */
    720 	if (ixgbe_setup_transmit_structures(adapter)) {
    721 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    722 		ixv_stop(adapter);
    723 		return;
    724 	}
    725 
    726 	/* Reset VF and renegotiate mailbox API version */
    727 	ixgbe_reset_hw(hw);
    728 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    729 	if (error)
    730 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    731 
    732 	ixv_initialize_transmit_units(adapter);
    733 
    734 	/* Setup Multicast table */
    735 	ixv_set_multi(adapter);
    736 
    737 	/*
    738 	** Determine the correct mbuf pool
    739 	** for doing jumbo/headersplit
    740 	*/
    741 	if (ifp->if_mtu > ETHERMTU)
    742 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    743 	else
    744 		adapter->rx_mbuf_sz = MCLBYTES;
    745 
    746 	/* Prepare receive descriptors and buffers */
    747 	if (ixgbe_setup_receive_structures(adapter)) {
    748 		device_printf(dev, "Could not setup receive structures\n");
    749 		ixv_stop(adapter);
    750 		return;
    751 	}
    752 
    753 	/* Configure RX settings */
    754 	ixv_initialize_receive_units(adapter);
    755 
    756 #if 0 /* XXX isn't it required? -- msaitoh  */
    757 	/* Set the various hardware offload abilities */
    758 	ifp->if_hwassist = 0;
    759 	if (ifp->if_capenable & IFCAP_TSO4)
    760 		ifp->if_hwassist |= CSUM_TSO;
    761 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    762 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    763 #if __FreeBSD_version >= 800000
    764 		ifp->if_hwassist |= CSUM_SCTP;
    765 #endif
    766 	}
    767 #endif
    768 
    769 	/* Set up VLAN offload and filter */
    770 	ixv_setup_vlan_support(adapter);
    771 
    772 	/* Set up MSI/X routing */
    773 	ixv_configure_ivars(adapter);
    774 
    775 	/* Set up auto-mask */
    776 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    777 
    778         /* Set moderation on the Link interrupt */
    779         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    780 
    781 	/* Stats init */
    782 	ixv_init_stats(adapter);
    783 
    784 	/* Config/Enable Link */
    785 	ixv_config_link(adapter);
    786 
    787 	/* Start watchdog */
    788 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    789 
    790 	/* And now turn on interrupts */
    791 	ixv_enable_intr(adapter);
    792 
    793 	/* Now inform the stack we're ready */
    794 	ifp->if_flags |= IFF_RUNNING;
    795 	ifp->if_flags &= ~IFF_OACTIVE;
    796 
    797 	return;
    798 }
    799 
    800 static int
    801 ixv_init(struct ifnet *ifp)
    802 {
    803 	struct adapter *adapter = ifp->if_softc;
    804 
    805 	IXGBE_CORE_LOCK(adapter);
    806 	ixv_init_locked(adapter);
    807 	IXGBE_CORE_UNLOCK(adapter);
    808 	return 0;
    809 }
    810 
    811 
    812 /*
    813 **
    814 ** MSIX Interrupt Handlers and Tasklets
    815 **
    816 */
    817 
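         /*
          * Enable a queue's interrupt by setting its bit in the VF interrupt
          * mask set register (VTEIMS); ixv_disable_queue() below masks it
          * again through the mask clear register (VTEIMC).
          */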
    818 static inline void
    819 ixv_enable_queue(struct adapter *adapter, u32 vector)
    820 {
    821 	struct ixgbe_hw *hw = &adapter->hw;
    822 	u32	queue = 1 << vector;
    823 	u32	mask;
    824 
    825 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    826 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    827 }
    828 
    829 static inline void
    830 ixv_disable_queue(struct adapter *adapter, u32 vector)
    831 {
    832 	struct ixgbe_hw *hw = &adapter->hw;
     833 	u64	queue = (u64)1 << vector;
    834 	u32	mask;
    835 
    836 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    837 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    838 }
    839 
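         /*
          * Force an interrupt on the given queues by writing their bits to
          * the VF interrupt cause set register (VTEICS).
          */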
    840 static inline void
    841 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    842 {
    843 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    844 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    845 }
    846 
    847 
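         /*
          * Deferred (softint) per-queue handler: completes RX/TX cleanup left
          * over from the MSI-X handler, restarts transmission, and re-enables
          * the queue interrupt when no work remains.
          */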
    848 static void
    849 ixv_handle_que(void *context)
    850 {
    851 	struct ix_queue *que = context;
    852 	struct adapter  *adapter = que->adapter;
    853 	struct tx_ring	*txr = que->txr;
    854 	struct ifnet    *ifp = adapter->ifp;
    855 	bool		more;
    856 
    857 	if (ifp->if_flags & IFF_RUNNING) {
    858 		more = ixgbe_rxeof(que);
    859 		IXGBE_TX_LOCK(txr);
    860 		ixgbe_txeof(txr);
    861 #ifndef IXGBE_LEGACY_TX
    862 		if (pcq_peek(txr->txr_interq) != NULL)
    863 			ixgbe_mq_start_locked(ifp, txr);
    864 #else
    865 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
    866 			ixgbe_start_locked(txr, ifp);
    867 #endif
    868 		IXGBE_TX_UNLOCK(txr);
    869 		if (more) {
    870 			adapter->req.ev_count++;
    871 			softint_schedule(que->que_si);
    872 			return;
    873 		}
    874 	}
    875 
    876 	/* Reenable this interrupt */
    877 	ixv_enable_queue(adapter, que->msix);
    878 	return;
    879 }
    880 
    881 /*********************************************************************
    882  *
     883  *  MSI-X Queue Interrupt Service routine
    884  *
    885  **********************************************************************/
    886 int
    887 ixv_msix_que(void *arg)
    888 {
    889 	struct ix_queue	*que = arg;
    890 	struct adapter  *adapter = que->adapter;
    891 	struct ifnet    *ifp = adapter->ifp;
    892 	struct tx_ring	*txr = que->txr;
    893 	struct rx_ring	*rxr = que->rxr;
    894 	bool		more;
    895 	u32		newitr = 0;
    896 
    897 	ixv_disable_queue(adapter, que->msix);
    898 	++que->irqs.ev_count;
    899 
    900 #ifdef __NetBSD__
    901 	/* Don't run ixgbe_rxeof in interrupt context */
    902 	more = true;
    903 #else
    904 	more = ixgbe_rxeof(que);
    905 #endif
    906 
    907 	IXGBE_TX_LOCK(txr);
    908 	ixgbe_txeof(txr);
    909 	/*
    910 	** Make certain that if the stack
    911 	** has anything queued the task gets
    912 	** scheduled to handle it.
    913 	*/
    914 #ifdef IXGBE_LEGACY_TX
    915 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
    916 		ixgbe_start_locked(txr, ifp);
    917 #else
    918 	if (pcq_peek(txr->txr_interq) != NULL)
    919 		ixgbe_mq_start_locked(ifp, txr);
    920 #endif
    921 	IXGBE_TX_UNLOCK(txr);
    922 
    923 	/* Do AIM now? */
    924 
    925 	if (ixv_enable_aim == FALSE)
    926 		goto no_calc;
    927 	/*
    928 	** Do Adaptive Interrupt Moderation:
    929         **  - Write out last calculated setting
    930 	**  - Calculate based on average size over
    931 	**    the last interval.
    932 	*/
    933         if (que->eitr_setting)
    934                 IXGBE_WRITE_REG(&adapter->hw,
    935                     IXGBE_VTEITR(que->msix),
    936 		    que->eitr_setting);
    937 
    938         que->eitr_setting = 0;
    939 
    940         /* Idle, do nothing */
    941         if ((txr->bytes == 0) && (rxr->bytes == 0))
    942                 goto no_calc;
    943 
    944 	if ((txr->bytes) && (txr->packets))
    945                	newitr = txr->bytes/txr->packets;
    946 	if ((rxr->bytes) && (rxr->packets))
    947 		newitr = max(newitr,
    948 		    (rxr->bytes / rxr->packets));
    949 	newitr += 24; /* account for hardware frame, crc */
    950 
    951 	/* set an upper boundary */
    952 	newitr = min(newitr, 3000);
    953 
    954 	/* Be nice to the mid range */
    955 	if ((newitr > 300) && (newitr < 1200))
    956 		newitr = (newitr / 3);
    957 	else
    958 		newitr = (newitr / 2);
    959 
    960 	newitr |= newitr << 16;
    961 
    962         /* save for next interrupt */
    963         que->eitr_setting = newitr;
    964 
    965         /* Reset state */
    966         txr->bytes = 0;
    967         txr->packets = 0;
    968         rxr->bytes = 0;
    969         rxr->packets = 0;
    970 
    971 no_calc:
    972 	if (more)
    973 		softint_schedule(que->que_si);
    974 	else /* Reenable this interrupt */
    975 		ixv_enable_queue(adapter, que->msix);
    976 	return 1;
    977 }
    978 
    979 static int
    980 ixv_msix_mbx(void *arg)
    981 {
    982 	struct adapter	*adapter = arg;
    983 	struct ixgbe_hw *hw = &adapter->hw;
    984 	u32		reg;
    985 
    986 	++adapter->link_irq.ev_count;
    987 
    988 	/* First get the cause */
    989 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    990 	/* Clear interrupt with write */
    991 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    992 
    993 	/* Link status change */
    994 	if (reg & IXGBE_EICR_LSC)
    995 		softint_schedule(adapter->link_si);
    996 
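         	/* Re-enable the mailbox/link (the "other" cause) interrupt */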
    997 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    998 	return 1;
    999 }
   1000 
   1001 /*********************************************************************
   1002  *
   1003  *  Media Ioctl callback
   1004  *
   1005  *  This routine is called whenever the user queries the status of
   1006  *  the interface using ifconfig.
   1007  *
   1008  **********************************************************************/
   1009 static void
   1010 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1011 {
   1012 	struct adapter *adapter = ifp->if_softc;
   1013 
   1014 	INIT_DEBUGOUT("ixv_media_status: begin");
   1015 	IXGBE_CORE_LOCK(adapter);
   1016 	ixv_update_link_status(adapter);
   1017 
   1018 	ifmr->ifm_status = IFM_AVALID;
   1019 	ifmr->ifm_active = IFM_ETHER;
   1020 
   1021 	if (!adapter->link_active) {
   1022 		IXGBE_CORE_UNLOCK(adapter);
   1023 		return;
   1024 	}
   1025 
   1026 	ifmr->ifm_status |= IFM_ACTIVE;
   1027 
   1028 	switch (adapter->link_speed) {
   1029 		case IXGBE_LINK_SPEED_1GB_FULL:
   1030 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1031 			break;
   1032 		case IXGBE_LINK_SPEED_10GB_FULL:
   1033 			ifmr->ifm_active |= IFM_FDX;
   1034 			break;
   1035 	}
   1036 
   1037 	IXGBE_CORE_UNLOCK(adapter);
   1038 
   1039 	return;
   1040 }
   1041 
   1042 /*********************************************************************
   1043  *
   1044  *  Media Ioctl callback
   1045  *
   1046  *  This routine is called when the user changes speed/duplex using
    1047  *  the media/mediaopt options with ifconfig.
   1048  *
   1049  **********************************************************************/
   1050 static int
   1051 ixv_media_change(struct ifnet * ifp)
   1052 {
   1053 	struct adapter *adapter = ifp->if_softc;
   1054 	struct ifmedia *ifm = &adapter->media;
   1055 
   1056 	INIT_DEBUGOUT("ixv_media_change: begin");
   1057 
   1058 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1059 		return (EINVAL);
   1060 
   1061         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1062         case IFM_AUTO:
   1063                 break;
   1064         default:
   1065                 device_printf(adapter->dev, "Only auto media type\n");
   1066 		return (EINVAL);
   1067         }
   1068 
   1069 	return (0);
   1070 }
   1071 
   1072 
   1073 /*********************************************************************
   1074  *  Multicast Update
   1075  *
    1076  *  This routine is called whenever the multicast address list is updated.
   1077  *
   1078  **********************************************************************/
   1079 #define IXGBE_RAR_ENTRIES 16
   1080 
   1081 static void
   1082 ixv_set_multi(struct adapter *adapter)
   1083 {
   1084 	struct ether_multi *enm;
   1085 	struct ether_multistep step;
   1086 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1087 	u8	*update_ptr;
   1088 	int	mcnt = 0;
   1089 	struct ethercom *ec = &adapter->osdep.ec;
   1090 
   1091 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1092 
   1093 	ETHER_FIRST_MULTI(step, ec, enm);
   1094 	while (enm != NULL) {
   1095 		bcopy(enm->enm_addrlo,
   1096 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1097 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1098 		mcnt++;
   1099 		/* XXX This might be required --msaitoh */
   1100 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1101 			break;
   1102 		ETHER_NEXT_MULTI(step, enm);
   1103 	}
   1104 
   1105 	update_ptr = mta;
   1106 
   1107 	ixgbe_update_mc_addr_list(&adapter->hw,
   1108 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1109 
   1110 	return;
   1111 }
   1112 
   1113 /*
    1114  * This is an iterator function needed by the multicast
    1115  * shared code. It simply feeds the shared code routine the
    1116  * addresses built by ixv_set_multi(), one by one.
   1117  */
   1118 static u8 *
   1119 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1120 {
   1121 	u8 *addr = *update_ptr;
   1122 	u8 *newptr;
   1123 	*vmdq = 0;
   1124 
   1125 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1126 	*update_ptr = newptr;
   1127 	return addr;
   1128 }
   1129 
   1130 /*********************************************************************
   1131  *  Timer routine
   1132  *
    1133  *  This routine checks for link status, updates statistics,
   1134  *  and runs the watchdog check.
   1135  *
   1136  **********************************************************************/
   1137 
   1138 static void
   1139 ixv_local_timer(void *arg)
   1140 {
   1141 	struct adapter *adapter = arg;
   1142 
   1143 	IXGBE_CORE_LOCK(adapter);
   1144 	ixv_local_timer_locked(adapter);
   1145 	IXGBE_CORE_UNLOCK(adapter);
   1146 }
   1147 
   1148 static void
   1149 ixv_local_timer_locked(void *arg)
   1150 {
   1151 	struct adapter	*adapter = arg;
   1152 	device_t	dev = adapter->dev;
   1153 	struct ix_queue	*que = adapter->queues;
   1154 	u64		queues = 0;
   1155 	int		hung = 0;
   1156 
   1157 	KASSERT(mutex_owned(&adapter->core_mtx));
   1158 
   1159 	ixv_update_link_status(adapter);
   1160 
   1161 	/* Stats Update */
   1162 	ixv_update_stats(adapter);
   1163 
   1164 	/*
   1165 	** Check the TX queues status
   1166 	**      - mark hung queues so we don't schedule on them
   1167 	**      - watchdog only if all queues show hung
   1168 	*/
   1169 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1170 		/* Keep track of queues with work for soft irq */
   1171 		if (que->txr->busy)
   1172 			queues |= ((u64)1 << que->me);
   1173 		/*
    1174 		** Each time txeof runs without cleaning while there
    1175 		** are uncleaned descriptors, it increments busy. If
   1176 		** we get to the MAX we declare it hung.
   1177 		*/
   1178 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1179 			++hung;
   1180 			/* Mark the queue as inactive */
   1181 			adapter->active_queues &= ~((u64)1 << que->me);
   1182 			continue;
   1183 		} else {
   1184 			/* Check if we've come back from hung */
   1185 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1186                                 adapter->active_queues |= ((u64)1 << que->me);
   1187 		}
   1188 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1189 			device_printf(dev,"Warning queue %d "
   1190 			    "appears to be hung!\n", i);
   1191 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1192 			++hung;
   1193 		}
   1194 
   1195 	}
   1196 
   1197 	/* Only truly watchdog if all queues show hung */
   1198 	if (hung == adapter->num_queues)
   1199 		goto watchdog;
   1200 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1201 		ixv_rearm_queues(adapter, queues);
   1202 	}
   1203 
   1204 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1205 	return;
   1206 
   1207 watchdog:
   1208 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1209 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1210 	adapter->watchdog_events.ev_count++;
   1211 	ixv_init_locked(adapter);
   1212 }
   1213 
   1214 /*
    1215 ** Note: this routine updates the OS on the link state;
   1216 **	the real check of the hardware only happens with
   1217 **	a link interrupt.
   1218 */
   1219 static void
   1220 ixv_update_link_status(struct adapter *adapter)
   1221 {
   1222 	struct ifnet	*ifp = adapter->ifp;
   1223 	device_t dev = adapter->dev;
   1224 
   1225 	if (adapter->link_up){
   1226 		if (adapter->link_active == FALSE) {
   1227 			if (bootverbose)
   1228 				device_printf(dev,"Link is up %d Gbps %s \n",
   1229 				    ((adapter->link_speed == 128)? 10:1),
   1230 				    "Full Duplex");
   1231 			adapter->link_active = TRUE;
   1232 			if_link_state_change(ifp, LINK_STATE_UP);
   1233 		}
   1234 	} else { /* Link down */
   1235 		if (adapter->link_active == TRUE) {
   1236 			if (bootverbose)
   1237 				device_printf(dev,"Link is Down\n");
   1238 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1239 			adapter->link_active = FALSE;
   1240 		}
   1241 	}
   1242 
   1243 	return;
   1244 }
   1245 
   1246 
   1247 static void
   1248 ixv_ifstop(struct ifnet *ifp, int disable)
   1249 {
   1250 	struct adapter *adapter = ifp->if_softc;
   1251 
   1252 	IXGBE_CORE_LOCK(adapter);
   1253 	ixv_stop(adapter);
   1254 	IXGBE_CORE_UNLOCK(adapter);
   1255 }
   1256 
   1257 /*********************************************************************
   1258  *
   1259  *  This routine disables all traffic on the adapter by issuing a
   1260  *  global reset on the MAC and deallocates TX/RX buffers.
   1261  *
   1262  **********************************************************************/
   1263 
   1264 static void
   1265 ixv_stop(void *arg)
   1266 {
   1267 	struct ifnet   *ifp;
   1268 	struct adapter *adapter = arg;
   1269 	struct ixgbe_hw *hw = &adapter->hw;
   1270 	ifp = adapter->ifp;
   1271 
   1272 	KASSERT(mutex_owned(&adapter->core_mtx));
   1273 
   1274 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1275 	ixv_disable_intr(adapter);
   1276 
   1277 	/* Tell the stack that the interface is no longer active */
   1278 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1279 
   1280 	ixgbe_reset_hw(hw);
   1281 	adapter->hw.adapter_stopped = FALSE;
   1282 	ixgbe_stop_adapter(hw);
   1283 	callout_stop(&adapter->timer);
   1284 
   1285 	/* reprogram the RAR[0] in case user changed it. */
   1286 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1287 
   1288 	return;
   1289 }
   1290 
   1291 
   1292 /*********************************************************************
   1293  *
   1294  *  Determine hardware revision.
   1295  *
   1296  **********************************************************************/
   1297 static void
   1298 ixv_identify_hardware(struct adapter *adapter)
   1299 {
   1300 	pcitag_t tag;
   1301 	pci_chipset_tag_t pc;
   1302 	pcireg_t subid, id;
   1303 	struct ixgbe_hw *hw = &adapter->hw;
   1304 
   1305 	pc = adapter->osdep.pc;
   1306 	tag = adapter->osdep.tag;
   1307 
   1308 	/*
    1309 	** Make sure BUSMASTER is set; on a VM under
    1310 	** KVM it may not be, and that will break things.
   1311 	*/
   1312 	ixgbe_pci_enable_busmaster(pc, tag);
   1313 
   1314 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1315 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1316 
   1317 	/* Save off the information about this board */
   1318 	hw->vendor_id = PCI_VENDOR(id);
   1319 	hw->device_id = PCI_PRODUCT(id);
   1320 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1321 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1322 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1323 
   1324 	/* We need this to determine device-specific things */
   1325 	ixgbe_set_mac_type(hw);
   1326 
   1327 	/* Set the right number of segments */
   1328 	adapter->num_segs = IXGBE_82599_SCATTER;
   1329 
   1330 	return;
   1331 }
   1332 
   1333 /*********************************************************************
   1334  *
   1335  *  Setup MSIX Interrupt resources and handlers
   1336  *
   1337  **********************************************************************/
   1338 static int
   1339 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1340 {
   1341 	device_t	dev = adapter->dev;
   1342 	struct ix_queue *que = adapter->queues;
   1343 	struct		tx_ring *txr = adapter->tx_rings;
   1344 	int 		error, rid, vector = 0;
   1345 	pci_chipset_tag_t pc;
   1346 	pcitag_t	tag;
   1347 	char		intrbuf[PCI_INTRSTR_LEN];
   1348 	char		intr_xname[32];
   1349 	const char	*intrstr = NULL;
   1350 	kcpuset_t	*affinity;
   1351 	int		cpu_id = 0;
   1352 
   1353 	pc = adapter->osdep.pc;
   1354 	tag = adapter->osdep.tag;
   1355 
   1356 	adapter->osdep.nintrs = adapter->num_queues + 1;
   1357 	if (pci_msix_alloc_exact(pa,
   1358 	    &adapter->osdep.intrs, adapter->osdep.nintrs) != 0)
   1359 		return (ENXIO);
   1360 
   1361 	kcpuset_create(&affinity, false);
   1362 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1363 		snprintf(intr_xname, sizeof(intr_xname), "%s TX/RX",
   1364 		    device_xname(dev));
   1365 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1366 		    sizeof(intrbuf));
   1367 #ifdef IXV_MPSAFE
    1368 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1369 		    true);
   1370 #endif
   1371 		/* Set the handler function */
   1372 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   1373 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   1374 			intr_xname);
   1375 		if (que->res == NULL) {
   1376 			pci_intr_release(pc, adapter->osdep.intrs,
   1377 			    adapter->osdep.nintrs);
   1378 			aprint_error_dev(dev,
   1379 			    "Failed to register QUE handler");
   1380 			kcpuset_destroy(affinity);
   1381 			return (ENXIO);
   1382 		}
   1383 		que->msix = vector;
    1384 		adapter->active_queues |= ((u64)1 << que->msix);
   1385 
   1386 		cpu_id = i;
   1387 		/* Round-robin affinity */
   1388 		kcpuset_zero(affinity);
   1389 		kcpuset_set(affinity, cpu_id % ncpu);
   1390 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1391 		    NULL);
   1392 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1393 		    intrstr);
   1394 		if (error == 0)
   1395 			aprint_normal(", bound queue %d to cpu %d\n",
   1396 			    i, cpu_id);
   1397 		else
   1398 			aprint_normal("\n");
   1399 
   1400 #ifndef IXGBE_LEGACY_TX
   1401 		txr->txr_si = softint_establish(SOFTINT_NET,
   1402 		    ixgbe_deferred_mq_start, txr);
   1403 #endif
   1404 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1405 		    que);
   1406 		if (que->que_si == NULL) {
   1407 			aprint_error_dev(dev,
   1408 			    "could not establish software interrupt\n");
   1409 		}
   1410 	}
   1411 
   1412 	/* and Mailbox */
   1413 	cpu_id++;
   1414 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   1415 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1416 	    sizeof(intrbuf));
    1417 #ifdef IXV_MPSAFE
   1418 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   1419 #endif
   1420 	/* Set the mbx handler function */
   1421 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   1422 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   1423 		intr_xname);
   1424 	if (adapter->osdep.ihs[vector] == NULL) {
   1425 		adapter->res = NULL;
   1426 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1427 		kcpuset_destroy(affinity);
   1428 		return (ENXIO);
   1429 	}
   1430 	/* Round-robin affinity */
   1431 	kcpuset_zero(affinity);
   1432 	kcpuset_set(affinity, cpu_id % ncpu);
   1433 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1434 
   1435 	aprint_normal_dev(dev,
   1436 	    "for link, interrupting at %s, ", intrstr);
   1437 	if (error == 0) {
   1438 		aprint_normal("affinity to cpu %d\n", cpu_id);
    1439 	} else aprint_normal("\n");
   1440 	adapter->vector = vector;
   1441 	/* Tasklets for Mailbox */
   1442 	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1443 	    adapter);
   1444 	/*
   1445 	** Due to a broken design QEMU will fail to properly
   1446 	** enable the guest for MSIX unless the vectors in
   1447 	** the table are all set up, so we must rewrite the
   1448 	** ENABLE in the MSIX control register again at this
   1449 	** point to cause it to successfully initialize us.
   1450 	*/
   1451 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1452 		int msix_ctrl;
   1453 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1454 		rid += PCI_MSIX_CTL;
   1455 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1456 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1457 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1458 	}
   1459 
   1460 	kcpuset_destroy(affinity);
   1461 	return (0);
   1462 }
   1463 
   1464 /*
    1465  * Set up MSI-X resources; note that the VF
    1466  * device MUST use MSI-X, there is no fallback.
   1467  */
   1468 static int
   1469 ixv_setup_msix(struct adapter *adapter)
   1470 {
   1471 	device_t dev = adapter->dev;
   1472 	int want, queues, msgs;
   1473 
   1474 	/* Must have at least 2 MSIX vectors */
   1475 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1476 	if (msgs < 2) {
   1477 		aprint_error_dev(dev,"MSIX config error\n");
   1478 		return (ENXIO);
   1479 	}
   1480 	msgs = MIN(msgs, IXG_MAX_NINTR);
   1481 
   1482 	/* Figure out a reasonable auto config value */
   1483 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   1484 
   1485 	if (ixv_num_queues != 0)
   1486 		queues = ixv_num_queues;
   1487 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   1488 		queues = IXGBE_VF_MAX_TX_QUEUES;
   1489 
   1490 	/*
   1491 	** Want vectors for the queues,
    1492 	** plus an additional one for the mailbox.
   1493 	*/
   1494 	want = queues + 1;
   1495 	if (msgs >= want) {
   1496 		msgs = want;
   1497 	} else {
   1498                	aprint_error_dev(dev,
   1499 		    "MSIX Configuration Problem, "
   1500 		    "%d vectors but %d queues wanted!\n",
   1501 		    msgs, want);
   1502 		return -1;
   1503 	}
   1504 
   1505 	adapter->msix_mem = (void *)1; /* XXX */
   1506 	aprint_normal_dev(dev,
   1507 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1508 	adapter->num_queues = queues;
   1509 	return (msgs);
   1510 }
   1511 
   1512 
   1513 static int
   1514 ixv_allocate_pci_resources(struct adapter *adapter,
   1515     const struct pci_attach_args *pa)
   1516 {
   1517 	pcireg_t	memtype;
   1518 	device_t        dev = adapter->dev;
   1519 	bus_addr_t addr;
   1520 	int flags;
   1521 
   1522 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1523 
   1524 	switch (memtype) {
   1525 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1526 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1527 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1528 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1529 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1530 			goto map_err;
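         		/* Device register space must not be mapped prefetchable. */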
   1531 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1532 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1533 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1534 		}
   1535 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1536 		     adapter->osdep.mem_size, flags,
   1537 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1538 map_err:
   1539 			adapter->osdep.mem_size = 0;
   1540 			aprint_error_dev(dev, "unable to map BAR0\n");
   1541 			return ENXIO;
   1542 		}
   1543 		break;
   1544 	default:
   1545 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1546 		return ENXIO;
   1547 	}
   1548 
   1549 	/* Pick up the tuneable queues */
   1550 	adapter->num_queues = ixv_num_queues;
   1551 	adapter->hw.back = adapter;
   1552 
   1553 	/*
   1554 	** Now setup MSI/X, should
   1555 	** return us the number of
   1556 	** configured vectors.
   1557 	*/
   1558 	adapter->msix = ixv_setup_msix(adapter);
   1559 	if (adapter->msix == ENXIO)
   1560 		return (ENXIO);
   1561 	else
   1562 		return (0);
   1563 }
   1564 
   1565 static void
   1566 ixv_free_pci_resources(struct adapter * adapter)
   1567 {
   1568 	struct 		ix_queue *que = adapter->queues;
   1569 	int		rid;
   1570 
   1571 	/*
   1572 	**  Release all msix queue resources:
   1573 	*/
   1574 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1575 		rid = que->msix + 1;
   1576 		if (que->res != NULL)
   1577 			pci_intr_disestablish(adapter->osdep.pc,
   1578 			    adapter->osdep.ihs[i]);
   1579 	}
   1580 
   1581 
   1582 	/* Clean the Legacy or Link interrupt last */
   1583 	if (adapter->vector) /* we are doing MSIX */
   1584 		rid = adapter->vector + 1;
   1585 	else
   1586 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
   1587 
   1588 	if (adapter->osdep.ihs[rid] != NULL)
   1589 		pci_intr_disestablish(adapter->osdep.pc,
   1590 		    adapter->osdep.ihs[rid]);
   1591 	adapter->osdep.ihs[rid] = NULL;
   1592 
   1593 #if defined(NETBSD_MSI_OR_MSIX)
   1594 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1595 	    adapter->osdep.nintrs);
   1596 #endif
   1597 
   1598 	if (adapter->osdep.mem_size != 0) {
   1599 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1600 		    adapter->osdep.mem_bus_space_handle,
   1601 		    adapter->osdep.mem_size);
   1602 	}
   1603 
   1604 	return;
   1605 }
   1606 
   1607 /*********************************************************************
   1608  *
   1609  *  Setup networking device structure and register an interface.
   1610  *
   1611  **********************************************************************/
   1612 static void
   1613 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1614 {
   1615 	struct ethercom *ec = &adapter->osdep.ec;
   1616 	struct ifnet   *ifp;
   1617 
   1618 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1619 
   1620 	ifp = adapter->ifp = &ec->ec_if;
   1621 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1622 	ifp->if_baudrate = 1000000000;
   1623 	ifp->if_init = ixv_init;
   1624 	ifp->if_stop = ixv_ifstop;
   1625 	ifp->if_softc = adapter;
   1626 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1627 	ifp->if_ioctl = ixv_ioctl;
   1628 #ifndef IXGBE_LEGACY_TX
   1629 	ifp->if_transmit = ixgbe_mq_start;
   1630 #endif
   1631 	ifp->if_start = ixgbe_start;
   1632 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1633 
   1634 	if_initialize(ifp);
   1635 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1636 #ifndef IXGBE_LEGACY_TX
   1637 #if 0	/* We use per TX queue softint */
   1638 	if_deferred_start_init(ifp, ixgbe_deferred_mq_start);
   1639 #endif
   1640 #endif
   1641 	if_register(ifp);
   1642 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1643 
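	/*
	 * IXGBE_MTU_HDR_VLAN is expected to cover the Ethernet header,
	 * CRC and a single VLAN tag on top of the MTU.
	 */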
   1644 	adapter->max_frame_size =
   1645 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
   1646 
   1647 	/*
   1648 	 * Tell the upper layer(s) we support long frames.
   1649 	 */
   1650 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1651 
   1652 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1653 	ifp->if_capenable = 0;
   1654 
   1655 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1656 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_MTU;
   1659 	ec->ec_capenable = ec->ec_capabilities;
   1660 
	/* Advertise LRO capability, but don't enable it by default */
	ifp->if_capabilities |= IFCAP_LRO;
   1663 #if 0
   1664 	ifp->if_capenable = ifp->if_capabilities;
   1665 #endif
   1666 
	/*
	** Don't turn this on by default: if vlans are
	** created on another pseudo device (e.g. lagg),
	** vlan events are not passed through and operation
	** breaks, but with HW FILTER off it works. If
	** vlans are used directly on this driver you can
	** enable this and get full hardware tag filtering.
	*/
   1675 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1676 
   1677 	/*
   1678 	 * Specify the media types supported by this adapter and register
   1679 	 * callbacks to update media and link information
   1680 	 */
   1681 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1682 		     ixv_media_status);
   1683 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1684 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1685 
   1686 	return;
   1687 }
   1688 
   1689 static void
   1690 ixv_config_link(struct adapter *adapter)
   1691 {
   1692 	struct ixgbe_hw *hw = &adapter->hw;
   1693 	u32	autoneg;
   1694 
   1695 	if (hw->mac.ops.check_link)
   1696 		hw->mac.ops.check_link(hw, &autoneg,
   1697 		    &adapter->link_up, FALSE);
   1698 }
   1699 
   1700 
   1701 /*********************************************************************
   1702  *
   1703  *  Enable transmit unit.
   1704  *
   1705  **********************************************************************/
   1706 static void
   1707 ixv_initialize_transmit_units(struct adapter *adapter)
   1708 {
   1709 	struct tx_ring	*txr = adapter->tx_rings;
   1710 	struct ixgbe_hw	*hw = &adapter->hw;
   1711 
   1712 
   1713 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1714 		u64	tdba = txr->txdma.dma_paddr;
   1715 		u32	txctrl, txdctl;
   1716 
   1717 		/* Set WTHRESH to 8, burst writeback */
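		/* (WTHRESH occupies bits 22:16 of TXDCTL, hence the shift by 16.) */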
   1718 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1719 		txdctl |= (8 << 16);
   1720 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1721 
   1722 		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1725 
   1726 		/* Set Tx Tail register */
   1727 		txr->tail = IXGBE_VFTDT(i);
   1728 
   1729 		/* Set Ring parameters */
   1730 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1731 		       (tdba & 0x00000000ffffffffULL));
   1732 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1733 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1734 		    adapter->num_tx_desc *
   1735 		    sizeof(struct ixgbe_legacy_tx_desc));
   1736 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1737 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1738 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1739 
   1740 		/* Now enable */
   1741 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1742 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1743 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1744 	}
   1745 
   1746 	return;
   1747 }
   1748 
   1749 
   1750 /*********************************************************************
   1751  *
   1752  *  Setup receive registers and features.
   1753  *
   1754  **********************************************************************/
   1755 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1756 
   1757 static void
   1758 ixv_initialize_receive_units(struct adapter *adapter)
   1759 {
   1760 	struct	rx_ring	*rxr = adapter->rx_rings;
   1761 	struct ixgbe_hw	*hw = &adapter->hw;
   1762 	struct ifnet	*ifp = adapter->ifp;
   1763 	u32		bufsz, rxcsum, psrtype;
   1764 
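	/*
	 * The SRRCTL packet buffer size field is programmed in 1 KB
	 * granularity (hence IXGBE_SRRCTL_BSIZEPKT_SHIFT): use 4 KB
	 * receive buffers for jumbo MTUs, 2 KB otherwise.
	 */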
   1765 	if (ifp->if_mtu > ETHERMTU)
   1766 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1767 	else
   1768 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1769 
   1770 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1771 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1772 	    IXGBE_PSRTYPE_L2HDR;
   1773 
   1774 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1775 
   1776 	/* Tell PF our max_frame size */
   1777 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1778 
   1779 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1780 		u64 rdba = rxr->rxdma.dma_paddr;
   1781 		u32 reg, rxdctl;
   1782 
   1783 		/* Disable the queue */
   1784 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1785 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1786 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
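		/* Poll for up to ~10 ms for the disable to take effect. */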
   1787 		for (int j = 0; j < 10; j++) {
   1788 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1789 			    IXGBE_RXDCTL_ENABLE)
   1790 				msec_delay(1);
   1791 			else
   1792 				break;
   1793 		}
   1794 		wmb();
   1795 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1796 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1797 		    (rdba & 0x00000000ffffffffULL));
   1798 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1799 		    (rdba >> 32));
   1800 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1801 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1802 
   1803 		/* Reset the ring indices */
   1804 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1805 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1806 
   1807 		/* Set up the SRRCTL register */
   1808 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1809 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1810 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1811 		reg |= bufsz;
   1812 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1813 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1814 
   1815 		/* Capture Rx Tail index */
   1816 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1817 
   1818 		/* Do the queue enabling last */
   1819 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1820 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1821 		for (int k = 0; k < 10; k++) {
   1822 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1823 			    IXGBE_RXDCTL_ENABLE)
   1824 				break;
   1825 			else
   1826 				msec_delay(1);
   1827 		}
   1828 		wmb();
   1829 
   1830 		/* Set the Tail Pointer */
   1831 #ifdef DEV_NETMAP
   1832 		/*
   1833 		 * In netmap mode, we must preserve the buffers made
   1834 		 * available to userspace before the if_init()
   1835 		 * (this is true by default on the TX side, because
   1836 		 * init makes all buffers available to userspace).
   1837 		 *
   1838 		 * netmap_reset() and the device specific routines
   1839 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1840 		 * buffers at the end of the NIC ring, so here we
   1841 		 * must set the RDT (tail) register to make sure
   1842 		 * they are not overwritten.
   1843 		 *
   1844 		 * In this driver the NIC ring starts at RDH = 0,
   1845 		 * RDT points to the last slot available for reception (?),
   1846 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1847 		 */
   1848 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1849 			struct netmap_adapter *na = NA(adapter->ifp);
   1850 			struct netmap_kring *kring = &na->rx_rings[i];
   1851 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1852 
   1853 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1854 		} else
   1855 #endif /* DEV_NETMAP */
   1856 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1857 			    adapter->num_rx_desc - 1);
   1858 	}
   1859 
   1860 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1861 
   1862 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1863 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1864 
   1865 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1866 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1867 
   1868 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1869 
   1870 	return;
   1871 }
   1872 
   1873 static void
   1874 ixv_setup_vlan_support(struct adapter *adapter)
   1875 {
   1876 	struct ixgbe_hw *hw = &adapter->hw;
   1877 	u32		ctrl, vid, vfta, retry;
   1878 	struct rx_ring	*rxr;
   1879 
	/*
	** We get here through init_locked, meaning
	** a soft reset has already cleared the VFTA
	** and other state, so if no vlans have been
	** registered there is nothing to do.
	*/
   1886 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1887 		return;
   1888 
   1889 	/* Enable the queues */
   1890 	for (int i = 0; i < adapter->num_queues; i++) {
   1891 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1892 		ctrl |= IXGBE_RXDCTL_VME;
   1893 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1894 		/*
   1895 		 * Let Rx path know that it needs to store VLAN tag
   1896 		 * as part of extra mbuf info.
   1897 		 */
   1898 		rxr = &adapter->rx_rings[i];
   1899 		rxr->vtag_strip = TRUE;
   1900 	}
   1901 
	/*
	** A soft reset zeros out the VFTA, so
	** we need to repopulate it now.
	*/
   1906 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1907 		if (ixv_shadow_vfta[i] == 0)
   1908 			continue;
   1909 		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan IDs from
		** the bits set in each 32-bit
		** word of the array.
   1914 		*/
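		/* e.g. bit 5 set in word 3 corresponds to vlan ID 3 * 32 + 5 = 101. */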
   1915 		for (int j = 0; j < 32; j++) {
   1916 			retry = 0;
   1917 			if ((vfta & (1 << j)) == 0)
   1918 				continue;
   1919 			vid = (i * 32) + j;
   1920 			/* Call the shared code mailbox routine */
   1921 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1922 				if (++retry > 5)
   1923 					break;
   1924 			}
   1925 		}
   1926 	}
   1927 }
   1928 
   1929 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
/*
** This routine is run via a vlan config EVENT;
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA; init will
** repopulate the real table.
*/
   1937 static void
   1938 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1939 {
   1940 	struct adapter	*adapter = ifp->if_softc;
   1941 	u16		index, bit;
   1942 
   1943 	if (ifp->if_softc != arg) /* Not our event */
   1944 		return;
   1945 
   1946 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1947 		return;
   1948 
   1949 	IXGBE_CORE_LOCK(adapter);
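	/*
	 * The shadow VFTA covers vlan IDs 0-4095 as an array of 32-bit
	 * words: word index = vtag / 32, bit = vtag % 32.
	 */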
   1950 	index = (vtag >> 5) & 0x7F;
   1951 	bit = vtag & 0x1F;
   1952 	ixv_shadow_vfta[index] |= (1 << bit);
   1953 	/* Re-init to load the changes */
   1954 	ixv_init_locked(adapter);
   1955 	IXGBE_CORE_UNLOCK(adapter);
   1956 }
   1957 
/*
** This routine is run via a vlan
** unconfig EVENT; remove our entry
** from the soft VFTA.
*/
   1963 static void
   1964 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1965 {
   1966 	struct adapter	*adapter = ifp->if_softc;
   1967 	u16		index, bit;
   1968 
   1969 	if (ifp->if_softc !=  arg)
   1970 		return;
   1971 
   1972 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1973 		return;
   1974 
   1975 	IXGBE_CORE_LOCK(adapter);
   1976 	index = (vtag >> 5) & 0x7F;
   1977 	bit = vtag & 0x1F;
   1978 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1979 	/* Re-init to load the changes */
   1980 	ixv_init_locked(adapter);
   1981 	IXGBE_CORE_UNLOCK(adapter);
   1982 }
   1983 #endif
   1984 
   1985 static void
   1986 ixv_enable_intr(struct adapter *adapter)
   1987 {
   1988 	struct ixgbe_hw *hw = &adapter->hw;
   1989 	struct ix_queue *que = adapter->queues;
   1990 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1991 
   1992 
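	/*
	 * Unmask every cause except the per-queue ones (those vectors are
	 * enabled individually below), then mark all causes except
	 * IXGBE_EIMS_OTHER and IXGBE_EIMS_LSC as auto-clear.
	 */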
   1993 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1994 
   1995 	mask = IXGBE_EIMS_ENABLE_MASK;
   1996 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1997 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1998 
	for (int i = 0; i < adapter->num_queues; i++, que++)
   2000 		ixv_enable_queue(adapter, que->msix);
   2001 
   2002 	IXGBE_WRITE_FLUSH(hw);
   2003 
   2004 	return;
   2005 }
   2006 
   2007 static void
   2008 ixv_disable_intr(struct adapter *adapter)
   2009 {
   2010 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2011 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   2012 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2013 	return;
   2014 }
   2015 
   2016 /*
   2017 ** Setup the correct IVAR register for a particular MSIX interrupt
   2018 **  - entry is the register array entry
   2019 **  - vector is the MSIX vector for this queue
   2020 **  - type is RX/TX/MISC
   2021 */
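/*
** Layout, as implied by the code below: each 32-bit VTIVAR register
** holds the entries for two queues; the queue's parity selects the
** half (16 * (entry & 1)) and type selects RX (+0) or TX (+8) within
** it.  For example, entry 3 with type 1 (TX) gives index 16 + 8 = 24,
** so the vector lands in bits 31:24 of VTIVAR(1).
*/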
   2022 static void
   2023 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2024 {
   2025 	struct ixgbe_hw *hw = &adapter->hw;
   2026 	u32 ivar, index;
   2027 
   2028 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2029 
   2030 	if (type == -1) { /* MISC IVAR */
   2031 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2032 		ivar &= ~0xFF;
   2033 		ivar |= vector;
   2034 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2035 	} else {	/* RX/TX IVARS */
   2036 		index = (16 * (entry & 1)) + (8 * type);
   2037 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2038 		ivar &= ~(0xFF << index);
   2039 		ivar |= (vector << index);
   2040 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2041 	}
   2042 }
   2043 
   2044 static void
   2045 ixv_configure_ivars(struct adapter *adapter)
   2046 {
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2061 }
   2062 
   2063 
   2064 /*
   2065 ** Tasklet handler for MSIX MBX interrupts
   2066 **  - do outside interrupt since it might sleep
   2067 */
   2068 static void
   2069 ixv_handle_mbx(void *context)
   2070 {
   2071 	struct adapter  *adapter = context;
   2072 
   2073 	ixgbe_check_link(&adapter->hw,
   2074 	    &adapter->link_speed, &adapter->link_up, 0);
   2075 	ixv_update_link_status(adapter);
   2076 }
   2077 
   2078 /*
   2079 ** The VF stats registers never have a truly virgin
   2080 ** starting point, so this routine tries to make an
   2081 ** artificial one, marking ground zero on attach as
   2082 ** it were.
   2083 */
   2084 static void
   2085 ixv_save_stats(struct adapter *adapter)
   2086 {
   2087 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2088 
   2089 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2090 		stats->saved_reset_vfgprc +=
   2091 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2092 		stats->saved_reset_vfgptc +=
   2093 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2094 		stats->saved_reset_vfgorc +=
   2095 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2096 		stats->saved_reset_vfgotc +=
   2097 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2098 		stats->saved_reset_vfmprc +=
   2099 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2100 	}
   2101 }
   2102 
   2103 static void
   2104 ixv_init_stats(struct adapter *adapter)
   2105 {
   2106 	struct ixgbe_hw *hw = &adapter->hw;
   2107 
   2108 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2109 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2110 	adapter->stats.vf.last_vfgorc |=
   2111 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2112 
   2113 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2114 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2115 	adapter->stats.vf.last_vfgotc |=
   2116 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2117 
   2118 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2119 
   2120 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2121 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2122 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2123 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2124 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2125 }
   2126 
   2127 #define UPDATE_STAT_32(reg, last, count)		\
   2128 {							\
   2129 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2130 	if (current < last)				\
   2131 		count.ev_count += 0x100000000LL;	\
   2132 	last = current;					\
   2133 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2134 	count.ev_count |= current;			\
   2135 }
   2136 
   2137 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2138 {							\
   2139 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2140 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2141 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2142 	if (current < last)				\
   2143 		count.ev_count += 0x1000000000LL;	\
   2144 	last = current;					\
   2145 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2146 	count.ev_count |= current;			\
   2147 }
   2148 
   2149 /*
   2150 ** ixv_update_stats - Update the board statistics counters.
   2151 */
   2152 void
   2153 ixv_update_stats(struct adapter *adapter)
   2154 {
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
   2167 }
   2168 
   2169 /*
   2170  * Add statistic sysctls for the VF.
   2171  */
   2172 static void
   2173 ixv_add_stats_sysctls(struct adapter *adapter)
   2174 {
   2175 	device_t dev = adapter->dev;
   2176 	struct ix_queue *que = &adapter->queues[0];
   2177 	struct tx_ring *txr = que->txr;
   2178 	struct rx_ring *rxr = que->rxr;
   2179 
   2180 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2181 
   2182 	const char *xname = device_xname(dev);
   2183 
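	/*
	 * On NetBSD these statistics are exported as event counters
	 * (visible via vmstat -e) rather than sysctl nodes.
	 */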
   2184 	/* Driver Statistics */
   2185 	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
   2186 	    NULL, xname, "Driver dropped packets");
   2187 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2188 	    NULL, xname, "m_defrag() failed");
   2189 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2190 	    NULL, xname, "Watchdog timeouts");
   2191 
   2192 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2193 	    xname, "Good Packets Received");
   2194 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2195 	    xname, "Good Octets Received");
   2196 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2197 	    xname, "Multicast Packets Received");
   2198 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2199 	    xname, "Good Packets Transmitted");
   2200 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2201 	    xname, "Good Octets Transmitted");
   2202 	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
   2203 	    xname, "IRQs on queue");
   2204 	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
   2205 	    xname, "RX irqs on queue");
   2206 	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
   2207 	    xname, "RX packets");
   2208 	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
   2209 	    xname, "RX bytes");
   2210 	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
   2211 	    xname, "Discarded RX packets");
   2212 	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
   2213 	    xname, "TX Packets");
   2214 	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
   2215 	    xname, "# of times not enough descriptors were available during TX");
   2216 	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
   2217 	    xname, "TX TSO");
   2218 }
   2219 
   2220 static void
   2221 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2222 	const char *description, int *limit, int value)
   2223 {
	device_t dev = adapter->dev;
   2225 	struct sysctllog **log;
   2226 	const struct sysctlnode *rnode, *cnode;
   2227 
   2228 	log = &adapter->sysctllog;
   2229 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2230 		aprint_error_dev(dev, "could not create sysctl root\n");
   2231 		return;
   2232 	}
   2233 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2234 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2235 	    name, SYSCTL_DESCR(description),
   2236 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2237 		aprint_error_dev(dev, "could not create sysctl\n");
   2238 	*limit = value;
   2239 }
   2240 
/**********************************************************************
 *
 *  This routine is called only when debugging output is requested,
 *  by writing 1 to the driver's debug sysctl (handled by
 *  ixv_sysctl_debug below).  It provides a way to take a look at
 *  important statistics maintained by the driver and hardware.
 *
 **********************************************************************/
   2248 static void
   2249 ixv_print_debug_info(struct adapter *adapter)
   2250 {
	device_t dev = adapter->dev;
	struct ixgbe_hw		*hw = &adapter->hw;
	struct ix_queue		*que = adapter->queues;
	struct rx_ring		*rxr;
	struct tx_ring		*txr;
#ifdef LRO
	struct lro_ctrl		*lro;
#endif /* LRO */

	device_printf(dev, "Error Byte Count = %u\n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
#ifdef LRO
		lro = &rxr->lro;
#endif /* LRO */
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs.ev_count);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets.ev_count);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes.ev_count);
#ifdef LRO
		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
		    rxr->me, (long long)lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
		    rxr->me, (long long)lro->lro_flushed);
#endif /* LRO */
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets.ev_count);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail.ev_count);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->link_irq.ev_count);
	return;
   2290 }
   2291 
   2292 static int
   2293 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2294 {
   2295 	struct sysctlnode node;
   2296 	int error, result;
   2297 	struct adapter *adapter;
   2298 
   2299 	node = *rnode;
   2300 	adapter = (struct adapter *)node.sysctl_data;
   2301 	node.sysctl_data = &result;
   2302 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2303 
   2304 	if (error)
   2305 		return error;
   2306 
   2307 	if (result == 1)
   2308 		ixv_print_debug_info(adapter);
   2309 
   2310 	return 0;
   2311 }
   2312 
   2313 const struct sysctlnode *
   2314 ixv_sysctl_instance(struct adapter *adapter)
   2315 {
   2316 	const char *dvname;
   2317 	struct sysctllog **log;
   2318 	int rc;
   2319 	const struct sysctlnode *rnode;
   2320 
   2321 	log = &adapter->sysctllog;
   2322 	dvname = device_xname(adapter->dev);
   2323 
   2324 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2325 	    0, CTLTYPE_NODE, dvname,
   2326 	    SYSCTL_DESCR("ixv information and settings"),
   2327 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2328 		goto err;
   2329 
   2330 	return rnode;
   2331 err:
   2332 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2333 	return NULL;
   2334 }
   2335