ixv.c revision 1.35
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.35 2017/02/01 10:47:13 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixgbe.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.4.6-k";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
      50  *  Used by probe to select the devices to load on.
      51  *  The last field stores an index into ixv_strings.
      52  *  The last entry must be all 0s.
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     63 	/* required last entry */
     64 	{0, 0, 0, 0, 0}
     65 };
     66 
     67 /*********************************************************************
     68  *  Table of branding strings
     69  *********************************************************************/
     70 
     71 static const char    *ixv_strings[] = {
     72 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     73 };
     74 
     75 /*********************************************************************
     76  *  Function prototypes
     77  *********************************************************************/
     78 static int      ixv_probe(device_t, cfdata_t, void *);
     79 static void	ixv_attach(device_t, device_t, void *);
     80 static int      ixv_detach(device_t, int);
     81 #if 0
     82 static int      ixv_shutdown(device_t);
     83 #endif
     84 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     85 static int	ixv_init(struct ifnet *);
     86 static void	ixv_init_locked(struct adapter *);
     87 static void     ixv_stop(void *);
     88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     89 static int      ixv_media_change(struct ifnet *);
     90 static void     ixv_identify_hardware(struct adapter *);
     91 static int      ixv_allocate_pci_resources(struct adapter *,
     92 		    const struct pci_attach_args *);
     93 static int      ixv_allocate_msix(struct adapter *,
     94 		    const struct pci_attach_args *);
     95 static int	ixv_setup_msix(struct adapter *);
     96 static void	ixv_free_pci_resources(struct adapter *);
     97 static void     ixv_local_timer(void *);
     98 static void     ixv_local_timer_locked(void *);
     99 static void     ixv_setup_interface(device_t, struct adapter *);
    100 static void     ixv_config_link(struct adapter *);
    101 
    102 static void     ixv_initialize_transmit_units(struct adapter *);
    103 static void     ixv_initialize_receive_units(struct adapter *);
    104 
    105 static void     ixv_enable_intr(struct adapter *);
    106 static void     ixv_disable_intr(struct adapter *);
    107 static void     ixv_set_multi(struct adapter *);
    108 static void     ixv_update_link_status(struct adapter *);
    109 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    110 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    111 static void	ixv_configure_ivars(struct adapter *);
    112 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    113 
    114 static void	ixv_setup_vlan_support(struct adapter *);
    115 #if 0
    116 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    117 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    118 #endif
    119 
    120 static void	ixv_save_stats(struct adapter *);
    121 static void	ixv_init_stats(struct adapter *);
    122 static void	ixv_update_stats(struct adapter *);
    123 static void	ixv_add_stats_sysctls(struct adapter *);
    124 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    125 		    const char *, int *, int);
    126 
    127 /* The MSI/X Interrupt handlers */
    128 static int	ixv_msix_que(void *);
    129 static int	ixv_msix_mbx(void *);
    130 
    131 /* Deferred interrupt tasklets */
    132 static void	ixv_handle_que(void *);
    133 static void	ixv_handle_mbx(void *);
    134 
    135 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    136 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    137 
    138 #ifdef DEV_NETMAP
    139 /*
    140  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    141  * if_ix.c.
    142  */
    143 extern void ixgbe_netmap_attach(struct adapter *adapter);
    144 
    145 #include <net/netmap.h>
    146 #include <sys/selinfo.h>
    147 #include <dev/netmap/netmap_kern.h>
    148 #endif /* DEV_NETMAP */
    149 
    150 /*********************************************************************
    151  *  FreeBSD Device Interface Entry Points
    152  *********************************************************************/
    153 
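         /*
          * NetBSD autoconf(9) glue: CFATTACH_DECL3_NEW registers the softc
          * size and the match/attach/detach entry points, taking the place of
          * the FreeBSD device_method_t/DRIVER_MODULE machinery kept under
          * "#if 0" below.
          */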
    154 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    155     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    156     DVF_DETACH_SHUTDOWN);
    157 
    158 # if 0
    159 static device_method_t ixv_methods[] = {
    160 	/* Device interface */
    161 	DEVMETHOD(device_probe, ixv_probe),
    162 	DEVMETHOD(device_attach, ixv_attach),
    163 	DEVMETHOD(device_detach, ixv_detach),
    164 	DEVMETHOD(device_shutdown, ixv_shutdown),
    165 	DEVMETHOD_END
    166 };
    167 #endif
    168 
    169 #if 0
    170 static driver_t ixv_driver = {
    171 	"ixv", ixv_methods, sizeof(struct adapter),
    172 };
    173 
    174 devclass_t ixv_devclass;
    175 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    176 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    177 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    178 #ifdef DEV_NETMAP
    179 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    180 #endif /* DEV_NETMAP */
    181 /* XXX depend on 'ix' ? */
    182 #endif
    183 
    184 /*
    185 ** TUNEABLE PARAMETERS:
    186 */
    187 
    188 /* Number of Queues - do not exceed MSIX vectors - 1 */
    189 static int ixv_num_queues = 1;
    190 #define	TUNABLE_INT(__x, __y)
    191 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
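         /*
          * Note: TUNABLE_INT() is defined away above, so on NetBSD the
          * hw.ixv.* values below are compile-time defaults only.  Some of
          * them (enable_aim and the rx/tx processing limits) are re-exported
          * as run-time sysctls in ixv_sysctl_attach()/ixv_attach().
          */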
    192 
    193 /*
    194 ** AIM: Adaptive Interrupt Moderation
    195 ** which means that the interrupt rate
    196 ** is varied over time based on the
    197 ** traffic for that interrupt vector
    198 */
    199 static int ixv_enable_aim = FALSE;
    200 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    201 
    202 /* How many packets rxeof tries to clean at a time */
    203 static int ixv_rx_process_limit = 256;
    204 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    205 
    206 /* How many packets txeof tries to clean at a time */
    207 static int ixv_tx_process_limit = 256;
    208 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    209 
    210 /*
    211 ** Number of TX descriptors per ring,
    212 ** setting higher than RX as this seems
    213 ** the better performing choice.
    214 */
    215 static int ixv_txd = DEFAULT_TXD;
    216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    217 
    218 /* Number of RX descriptors per ring */
    219 static int ixv_rxd = DEFAULT_RXD;
    220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    221 
    222 /*
     223 ** Shadow VFTA table. This is needed because
    224 ** the real filter table gets cleared during
    225 ** a soft reset and we need to repopulate it.
    226 */
    227 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    228 
    229 /*********************************************************************
    230  *  Device identification routine
    231  *
     232  *  ixv_probe determines whether the driver should be loaded for
     233  *  the adapter, based on its PCI vendor/device ID.
    234  *
    235  *  return 1 on success, 0 on failure
    236  *********************************************************************/
    237 
    238 static int
    239 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    240 {
    241 #ifdef __HAVE_PCI_MSI_MSIX
    242 	const struct pci_attach_args *pa = aux;
    243 
    244 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    245 #else
    246 	return 0;
    247 #endif
    248 }
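         /*
          * Note that the VF has no INTx fallback (see ixv_setup_msix()), so
          * on platforms without __HAVE_PCI_MSI_MSIX the probe always fails.
          */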
    249 
    250 static ixgbe_vendor_info_t *
    251 ixv_lookup(const struct pci_attach_args *pa)
    252 {
    253 	pcireg_t subid;
    254 	ixgbe_vendor_info_t *ent;
    255 
    256 	INIT_DEBUGOUT("ixv_lookup: begin");
    257 
    258 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    259 		return NULL;
    260 
    261 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    262 
    263 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    264 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    265 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    266 
    267 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    268 		     (ent->subvendor_id == 0)) &&
    269 
    270 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    271 		     (ent->subdevice_id == 0))) {
    272 			return ent;
    273 		}
    274 	}
    275 	return NULL;
    276 }
    277 
    278 
    279 static void
    280 ixv_sysctl_attach(struct adapter *adapter)
    281 {
    282 	struct sysctllog **log;
    283 	const struct sysctlnode *rnode, *cnode;
    284 	device_t dev;
    285 
    286 	dev = adapter->dev;
    287 	log = &adapter->sysctllog;
    288 
    289 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    290 		aprint_error_dev(dev, "could not create sysctl root\n");
    291 		return;
    292 	}
    293 
    294 	if (sysctl_createv(log, 0, &rnode, &cnode,
    295 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    296 	    "debug", SYSCTL_DESCR("Debug Info"),
    297 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    298 		aprint_error_dev(dev, "could not create sysctl\n");
    299 
    300 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    301 	 * XXX It's that way in the FreeBSD driver that this derives from.
    302 	 */
    303 	if (sysctl_createv(log, 0, &rnode, &cnode,
    304 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    305 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    306 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    307 		aprint_error_dev(dev, "could not create sysctl\n");
    308 }
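         /*
          * Usage sketch (the node name is illustrative; the actual parent
          * node is whatever ixv_sysctl_instance() creates for this device):
          *
          *	sysctl -w hw.ixv0.enable_aim=1	# toggle interrupt moderation
          */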
    309 
    310 /*********************************************************************
    311  *  Device initialization routine
    312  *
    313  *  The attach entry point is called when the driver is being loaded.
    314  *  This routine identifies the type of hardware, allocates all resources
    315  *  and initializes the hardware.
    316  *
    317  *  return 0 on success, positive on failure
    318  *********************************************************************/
    319 
    320 static void
    321 ixv_attach(device_t parent, device_t dev, void *aux)
    322 {
    323 	struct adapter *adapter;
    324 	struct ixgbe_hw *hw;
    325 	int             error = 0;
    326 	ixgbe_vendor_info_t *ent;
    327 	const struct pci_attach_args *pa = aux;
    328 
    329 	INIT_DEBUGOUT("ixv_attach: begin");
    330 
    331 	/* Allocate, clear, and link in our adapter structure */
    332 	adapter = device_private(dev);
    333 	adapter->dev = dev;
    334 	hw = &adapter->hw;
    335 
    336 #ifdef DEV_NETMAP
    337 	adapter->init_locked = ixv_init_locked;
    338 	adapter->stop_locked = ixv_stop;
    339 #endif
    340 
    341 	adapter->osdep.pc = pa->pa_pc;
    342 	adapter->osdep.tag = pa->pa_tag;
    343 	adapter->osdep.dmat = pa->pa_dmat;
    344 	adapter->osdep.attached = false;
    345 
    346 	ent = ixv_lookup(pa);
    347 
    348 	KASSERT(ent != NULL);
    349 
    350 	aprint_normal(": %s, Version - %s\n",
    351 	    ixv_strings[ent->index], ixv_driver_version);
    352 
    353 	/* Core Lock Init*/
    354 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    355 
    356 	/* SYSCTL APIs */
    357 	ixv_sysctl_attach(adapter);
    358 
    359 	/* Set up the timer callout */
    360 	callout_init(&adapter->timer, 0);
    361 
    362 	/* Determine hardware revision */
    363 	ixv_identify_hardware(adapter);
    364 
    365 	/* Do base PCI setup - map BAR0 */
    366 	if (ixv_allocate_pci_resources(adapter, pa)) {
    367 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    368 		error = ENXIO;
    369 		goto err_out;
    370 	}
    371 
    372 	/* Sysctls for limiting the amount of work done in the taskqueues */
    373 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    374 	    "max number of rx packets to process",
    375 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    376 
    377 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    378 	    "max number of tx packets to process",
    379 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    380 
    381 	/* Do descriptor calc and sanity checks */
    382 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    383 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    384 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    385 		adapter->num_tx_desc = DEFAULT_TXD;
    386 	} else
    387 		adapter->num_tx_desc = ixv_txd;
    388 
    389 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    390 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    391 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    392 		adapter->num_rx_desc = DEFAULT_RXD;
    393 	} else
    394 		adapter->num_rx_desc = ixv_rxd;
    395 
    396 	/* Allocate our TX/RX Queues */
    397 	if (ixgbe_allocate_queues(adapter)) {
    398 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    399 		error = ENOMEM;
    400 		goto err_out;
    401 	}
    402 
    403 	/*
     404 	** Initialize the shared code: it's at
     405 	** this point that the MAC type is set.
    406 	*/
    407 	error = ixgbe_init_shared_code(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    410 		error = EIO;
    411 		goto err_late;
    412 	}
    413 
    414 	/* Setup the mailbox */
    415 	ixgbe_init_mbx_params_vf(hw);
    416 
    417 	/* Reset mbox api to 1.0 */
    418 	error = ixgbe_reset_hw(hw);
    419 	if (error == IXGBE_ERR_RESET_FAILED)
    420 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    421 	else if (error)
    422 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    423 	if (error) {
    424 		error = EIO;
    425 		goto err_late;
    426 	}
    427 
    428 	/* Negotiate mailbox API version */
    429 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    430 	if (error) {
    431 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    432 		error = EIO;
    433 		goto err_late;
    434 	}
    435 
    436 	error = ixgbe_init_hw(hw);
    437 	if (error) {
    438 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    439 		error = EIO;
    440 		goto err_late;
    441 	}
    442 
    443 	error = ixv_allocate_msix(adapter, pa);
    444 	if (error) {
    445 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    446 		goto err_late;
    447 	}
    448 
    449 	/* If no mac address was assigned, make a random one */
    450 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    451 		u8 addr[ETHER_ADDR_LEN];
    452 		uint64_t rndval = cprng_fast64();
    453 
    454 		memcpy(addr, &rndval, sizeof(addr));
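         		/*
         		 * Clear the multicast bit and set the locally
         		 * administered bit so the random address is a
         		 * valid unicast LAA.
         		 */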
    455 		addr[0] &= 0xFE;
    456 		addr[0] |= 0x02;
    457 		bcopy(addr, hw->mac.addr, sizeof(addr));
    458 	}
    459 
    460 	/* Setup OS specific network interface */
    461 	ixv_setup_interface(dev, adapter);
    462 
    463 	/* Do the stats setup */
    464 	ixv_save_stats(adapter);
    465 	ixv_init_stats(adapter);
    466 	ixv_add_stats_sysctls(adapter);
    467 
    468 	/* Register for VLAN events */
    469 #if 0 /* XXX delete after write? */
    470 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    471 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    472 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    473 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    474 #endif
    475 
    476 #ifdef DEV_NETMAP
    477 	ixgbe_netmap_attach(adapter);
    478 #endif /* DEV_NETMAP */
    479 	INIT_DEBUGOUT("ixv_attach: end");
    480 	adapter->osdep.attached = true;
    481 	return;
    482 
    483 err_late:
    484 	ixgbe_free_transmit_structures(adapter);
    485 	ixgbe_free_receive_structures(adapter);
    486 err_out:
    487 	ixv_free_pci_resources(adapter);
    488 	return;
    489 
    490 }
    491 
    492 /*********************************************************************
    493  *  Device removal routine
    494  *
    495  *  The detach entry point is called when the driver is being removed.
    496  *  This routine stops the adapter and deallocates all the resources
    497  *  that were allocated for driver operation.
    498  *
    499  *  return 0 on success, positive on failure
    500  *********************************************************************/
    501 
    502 static int
    503 ixv_detach(device_t dev, int flags)
    504 {
    505 	struct adapter *adapter = device_private(dev);
    506 	struct ix_queue *que = adapter->queues;
    507 
    508 	INIT_DEBUGOUT("ixv_detach: begin");
    509 	if (adapter->osdep.attached == false)
    510 		return 0;
    511 
    512 #if NVLAN > 0
    513 	/* Make sure VLANS are not using driver */
    514 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    515 		;	/* nothing to do: no VLANs */
    516 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    517 		vlan_ifdetach(adapter->ifp);
    518 	else {
    519 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    520 		return EBUSY;
    521 	}
    522 #endif
    523 
    524 	IXGBE_CORE_LOCK(adapter);
    525 	ixv_stop(adapter);
    526 	IXGBE_CORE_UNLOCK(adapter);
    527 
    528 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    529 #ifndef IXGBE_LEGACY_TX
    530 		struct tx_ring *txr = adapter->tx_rings;
    531 
    532 		softint_disestablish(txr->txr_si);
    533 #endif
    534 		softint_disestablish(que->que_si);
    535 	}
    536 
    537 	/* Drain the Mailbox(link) queue */
    538 	softint_disestablish(adapter->link_si);
    539 
    540 	/* Unregister VLAN events */
    541 #if 0 /* XXX msaitoh delete after write? */
    542 	if (adapter->vlan_attach != NULL)
    543 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    544 	if (adapter->vlan_detach != NULL)
    545 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    546 #endif
    547 
    548 	ether_ifdetach(adapter->ifp);
    549 	callout_halt(&adapter->timer, NULL);
    550 #ifdef DEV_NETMAP
    551 	netmap_detach(adapter->ifp);
    552 #endif /* DEV_NETMAP */
    553 	ixv_free_pci_resources(adapter);
    554 #if 0 /* XXX the NetBSD port is probably missing something here */
    555 	bus_generic_detach(dev);
    556 #endif
    557 	if_detach(adapter->ifp);
    558 
    559 	ixgbe_free_transmit_structures(adapter);
    560 	ixgbe_free_receive_structures(adapter);
    561 
    562 	IXGBE_CORE_LOCK_DESTROY(adapter);
    563 	return (0);
    564 }
    565 
    566 /*********************************************************************
    567  *
    568  *  Shutdown entry point
    569  *
    570  **********************************************************************/
    571 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    572 static int
    573 ixv_shutdown(device_t dev)
    574 {
    575 	struct adapter *adapter = device_private(dev);
    576 	IXGBE_CORE_LOCK(adapter);
    577 	ixv_stop(adapter);
    578 	IXGBE_CORE_UNLOCK(adapter);
    579 	return (0);
    580 }
    581 #endif
    582 
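         /*
          * ixv_ifflags_cb() is registered via ether_set_ifflags_cb() in
          * ixv_setup_interface().  It records the latest interface flags and
          * returns ENETRESET when a flag other than IFF_CANTCHANGE/IFF_DEBUG
          * changes, signalling that the interface should be reinitialized.
          */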
    583 static int
    584 ixv_ifflags_cb(struct ethercom *ec)
    585 {
    586 	struct ifnet *ifp = &ec->ec_if;
    587 	struct adapter *adapter = ifp->if_softc;
    588 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    589 
    590 	IXGBE_CORE_LOCK(adapter);
    591 
    592 	if (change != 0)
    593 		adapter->if_flags = ifp->if_flags;
    594 
    595 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    596 		rc = ENETRESET;
    597 
    598 	IXGBE_CORE_UNLOCK(adapter);
    599 
    600 	return rc;
    601 }
    602 
    603 /*********************************************************************
    604  *  Ioctl entry point
    605  *
    606  *  ixv_ioctl is called when the user wants to configure the
    607  *  interface.
    608  *
    609  *  return 0 on success, positive on failure
    610  **********************************************************************/
    611 
    612 static int
    613 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    614 {
    615 	struct adapter	*adapter = ifp->if_softc;
    616 	struct ifcapreq *ifcr = data;
    617 	struct ifreq	*ifr = (struct ifreq *) data;
    618 	int             error = 0;
    619 	int l4csum_en;
    620 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    621 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    622 
    623 	switch (command) {
    624 	case SIOCSIFFLAGS:
    625 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    626 		break;
    627 	case SIOCADDMULTI:
    628 	case SIOCDELMULTI:
    629 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    630 		break;
    631 	case SIOCSIFMEDIA:
    632 	case SIOCGIFMEDIA:
    633 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    634 		break;
    635 	case SIOCSIFCAP:
    636 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    637 		break;
    638 	case SIOCSIFMTU:
    639 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    640 		break;
    641 	default:
    642 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    643 		break;
    644 	}
    645 
    646 	switch (command) {
    647 	case SIOCSIFMEDIA:
    648 	case SIOCGIFMEDIA:
    649 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    650 	case SIOCSIFCAP:
    651 		/* Layer-4 Rx checksum offload has to be turned on and
    652 		 * off as a unit.
    653 		 */
    654 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    655 		if (l4csum_en != l4csum && l4csum_en != 0)
    656 			return EINVAL;
    657 		/*FALLTHROUGH*/
    658 	case SIOCADDMULTI:
    659 	case SIOCDELMULTI:
    660 	case SIOCSIFFLAGS:
    661 	case SIOCSIFMTU:
    662 	default:
    663 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    664 			return error;
    665 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    666 			;
    667 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    668 			IXGBE_CORE_LOCK(adapter);
    669 			ixv_init_locked(adapter);
    670 			IXGBE_CORE_UNLOCK(adapter);
    671 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    672 			/*
    673 			 * Multicast list has changed; set the hardware filter
    674 			 * accordingly.
    675 			 */
    676 			IXGBE_CORE_LOCK(adapter);
    677 			ixv_disable_intr(adapter);
    678 			ixv_set_multi(adapter);
    679 			ixv_enable_intr(adapter);
    680 			IXGBE_CORE_UNLOCK(adapter);
    681 		}
    682 		return 0;
    683 	}
    684 }
    685 
    686 /*********************************************************************
    687  *  Init entry point
    688  *
     689  *  This routine is used in two ways. It is used by the stack as
     690  *  the init entry point in the network interface structure. It is
     691  *  also used by the driver as a hw/sw initialization routine to get
     692  *  to a consistent state.
    693  *
    694  *  return 0 on success, positive on failure
    695  **********************************************************************/
    696 #define IXGBE_MHADD_MFS_SHIFT 16
    697 
    698 static void
    699 ixv_init_locked(struct adapter *adapter)
    700 {
    701 	struct ifnet	*ifp = adapter->ifp;
    702 	device_t 	dev = adapter->dev;
    703 	struct ixgbe_hw *hw = &adapter->hw;
    704 	int error = 0;
    705 
    706 	INIT_DEBUGOUT("ixv_init_locked: begin");
    707 	KASSERT(mutex_owned(&adapter->core_mtx));
    708 	hw->adapter_stopped = FALSE;
    709 	ixgbe_stop_adapter(hw);
     710 	callout_stop(&adapter->timer);
     711 
     712 	/* reprogram the RAR[0] in case the user changed it. */
     713 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
     714 
     715 	/* Get the latest MAC address; the user may have set an LAA */
     716 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
     717 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
     718 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    719 	hw->addr_ctrl.rar_used_count = 1;
    720 
    721 	/* Prepare transmit descriptors and buffers */
    722 	if (ixgbe_setup_transmit_structures(adapter)) {
    723 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    724 		ixv_stop(adapter);
    725 		return;
    726 	}
    727 
    728 	/* Reset VF and renegotiate mailbox API version */
    729 	ixgbe_reset_hw(hw);
    730 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    731 	if (error)
    732 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    733 
    734 	ixv_initialize_transmit_units(adapter);
    735 
    736 	/* Setup Multicast table */
    737 	ixv_set_multi(adapter);
    738 
    739 	/*
    740 	** Determine the correct mbuf pool
    741 	** for doing jumbo/headersplit
    742 	*/
    743 	if (ifp->if_mtu > ETHERMTU)
    744 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    745 	else
    746 		adapter->rx_mbuf_sz = MCLBYTES;
    747 
    748 	/* Prepare receive descriptors and buffers */
    749 	if (ixgbe_setup_receive_structures(adapter)) {
    750 		device_printf(dev, "Could not setup receive structures\n");
    751 		ixv_stop(adapter);
    752 		return;
    753 	}
    754 
    755 	/* Configure RX settings */
    756 	ixv_initialize_receive_units(adapter);
    757 
    758 #if 0 /* XXX isn't it required? -- msaitoh  */
    759 	/* Set the various hardware offload abilities */
    760 	ifp->if_hwassist = 0;
    761 	if (ifp->if_capenable & IFCAP_TSO4)
    762 		ifp->if_hwassist |= CSUM_TSO;
    763 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    764 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    765 #if __FreeBSD_version >= 800000
    766 		ifp->if_hwassist |= CSUM_SCTP;
    767 #endif
    768 	}
    769 #endif
    770 
    771 	/* Set up VLAN offload and filter */
    772 	ixv_setup_vlan_support(adapter);
    773 
    774 	/* Set up MSI/X routing */
    775 	ixv_configure_ivars(adapter);
    776 
    777 	/* Set up auto-mask */
    778 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    779 
     780 	/* Set moderation on the Link interrupt */
     781 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    782 
    783 	/* Stats init */
    784 	ixv_init_stats(adapter);
    785 
    786 	/* Config/Enable Link */
    787 	ixv_config_link(adapter);
    788 
    789 	/* Start watchdog */
    790 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    791 
    792 	/* And now turn on interrupts */
    793 	ixv_enable_intr(adapter);
    794 
    795 	/* Now inform the stack we're ready */
    796 	ifp->if_flags |= IFF_RUNNING;
    797 	ifp->if_flags &= ~IFF_OACTIVE;
    798 
    799 	return;
    800 }
    801 
    802 static int
    803 ixv_init(struct ifnet *ifp)
    804 {
    805 	struct adapter *adapter = ifp->if_softc;
    806 
    807 	IXGBE_CORE_LOCK(adapter);
    808 	ixv_init_locked(adapter);
    809 	IXGBE_CORE_UNLOCK(adapter);
    810 	return 0;
    811 }
    812 
    813 
    814 /*
    815 **
    816 ** MSIX Interrupt Handlers and Tasklets
    817 **
    818 */
    819 
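         /*
          * The inline helpers below drive the VF interrupt mask registers:
          * VTEIMS sets mask bits (enabling a queue vector), VTEIMC clears
          * them (disabling it), and VTEICS sets interrupt cause bits to
          * force an interrupt on the selected queues.
          */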
    820 static inline void
    821 ixv_enable_queue(struct adapter *adapter, u32 vector)
    822 {
    823 	struct ixgbe_hw *hw = &adapter->hw;
    824 	u32	queue = 1 << vector;
    825 	u32	mask;
    826 
    827 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    828 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    829 }
    830 
    831 static inline void
    832 ixv_disable_queue(struct adapter *adapter, u32 vector)
    833 {
    834 	struct ixgbe_hw *hw = &adapter->hw;
     835 	u64	queue = (u64)1 << vector;
    836 	u32	mask;
    837 
    838 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    839 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    840 }
    841 
    842 static inline void
    843 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    844 {
    845 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    846 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    847 }
    848 
    849 
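         /*
          * Deferred (softint) queue handler: runs rxeof/txeof outside hard
          * interrupt context, reschedules itself while there is more RX work,
          * and only re-enables the queue interrupt once the backlog is drained.
          */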
    850 static void
    851 ixv_handle_que(void *context)
    852 {
    853 	struct ix_queue *que = context;
    854 	struct adapter  *adapter = que->adapter;
    855 	struct tx_ring	*txr = que->txr;
    856 	struct ifnet    *ifp = adapter->ifp;
    857 	bool		more;
    858 
    859 	if (ifp->if_flags & IFF_RUNNING) {
    860 		more = ixgbe_rxeof(que);
    861 		IXGBE_TX_LOCK(txr);
    862 		ixgbe_txeof(txr);
    863 #ifndef IXGBE_LEGACY_TX
    864 		if (pcq_peek(txr->txr_interq) != NULL)
    865 			ixgbe_mq_start_locked(ifp, txr);
    866 #else
    867 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
    868 			ixgbe_start_locked(txr, ifp);
    869 #endif
    870 		IXGBE_TX_UNLOCK(txr);
    871 		if (more) {
    872 			adapter->req.ev_count++;
    873 			softint_schedule(que->que_si);
    874 			return;
    875 		}
    876 	}
    877 
    878 	/* Reenable this interrupt */
    879 	ixv_enable_queue(adapter, que->msix);
    880 	return;
    881 }
    882 
    883 /*********************************************************************
    884  *
     885  *  MSI-X Queue Interrupt Service routine
    886  *
    887  **********************************************************************/
    888 int
    889 ixv_msix_que(void *arg)
    890 {
    891 	struct ix_queue	*que = arg;
    892 	struct adapter  *adapter = que->adapter;
    893 	struct ifnet    *ifp = adapter->ifp;
    894 	struct tx_ring	*txr = que->txr;
    895 	struct rx_ring	*rxr = que->rxr;
    896 	bool		more;
    897 	u32		newitr = 0;
    898 
    899 	ixv_disable_queue(adapter, que->msix);
    900 	++que->irqs.ev_count;
    901 
    902 #ifdef __NetBSD__
    903 	/* Don't run ixgbe_rxeof in interrupt context */
    904 	more = true;
    905 #else
    906 	more = ixgbe_rxeof(que);
    907 #endif
    908 
    909 	IXGBE_TX_LOCK(txr);
    910 	ixgbe_txeof(txr);
    911 	/*
    912 	** Make certain that if the stack
    913 	** has anything queued the task gets
    914 	** scheduled to handle it.
    915 	*/
    916 #ifdef IXGBE_LEGACY_TX
    917 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
    918 		ixgbe_start_locked(txr, ifp);
    919 #else
    920 	if (pcq_peek(txr->txr_interq) != NULL)
    921 		ixgbe_mq_start_locked(ifp, txr);
    922 #endif
    923 	IXGBE_TX_UNLOCK(txr);
    924 
    925 	/* Do AIM now? */
    926 
    927 	if (ixv_enable_aim == FALSE)
    928 		goto no_calc;
    929 	/*
    930 	** Do Adaptive Interrupt Moderation:
     931 	**  - Write out last calculated setting
    932 	**  - Calculate based on average size over
    933 	**    the last interval.
    934 	*/
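         	/*
         	 * Illustrative numbers: with ~1500-byte frames the average is
         	 * about 1500 bytes, +24 gives 1524; that is outside the
         	 * (300,1200) window so it is halved to 762, mirrored into the
         	 * upper 16 bits and written to VTEITR on the next interrupt.
         	 */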
    935         if (que->eitr_setting)
    936                 IXGBE_WRITE_REG(&adapter->hw,
    937                     IXGBE_VTEITR(que->msix),
    938 		    que->eitr_setting);
    939 
    940         que->eitr_setting = 0;
    941 
    942         /* Idle, do nothing */
    943         if ((txr->bytes == 0) && (rxr->bytes == 0))
    944                 goto no_calc;
    945 
    946 	if ((txr->bytes) && (txr->packets))
    947                	newitr = txr->bytes/txr->packets;
    948 	if ((rxr->bytes) && (rxr->packets))
    949 		newitr = max(newitr,
    950 		    (rxr->bytes / rxr->packets));
    951 	newitr += 24; /* account for hardware frame, crc */
    952 
    953 	/* set an upper boundary */
    954 	newitr = min(newitr, 3000);
    955 
    956 	/* Be nice to the mid range */
    957 	if ((newitr > 300) && (newitr < 1200))
    958 		newitr = (newitr / 3);
    959 	else
    960 		newitr = (newitr / 2);
    961 
    962 	newitr |= newitr << 16;
    963 
    964         /* save for next interrupt */
    965         que->eitr_setting = newitr;
    966 
    967         /* Reset state */
    968         txr->bytes = 0;
    969         txr->packets = 0;
    970         rxr->bytes = 0;
    971         rxr->packets = 0;
    972 
    973 no_calc:
    974 	if (more)
    975 		softint_schedule(que->que_si);
    976 	else /* Reenable this interrupt */
    977 		ixv_enable_queue(adapter, que->msix);
    978 	return 1;
    979 }
    980 
    981 static int
    982 ixv_msix_mbx(void *arg)
    983 {
    984 	struct adapter	*adapter = arg;
    985 	struct ixgbe_hw *hw = &adapter->hw;
    986 	u32		reg;
    987 
    988 	++adapter->link_irq.ev_count;
    989 
    990 	/* First get the cause */
    991 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    992 	/* Clear interrupt with write */
    993 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    994 
    995 	/* Link status change */
    996 	if (reg & IXGBE_EICR_LSC)
    997 		softint_schedule(adapter->link_si);
    998 
    999 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1000 	return 1;
   1001 }
   1002 
   1003 /*********************************************************************
   1004  *
   1005  *  Media Ioctl callback
   1006  *
   1007  *  This routine is called whenever the user queries the status of
   1008  *  the interface using ifconfig.
   1009  *
   1010  **********************************************************************/
   1011 static void
   1012 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1013 {
   1014 	struct adapter *adapter = ifp->if_softc;
   1015 
   1016 	INIT_DEBUGOUT("ixv_media_status: begin");
   1017 	IXGBE_CORE_LOCK(adapter);
   1018 	ixv_update_link_status(adapter);
   1019 
   1020 	ifmr->ifm_status = IFM_AVALID;
   1021 	ifmr->ifm_active = IFM_ETHER;
   1022 
   1023 	if (!adapter->link_active) {
   1024 		IXGBE_CORE_UNLOCK(adapter);
   1025 		return;
   1026 	}
   1027 
   1028 	ifmr->ifm_status |= IFM_ACTIVE;
   1029 
   1030 	switch (adapter->link_speed) {
   1031 		case IXGBE_LINK_SPEED_1GB_FULL:
   1032 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1033 			break;
   1034 		case IXGBE_LINK_SPEED_10GB_FULL:
   1035 			ifmr->ifm_active |= IFM_FDX;
   1036 			break;
   1037 	}
   1038 
   1039 	IXGBE_CORE_UNLOCK(adapter);
   1040 
   1041 	return;
   1042 }
   1043 
   1044 /*********************************************************************
   1045  *
   1046  *  Media Ioctl callback
   1047  *
   1048  *  This routine is called when the user changes speed/duplex using
    1049  *  the media/mediaopt options with ifconfig.
   1050  *
   1051  **********************************************************************/
   1052 static int
   1053 ixv_media_change(struct ifnet * ifp)
   1054 {
   1055 	struct adapter *adapter = ifp->if_softc;
   1056 	struct ifmedia *ifm = &adapter->media;
   1057 
   1058 	INIT_DEBUGOUT("ixv_media_change: begin");
   1059 
   1060 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1061 		return (EINVAL);
   1062 
   1063         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1064         case IFM_AUTO:
   1065                 break;
   1066         default:
   1067                 device_printf(adapter->dev, "Only auto media type\n");
   1068 		return (EINVAL);
   1069         }
   1070 
   1071 	return (0);
   1072 }
   1073 
   1074 
   1075 /*********************************************************************
   1076  *  Multicast Update
   1077  *
    1078  *  This routine is called whenever the multicast address list is updated.
   1079  *
   1080  **********************************************************************/
   1081 #define IXGBE_RAR_ENTRIES 16
   1082 
   1083 static void
   1084 ixv_set_multi(struct adapter *adapter)
   1085 {
   1086 	struct ether_multi *enm;
   1087 	struct ether_multistep step;
   1088 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1089 	u8	*update_ptr;
   1090 	int	mcnt = 0;
   1091 	struct ethercom *ec = &adapter->osdep.ec;
   1092 
   1093 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1094 
   1095 	ETHER_FIRST_MULTI(step, ec, enm);
   1096 	while (enm != NULL) {
   1097 		bcopy(enm->enm_addrlo,
   1098 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1099 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1100 		mcnt++;
   1101 		/* XXX This might be required --msaitoh */
   1102 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1103 			break;
   1104 		ETHER_NEXT_MULTI(step, enm);
   1105 	}
   1106 
   1107 	update_ptr = mta;
   1108 
   1109 	ixgbe_update_mc_addr_list(&adapter->hw,
   1110 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1111 
   1112 	return;
   1113 }
   1114 
   1115 /*
    1116  * This is an iterator function needed by the multicast
    1117  * shared code. It simply feeds the shared code routine the
    1118  * addresses from ixv_set_multi()'s array one by one.
   1119  */
   1120 static u8 *
   1121 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1122 {
   1123 	u8 *addr = *update_ptr;
   1124 	u8 *newptr;
   1125 	*vmdq = 0;
   1126 
   1127 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1128 	*update_ptr = newptr;
   1129 	return addr;
   1130 }
   1131 
   1132 /*********************************************************************
   1133  *  Timer routine
   1134  *
    1135  *  This routine checks the link status, updates statistics,
   1136  *  and runs the watchdog check.
   1137  *
   1138  **********************************************************************/
   1139 
   1140 static void
   1141 ixv_local_timer(void *arg)
   1142 {
   1143 	struct adapter *adapter = arg;
   1144 
   1145 	IXGBE_CORE_LOCK(adapter);
   1146 	ixv_local_timer_locked(adapter);
   1147 	IXGBE_CORE_UNLOCK(adapter);
   1148 }
   1149 
   1150 static void
   1151 ixv_local_timer_locked(void *arg)
   1152 {
   1153 	struct adapter	*adapter = arg;
   1154 	device_t	dev = adapter->dev;
   1155 	struct ix_queue	*que = adapter->queues;
   1156 	u64		queues = 0;
   1157 	int		hung = 0;
   1158 
   1159 	KASSERT(mutex_owned(&adapter->core_mtx));
   1160 
   1161 	ixv_update_link_status(adapter);
   1162 
   1163 	/* Stats Update */
   1164 	ixv_update_stats(adapter);
   1165 
   1166 	/*
   1167 	** Check the TX queues status
   1168 	**      - mark hung queues so we don't schedule on them
   1169 	**      - watchdog only if all queues show hung
   1170 	*/
   1171 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1172 		/* Keep track of queues with work for soft irq */
   1173 		if (que->txr->busy)
   1174 			queues |= ((u64)1 << que->me);
   1175 		/*
    1176 		** Each time txeof runs without cleaning while there
    1177 		** are uncleaned descriptors, it increments busy. If
    1178 		** we get to the MAX we declare the queue hung.
   1179 		*/
   1180 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1181 			++hung;
   1182 			/* Mark the queue as inactive */
   1183 			adapter->active_queues &= ~((u64)1 << que->me);
   1184 			continue;
   1185 		} else {
   1186 			/* Check if we've come back from hung */
   1187 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1188                                 adapter->active_queues |= ((u64)1 << que->me);
   1189 		}
   1190 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1191 			device_printf(dev,"Warning queue %d "
   1192 			    "appears to be hung!\n", i);
   1193 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1194 			++hung;
   1195 		}
   1196 
   1197 	}
   1198 
   1199 	/* Only truly watchdog if all queues show hung */
   1200 	if (hung == adapter->num_queues)
   1201 		goto watchdog;
   1202 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1203 		ixv_rearm_queues(adapter, queues);
   1204 	}
   1205 
   1206 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1207 	return;
   1208 
   1209 watchdog:
   1210 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1211 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1212 	adapter->watchdog_events.ev_count++;
   1213 	ixv_init_locked(adapter);
   1214 }
   1215 
   1216 /*
    1217 ** Note: this routine only updates the OS on the link state;
    1218 **	the real check of the hardware happens with
    1219 **	a link interrupt.
   1220 */
   1221 static void
   1222 ixv_update_link_status(struct adapter *adapter)
   1223 {
   1224 	struct ifnet	*ifp = adapter->ifp;
   1225 	device_t dev = adapter->dev;
   1226 
   1227 	if (adapter->link_up){
   1228 		if (adapter->link_active == FALSE) {
   1229 			if (bootverbose)
   1230 				device_printf(dev,"Link is up %d Gbps %s \n",
   1231 				    ((adapter->link_speed == 128)? 10:1),
   1232 				    "Full Duplex");
   1233 			adapter->link_active = TRUE;
   1234 			if_link_state_change(ifp, LINK_STATE_UP);
   1235 		}
   1236 	} else { /* Link down */
   1237 		if (adapter->link_active == TRUE) {
   1238 			if (bootverbose)
   1239 				device_printf(dev,"Link is Down\n");
   1240 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1241 			adapter->link_active = FALSE;
   1242 		}
   1243 	}
   1244 
   1245 	return;
   1246 }
   1247 
   1248 
   1249 static void
   1250 ixv_ifstop(struct ifnet *ifp, int disable)
   1251 {
   1252 	struct adapter *adapter = ifp->if_softc;
   1253 
   1254 	IXGBE_CORE_LOCK(adapter);
   1255 	ixv_stop(adapter);
   1256 	IXGBE_CORE_UNLOCK(adapter);
   1257 }
   1258 
   1259 /*********************************************************************
   1260  *
   1261  *  This routine disables all traffic on the adapter by issuing a
   1262  *  global reset on the MAC and deallocates TX/RX buffers.
   1263  *
   1264  **********************************************************************/
   1265 
   1266 static void
   1267 ixv_stop(void *arg)
   1268 {
   1269 	struct ifnet   *ifp;
   1270 	struct adapter *adapter = arg;
   1271 	struct ixgbe_hw *hw = &adapter->hw;
   1272 	ifp = adapter->ifp;
   1273 
   1274 	KASSERT(mutex_owned(&adapter->core_mtx));
   1275 
   1276 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1277 	ixv_disable_intr(adapter);
   1278 
   1279 	/* Tell the stack that the interface is no longer active */
   1280 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1281 
   1282 	ixgbe_reset_hw(hw);
   1283 	adapter->hw.adapter_stopped = FALSE;
   1284 	ixgbe_stop_adapter(hw);
   1285 	callout_stop(&adapter->timer);
   1286 
   1287 	/* reprogram the RAR[0] in case user changed it. */
   1288 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1289 
   1290 	return;
   1291 }
   1292 
   1293 
   1294 /*********************************************************************
   1295  *
   1296  *  Determine hardware revision.
   1297  *
   1298  **********************************************************************/
   1299 static void
   1300 ixv_identify_hardware(struct adapter *adapter)
   1301 {
   1302 	pcitag_t tag;
   1303 	pci_chipset_tag_t pc;
   1304 	pcireg_t subid, id;
   1305 	struct ixgbe_hw *hw = &adapter->hw;
   1306 
   1307 	pc = adapter->osdep.pc;
   1308 	tag = adapter->osdep.tag;
   1309 
   1310 	/*
    1311 	** Make sure BUSMASTER is set; on a VM under
    1312 	** KVM it may not be, and that will break things.
   1313 	*/
   1314 	ixgbe_pci_enable_busmaster(pc, tag);
   1315 
   1316 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1317 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1318 
   1319 	/* Save off the information about this board */
   1320 	hw->vendor_id = PCI_VENDOR(id);
   1321 	hw->device_id = PCI_PRODUCT(id);
   1322 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1323 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1324 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1325 
   1326 	/* We need this to determine device-specific things */
   1327 	ixgbe_set_mac_type(hw);
   1328 
   1329 	/* Set the right number of segments */
   1330 	adapter->num_segs = IXGBE_82599_SCATTER;
   1331 
   1332 	return;
   1333 }
   1334 
   1335 /*********************************************************************
   1336  *
   1337  *  Setup MSIX Interrupt resources and handlers
   1338  *
   1339  **********************************************************************/
   1340 static int
   1341 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1342 {
   1343 	device_t	dev = adapter->dev;
   1344 	struct ix_queue *que = adapter->queues;
   1345 	struct		tx_ring *txr = adapter->tx_rings;
   1346 	int 		error, rid, vector = 0;
   1347 	pci_chipset_tag_t pc;
   1348 	pcitag_t	tag;
   1349 	char intrbuf[PCI_INTRSTR_LEN];
   1350 	const char	*intrstr = NULL;
   1351 	kcpuset_t	*affinity;
   1352 	int		cpu_id = 0;
   1353 
   1354 	pc = adapter->osdep.pc;
   1355 	tag = adapter->osdep.tag;
   1356 
   1357 	if (pci_msix_alloc_exact(pa,
   1358 		&adapter->osdep.intrs, IXG_MAX_NINTR) != 0)
   1359 		return (ENXIO);
   1360 
   1361 	kcpuset_create(&affinity, false);
   1362 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1363 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1364 		    sizeof(intrbuf));
   1365 #ifdef IXV_MPSAFE
   1366 		pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1367 		    true);
   1368 #endif
   1369 		/* Set the handler function */
   1370 		adapter->osdep.ihs[i] = pci_intr_establish(pc,
   1371 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que);
   1372 		if (adapter->osdep.ihs[i] == NULL) {
   1373 			que->res = NULL;
   1374 			aprint_error_dev(dev,
   1375 			    "Failed to register QUE handler");
   1376 			kcpuset_destroy(affinity);
   1377 			return (ENXIO);
   1378 		}
   1379 		que->msix = vector;
   1380         	adapter->active_queues |= (u64)(1 << que->msix);
   1381 
   1382 		cpu_id = i;
   1383 		/* Round-robin affinity */
   1384 		kcpuset_zero(affinity);
   1385 		kcpuset_set(affinity, cpu_id % ncpu);
   1386 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1387 		    NULL);
   1388 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1389 		    intrstr);
   1390 		if (error == 0)
   1391 			aprint_normal(", bound queue %d to cpu %d\n",
   1392 			    i, cpu_id);
   1393 		else
   1394 			aprint_normal("\n");
   1395 
   1396 #ifndef IXGBE_LEGACY_TX
   1397 		txr->txr_si = softint_establish(SOFTINT_NET,
   1398 		    ixgbe_deferred_mq_start, txr);
   1399 #endif
   1400 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1401 		    que);
   1402 		if (que->que_si == NULL) {
   1403 			aprint_error_dev(dev,
   1404 			    "could not establish software interrupt\n");
   1405 		}
   1406 	}
   1407 
   1408 	/* and Mailbox */
   1409 	cpu_id++;
   1410 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1411 	    sizeof(intrbuf));
   1412 #ifdef IXG_MPSAFE
   1413 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   1414 #endif
   1415 	/* Set the mbx handler function */
   1416 	adapter->osdep.ihs[vector] = pci_intr_establish(pc,
   1417 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter);
   1418 	if (adapter->osdep.ihs[vector] == NULL) {
   1419 		adapter->res = NULL;
   1420 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1421 		kcpuset_destroy(affinity);
   1422 		return (ENXIO);
   1423 	}
   1424 	/* Round-robin affinity */
   1425 	kcpuset_zero(affinity);
   1426 	kcpuset_set(affinity, cpu_id % ncpu);
   1427 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1428 
   1429 	aprint_normal_dev(dev,
   1430 	    "for link, interrupting at %s, ", intrstr);
   1431 	if (error == 0) {
   1432 		aprint_normal("affinity to cpu %d\n", cpu_id);
   1433 	}
   1434 	adapter->vector = vector;
   1435 	/* Tasklets for Mailbox */
   1436 	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1437 	    adapter);
   1438 	/*
    1439 	** Due to a broken design, QEMU will fail to properly
    1440 	** enable the guest for MSI-X unless the vectors in
    1441 	** the table are all set up, so we must rewrite the
    1442 	** ENABLE bit in the MSI-X control register again at
    1443 	** this point to get it to initialize us successfully.
   1444 	*/
   1445 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1446 		int msix_ctrl;
   1447 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1448 		rid += PCI_MSIX_CTL;
   1449 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1450 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1451 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1452 	}
   1453 
   1454 	return (0);
   1455 }
   1456 
   1457 /*
    1458  * Set up MSI-X resources. Note that the VF
    1459  * device MUST use MSI-X; there is no fallback.
   1460  */
   1461 static int
   1462 ixv_setup_msix(struct adapter *adapter)
   1463 {
   1464 	device_t dev = adapter->dev;
   1465 	int want, msgs;
   1466 
   1467 	/* Must have at least 2 MSIX vectors */
   1468 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1469 	if (msgs < 2) {
   1470 		aprint_error_dev(dev,"MSIX config error\n");
   1471 		return (ENXIO);
   1472 	}
   1473 	msgs = MIN(msgs, IXG_MAX_NINTR);
   1474 
   1475 	/*
   1476 	** Want vectors for the queues,
    1477 	** plus an additional one for the mailbox.
   1478 	*/
   1479 	want = adapter->num_queues + 1;
   1480 	if (want > msgs) {
   1481 		want = msgs;
   1482 		adapter->num_queues = msgs - 1;
   1483 	} else
   1484 		msgs = want;
   1485 
   1486 	adapter->msix_mem = (void *)1; /* XXX */
   1487 	aprint_normal_dev(dev,
   1488 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1489 	return (want);
   1490 }
   1491 
   1492 
   1493 static int
   1494 ixv_allocate_pci_resources(struct adapter *adapter,
   1495     const struct pci_attach_args *pa)
   1496 {
   1497 	pcireg_t	memtype;
   1498 	device_t        dev = adapter->dev;
   1499 	bus_addr_t addr;
   1500 	int flags;
   1501 
   1502 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1503 
   1504 	switch (memtype) {
   1505 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1506 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1507 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1508 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1509 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1510 			goto map_err;
   1511 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1512 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1513 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1514 		}
   1515 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1516 		     adapter->osdep.mem_size, flags,
   1517 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1518 map_err:
   1519 			adapter->osdep.mem_size = 0;
   1520 			aprint_error_dev(dev, "unable to map BAR0\n");
   1521 			return ENXIO;
   1522 		}
   1523 		break;
   1524 	default:
   1525 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1526 		return ENXIO;
   1527 	}
   1528 
   1529 	/* Pick up the tuneable queues */
   1530 	adapter->num_queues = ixv_num_queues;
   1531 	adapter->hw.back = adapter;
   1532 
   1533 	/*
    1534 	** Now set up MSI-X; this should
    1535 	** return the number of
    1536 	** configured vectors.
   1537 	*/
   1538 	adapter->msix = ixv_setup_msix(adapter);
   1539 	if (adapter->msix == ENXIO)
   1540 		return (ENXIO);
   1541 	else
   1542 		return (0);
   1543 }
   1544 
   1545 static void
   1546 ixv_free_pci_resources(struct adapter * adapter)
   1547 {
   1548 	struct 		ix_queue *que = adapter->queues;
   1549 	int		rid;
   1550 
   1551 	/*
   1552 	**  Release all msix queue resources:
   1553 	*/
   1554 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1555 		rid = que->msix + 1;
   1556 		if (que->res != NULL)
   1557 			pci_intr_disestablish(adapter->osdep.pc,
   1558 			    adapter->osdep.ihs[i]);
   1559 	}
   1560 
   1561 
   1562 	/* Clean the Legacy or Link interrupt last */
   1563 	if (adapter->vector) /* we are doing MSIX */
   1564 		rid = adapter->vector + 1;
   1565 	else
   1566 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
   1567 
   1568 	if (adapter->osdep.ihs[rid] != NULL)
   1569 		pci_intr_disestablish(adapter->osdep.pc,
   1570 		    adapter->osdep.ihs[rid]);
   1571 	adapter->osdep.ihs[rid] = NULL;
   1572 
   1573 #if defined(NETBSD_MSI_OR_MSIX)
   1574 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1575 	    adapter->osdep.nintrs);
   1576 #endif
   1577 
   1578 	if (adapter->osdep.mem_size != 0) {
   1579 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1580 		    adapter->osdep.mem_bus_space_handle,
   1581 		    adapter->osdep.mem_size);
   1582 	}
   1583 
   1584 	return;
   1585 }
   1586 
   1587 /*********************************************************************
   1588  *
   1589  *  Setup networking device structure and register an interface.
   1590  *
   1591  **********************************************************************/
   1592 static void
   1593 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1594 {
   1595 	struct ethercom *ec = &adapter->osdep.ec;
   1596 	struct ifnet   *ifp;
   1597 
   1598 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1599 
   1600 	ifp = adapter->ifp = &ec->ec_if;
   1601 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1602 	ifp->if_baudrate = 1000000000;
   1603 	ifp->if_init = ixv_init;
   1604 	ifp->if_stop = ixv_ifstop;
   1605 	ifp->if_softc = adapter;
   1606 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1607 	ifp->if_ioctl = ixv_ioctl;
   1608 #ifndef IXGBE_LEGACY_TX
   1609 	ifp->if_transmit = ixgbe_mq_start;
   1610 #endif
   1611 	ifp->if_start = ixgbe_start;
   1612 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1613 
   1614 	if_initialize(ifp);
   1615 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1616 #ifndef IXGBE_LEGACY_TX
   1617 #if 0	/* We use per TX queue softint */
   1618 	if_deferred_start_init(ifp, ixgbe_deferred_mq_start);
   1619 #endif
   1620 #endif
   1621 	if_register(ifp);
   1622 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1623 
   1624 	adapter->max_frame_size =
   1625 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
   1626 
   1627 	/*
   1628 	 * Tell the upper layer(s) we support long frames.
   1629 	 */
   1630 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1631 
   1632 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1633 	ifp->if_capenable = 0;
   1634 
   1635 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1636 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1637 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1638 	    		| ETHERCAP_VLAN_MTU;
   1639 	ec->ec_capenable = ec->ec_capabilities;
   1640 
   1641 	/* Don't enable LRO by default */
   1642 	ifp->if_capabilities |= IFCAP_LRO;
   1643 #if 0
   1644 	ifp->if_capenable = ifp->if_capabilities;
   1645 #endif
   1646 
   1647 	/*
    1648 	** Don't enable this by default: if vlans are
    1649 	** created on another pseudo device (e.g. lagg),
    1650 	** then vlan events are not passed through, breaking
    1651 	** operation, whereas with HW FILTER off it works. If
    1652 	** vlans are configured directly on this driver you can
    1653 	** enable this and get full hardware tag filtering.
   1654 	*/
   1655 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1656 
   1657 	/*
   1658 	 * Specify the media types supported by this adapter and register
   1659 	 * callbacks to update media and link information
   1660 	 */
   1661 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1662 		     ixv_media_status);
   1663 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1664 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1665 
   1666 	return;
   1667 }
   1668 
   1669 static void
   1670 ixv_config_link(struct adapter *adapter)
   1671 {
   1672 	struct ixgbe_hw *hw = &adapter->hw;
   1673 	u32	autoneg;
   1674 
   1675 	if (hw->mac.ops.check_link)
   1676 		hw->mac.ops.check_link(hw, &autoneg,
   1677 		    &adapter->link_up, FALSE);
   1678 }
   1679 
   1680 
   1681 /*********************************************************************
   1682  *
   1683  *  Enable transmit unit.
   1684  *
   1685  **********************************************************************/
   1686 static void
   1687 ixv_initialize_transmit_units(struct adapter *adapter)
   1688 {
   1689 	struct tx_ring	*txr = adapter->tx_rings;
   1690 	struct ixgbe_hw	*hw = &adapter->hw;
   1691 
   1692 
   1693 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1694 		u64	tdba = txr->txdma.dma_paddr;
   1695 		u32	txctrl, txdctl;
   1696 
    1697 		/* Set WTHRESH to 8 (TXDCTL bits 22:16), burst writeback */
   1698 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1699 		txdctl |= (8 << 16);
   1700 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1701 
   1702 		/* Set the HW Tx Head and Tail indices */
    1703 		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
    1704 		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
   1705 
   1706 		/* Set Tx Tail register */
   1707 		txr->tail = IXGBE_VFTDT(i);
   1708 
   1709 		/* Set Ring parameters */
   1710 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1711 		       (tdba & 0x00000000ffffffffULL));
   1712 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1713 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1714 		    adapter->num_tx_desc *
   1715 		    sizeof(struct ixgbe_legacy_tx_desc));
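		/* (VFTDLEN is in bytes; each TX descriptor is 16 bytes) */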
   1716 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1717 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1718 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1719 
   1720 		/* Now enable */
   1721 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1722 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1723 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1724 	}
   1725 
   1726 	return;
   1727 }
   1728 
   1729 
   1730 /*********************************************************************
   1731  *
   1732  *  Setup receive registers and features.
   1733  *
   1734  **********************************************************************/
   1735 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1736 
   1737 static void
   1738 ixv_initialize_receive_units(struct adapter *adapter)
   1739 {
   1740 	struct	rx_ring	*rxr = adapter->rx_rings;
   1741 	struct ixgbe_hw	*hw = &adapter->hw;
   1742 	struct ifnet	*ifp = adapter->ifp;
   1743 	u32		bufsz, rxcsum, psrtype;
   1744 
   1745 	if (ifp->if_mtu > ETHERMTU)
   1746 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1747 	else
   1748 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
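	/*
	** SRRCTL.BSIZEPKT is in 1 KB units, hence the shift by
	** IXGBE_SRRCTL_BSIZEPKT_SHIFT (10): 2048 >> 10 = 2, 4096 >> 10 = 4.
	*/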
   1749 
   1750 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1751 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1752 	    IXGBE_PSRTYPE_L2HDR;
   1753 
   1754 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1755 
   1756 	/* Tell PF our max_frame size */
   1757 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1758 
   1759 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1760 		u64 rdba = rxr->rxdma.dma_paddr;
   1761 		u32 reg, rxdctl;
   1762 
   1763 		/* Disable the queue */
   1764 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1765 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1766 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1767 		for (int j = 0; j < 10; j++) {
   1768 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1769 			    IXGBE_RXDCTL_ENABLE)
   1770 				msec_delay(1);
   1771 			else
   1772 				break;
   1773 		}
   1774 		wmb();
   1775 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1776 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1777 		    (rdba & 0x00000000ffffffffULL));
   1778 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1779 		    (rdba >> 32));
   1780 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1781 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1782 
   1783 		/* Reset the ring indices */
   1784 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1785 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1786 
   1787 		/* Set up the SRRCTL register */
   1788 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1789 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1790 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1791 		reg |= bufsz;
   1792 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1793 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1794 
   1795 		/* Capture Rx Tail index */
   1796 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1797 
   1798 		/* Do the queue enabling last */
   1799 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1800 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1801 		for (int k = 0; k < 10; k++) {
   1802 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1803 			    IXGBE_RXDCTL_ENABLE)
   1804 				break;
   1805 			else
   1806 				msec_delay(1);
   1807 		}
   1808 		wmb();
   1809 
   1810 		/* Set the Tail Pointer */
   1811 #ifdef DEV_NETMAP
   1812 		/*
   1813 		 * In netmap mode, we must preserve the buffers made
   1814 		 * available to userspace before the if_init()
   1815 		 * (this is true by default on the TX side, because
   1816 		 * init makes all buffers available to userspace).
   1817 		 *
   1818 		 * netmap_reset() and the device specific routines
   1819 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1820 		 * buffers at the end of the NIC ring, so here we
   1821 		 * must set the RDT (tail) register to make sure
   1822 		 * they are not overwritten.
   1823 		 *
   1824 		 * In this driver the NIC ring starts at RDH = 0,
   1825 		 * RDT points to the last slot available for reception (?),
   1826 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1827 		 */
   1828 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1829 			struct netmap_adapter *na = NA(adapter->ifp);
   1830 			struct netmap_kring *kring = &na->rx_rings[i];
   1831 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1832 
   1833 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1834 		} else
   1835 #endif /* DEV_NETMAP */
   1836 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1837 			    adapter->num_rx_desc - 1);
   1838 	}
   1839 
   1840 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1841 
   1842 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1843 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1844 
   1845 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1846 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1847 
   1848 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1849 
   1850 	return;
   1851 }
   1852 
   1853 static void
   1854 ixv_setup_vlan_support(struct adapter *adapter)
   1855 {
   1856 	struct ixgbe_hw *hw = &adapter->hw;
   1857 	u32		ctrl, vid, vfta, retry;
   1858 	struct rx_ring	*rxr;
   1859 
   1860 	/*
    1861 	** We get here through init_locked, meaning
    1862 	** a soft reset; this has already cleared
    1863 	** the VFTA and other state, so if no
    1864 	** vlans have been registered, do nothing.
   1865 	*/
   1866 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1867 		return;
   1868 
   1869 	/* Enable the queues */
   1870 	for (int i = 0; i < adapter->num_queues; i++) {
   1871 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1872 		ctrl |= IXGBE_RXDCTL_VME;
   1873 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1874 		/*
   1875 		 * Let Rx path know that it needs to store VLAN tag
   1876 		 * as part of extra mbuf info.
   1877 		 */
   1878 		rxr = &adapter->rx_rings[i];
   1879 		rxr->vtag_strip = TRUE;
   1880 	}
   1881 
   1882 	/*
    1883 	** A soft reset zeroes out the VFTA, so
   1884 	** we need to repopulate it now.
   1885 	*/
   1886 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1887 		if (ixv_shadow_vfta[i] == 0)
   1888 			continue;
   1889 		vfta = ixv_shadow_vfta[i];
   1890 		/*
    1891 		** Reconstruct the VLAN IDs
    1892 		** based on the bits set in each
    1893 		** of the 32-bit words of the array.
   1894 		*/
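		/* e.g. bit 4 set in ixv_shadow_vfta[3] => vid = 3*32 + 4 = 100 */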
   1895 		for (int j = 0; j < 32; j++) {
   1896 			retry = 0;
   1897 			if ((vfta & (1 << j)) == 0)
   1898 				continue;
   1899 			vid = (i * 32) + j;
   1900 			/* Call the shared code mailbox routine */
   1901 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1902 				if (++retry > 5)
   1903 					break;
   1904 			}
   1905 		}
   1906 	}
   1907 }
   1908 
   1909 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1910 /*
    1911 ** This routine is run via a vlan config EVENT,
   1912 ** it enables us to use the HW Filter table since
   1913 ** we can get the vlan id. This just creates the
   1914 ** entry in the soft version of the VFTA, init will
   1915 ** repopulate the real table.
   1916 */
   1917 static void
   1918 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1919 {
   1920 	struct adapter	*adapter = ifp->if_softc;
   1921 	u16		index, bit;
   1922 
   1923 	if (ifp->if_softc != arg) /* Not our event */
   1924 		return;
   1925 
   1926 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1927 		return;
   1928 
   1929 	IXGBE_CORE_LOCK(adapter);
   1930 	index = (vtag >> 5) & 0x7F;
   1931 	bit = vtag & 0x1F;
   1932 	ixv_shadow_vfta[index] |= (1 << bit);
   1933 	/* Re-init to load the changes */
   1934 	ixv_init_locked(adapter);
   1935 	IXGBE_CORE_UNLOCK(adapter);
   1936 }
   1937 
   1938 /*
    1939 ** This routine is run via a vlan
    1940 ** unconfig EVENT; it removes our entry
    1941 ** from the soft VFTA.
   1942 */
   1943 static void
   1944 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1945 {
   1946 	struct adapter	*adapter = ifp->if_softc;
   1947 	u16		index, bit;
   1948 
   1949 	if (ifp->if_softc !=  arg)
   1950 		return;
   1951 
   1952 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1953 		return;
   1954 
   1955 	IXGBE_CORE_LOCK(adapter);
   1956 	index = (vtag >> 5) & 0x7F;
   1957 	bit = vtag & 0x1F;
   1958 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1959 	/* Re-init to load the changes */
   1960 	ixv_init_locked(adapter);
   1961 	IXGBE_CORE_UNLOCK(adapter);
   1962 }
   1963 #endif
   1964 
   1965 static void
   1966 ixv_enable_intr(struct adapter *adapter)
   1967 {
   1968 	struct ixgbe_hw *hw = &adapter->hw;
   1969 	struct ix_queue *que = adapter->queues;
   1970 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1971 
   1972 
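	/*
	** Sketch of intent (per the 82599 VF register descriptions):
	** VTEIMS unmasks every cause except the per-queue RX/TX bits,
	** which are enabled per queue below; VTEIAC then marks all
	** causes except OTHER/LSC for automatic clearing when their
	** MSI-X vector fires.
	*/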
   1973 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1974 
   1975 	mask = IXGBE_EIMS_ENABLE_MASK;
   1976 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1977 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1978 
    1979 	for (int i = 0; i < adapter->num_queues; i++, que++)
   1980 		ixv_enable_queue(adapter, que->msix);
   1981 
   1982 	IXGBE_WRITE_FLUSH(hw);
   1983 
   1984 	return;
   1985 }
   1986 
   1987 static void
   1988 ixv_disable_intr(struct adapter *adapter)
   1989 {
   1990 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1991 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1992 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1993 	return;
   1994 }
   1995 
   1996 /*
   1997 ** Setup the correct IVAR register for a particular MSIX interrupt
   1998 **  - entry is the register array entry
   1999 **  - vector is the MSIX vector for this queue
   2000 **  - type is RX/TX/MISC
   2001 */
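/*
** Layout sketch (assuming the usual VF IVAR packing): each VTIVAR
** register holds four 8-bit entries, i.e. two queues per register with
** one RX and one TX slot each.  For example, entry 3, type 0 (RX):
**   register = VTIVAR(3 >> 1) = VTIVAR(1)
**   index    = 16 * (3 & 1) + 8 * 0 = 16, i.e. bits 23:16 of that IVAR
*/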
   2002 static void
   2003 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2004 {
   2005 	struct ixgbe_hw *hw = &adapter->hw;
   2006 	u32 ivar, index;
   2007 
   2008 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2009 
   2010 	if (type == -1) { /* MISC IVAR */
   2011 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2012 		ivar &= ~0xFF;
   2013 		ivar |= vector;
   2014 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2015 	} else {	/* RX/TX IVARS */
   2016 		index = (16 * (entry & 1)) + (8 * type);
   2017 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2018 		ivar &= ~(0xFF << index);
   2019 		ivar |= (vector << index);
   2020 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2021 	}
   2022 }
   2023 
   2024 static void
   2025 ixv_configure_ivars(struct adapter *adapter)
   2026 {
   2027 	struct  ix_queue *que = adapter->queues;
   2028 
    2029 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    2030 		/* First the RX queue entry */
    2031 		ixv_set_ivar(adapter, i, que->msix, 0);
    2032 		/* ... and the TX */
    2033 		ixv_set_ivar(adapter, i, que->msix, 1);
    2034 		/* Set an initial value in EITR */
    2035 		IXGBE_WRITE_REG(&adapter->hw,
    2036 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
    2037 	}
    2038 
    2039 	/* For the mailbox interrupt */
    2040 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2041 }
   2042 
   2043 
   2044 /*
    2045 ** Deferred handler for MSI-X mailbox (MBX) interrupts
    2046 **  - run outside the interrupt handler since it might sleep
   2047 */
   2048 static void
   2049 ixv_handle_mbx(void *context)
   2050 {
   2051 	struct adapter  *adapter = context;
   2052 
   2053 	ixgbe_check_link(&adapter->hw,
   2054 	    &adapter->link_speed, &adapter->link_up, 0);
   2055 	ixv_update_link_status(adapter);
   2056 }
   2057 
   2058 /*
    2059 ** The VF stats registers never have a truly virgin
    2060 ** starting point, so this routine manufactures an
    2061 ** artificial one, recording a baseline ("ground zero")
    2062 ** at attach time.
   2063 */
   2064 static void
   2065 ixv_save_stats(struct adapter *adapter)
   2066 {
   2067 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2068 
   2069 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2070 		stats->saved_reset_vfgprc +=
   2071 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2072 		stats->saved_reset_vfgptc +=
   2073 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2074 		stats->saved_reset_vfgorc +=
   2075 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2076 		stats->saved_reset_vfgotc +=
   2077 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2078 		stats->saved_reset_vfmprc +=
   2079 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2080 	}
   2081 }
   2082 
   2083 static void
   2084 ixv_init_stats(struct adapter *adapter)
   2085 {
   2086 	struct ixgbe_hw *hw = &adapter->hw;
   2087 
   2088 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2089 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2090 	adapter->stats.vf.last_vfgorc |=
   2091 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2092 
   2093 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2094 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2095 	adapter->stats.vf.last_vfgotc |=
   2096 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2097 
   2098 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2099 
   2100 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2101 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2102 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2103 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2104 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2105 }
   2106 
   2107 #define UPDATE_STAT_32(reg, last, count)		\
   2108 {							\
   2109 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2110 	if (current < last)				\
   2111 		count.ev_count += 0x100000000LL;	\
   2112 	last = current;					\
   2113 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2114 	count.ev_count |= current;			\
   2115 }
   2116 
   2117 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2118 {							\
   2119 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2120 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2121 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2122 	if (current < last)				\
   2123 		count.ev_count += 0x1000000000LL;	\
   2124 	last = current;					\
   2125 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2126 	count.ev_count |= current;			\
   2127 }
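
/*
** The VF counters are only 32 (or 36) bits wide and wrap, so the macros
** above maintain a 64-bit evcnt: a wrap is detected when the current
** reading is below the previous one, in which case one full counter
** period is credited before the low bits are replaced with the new
** reading.  Worked example for UPDATE_STAT_32:
**   last = 0xFFFFFFF0, current = 0x10
**   => current < last, so ev_count += 2^32; the low 32 bits of
**      ev_count then become 0x10.
*/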
   2128 
   2129 /*
   2130 ** ixv_update_stats - Update the board statistics counters.
   2131 */
   2132 void
   2133 ixv_update_stats(struct adapter *adapter)
   2134 {
    2135 	struct ixgbe_hw *hw = &adapter->hw;
    2136 
    2137 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
    2138 	    adapter->stats.vf.vfgprc);
    2139 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
    2140 	    adapter->stats.vf.vfgptc);
    2141 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
    2142 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
    2143 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
    2144 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
    2145 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
    2146 	    adapter->stats.vf.vfmprc);
   2147 }
   2148 
   2149 /*
   2150  * Add statistic sysctls for the VF.
   2151  */
   2152 static void
   2153 ixv_add_stats_sysctls(struct adapter *adapter)
   2154 {
   2155 	device_t dev = adapter->dev;
   2156 	struct ix_queue *que = &adapter->queues[0];
   2157 	struct tx_ring *txr = que->txr;
   2158 	struct rx_ring *rxr = que->rxr;
   2159 
   2160 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2161 
   2162 	const char *xname = device_xname(dev);
   2163 
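	/*
	 * Note: the evcnt(9) counters attached below are visible
	 * system-wide, e.g. via "vmstat -e", keyed by the device xname.
	 */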
   2164 	/* Driver Statistics */
   2165 	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
   2166 	    NULL, xname, "Driver dropped packets");
   2167 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2168 	    NULL, xname, "m_defrag() failed");
   2169 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2170 	    NULL, xname, "Watchdog timeouts");
   2171 
   2172 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2173 	    xname, "Good Packets Received");
   2174 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2175 	    xname, "Good Octets Received");
   2176 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2177 	    xname, "Multicast Packets Received");
   2178 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2179 	    xname, "Good Packets Transmitted");
   2180 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2181 	    xname, "Good Octets Transmitted");
   2182 	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
   2183 	    xname, "IRQs on queue");
   2184 	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
   2185 	    xname, "RX irqs on queue");
   2186 	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
   2187 	    xname, "RX packets");
   2188 	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
   2189 	    xname, "RX bytes");
   2190 	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
   2191 	    xname, "Discarded RX packets");
   2192 	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
   2193 	    xname, "TX Packets");
   2194 	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
   2195 	    xname, "# of times not enough descriptors were available during TX");
   2196 	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
   2197 	    xname, "TX TSO");
   2198 }
   2199 
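/*
** Create a simple read/write integer sysctl leaf under the per-device
** node and seed it with "value".  Usage sketch (hypothetical names; the
** real callers live in attach):
**
**	ixv_set_sysctl_value(adapter, "rx_process_limit",
**	    "max number of RX packets to process",
**	    &adapter->rx_process_limit, ixv_rx_process_limit);
*/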
   2200 static void
   2201 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2202 	const char *description, int *limit, int value)
   2203 {
   2204 	device_t dev =  adapter->dev;
   2205 	struct sysctllog **log;
   2206 	const struct sysctlnode *rnode, *cnode;
   2207 
   2208 	log = &adapter->sysctllog;
   2209 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2210 		aprint_error_dev(dev, "could not create sysctl root\n");
   2211 		return;
   2212 	}
   2213 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2214 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2215 	    name, SYSCTL_DESCR(description),
   2216 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2217 		aprint_error_dev(dev, "could not create sysctl\n");
   2218 	*limit = value;
   2219 }
   2220 
   2221 /**********************************************************************
   2222  *
    2223  *  This routine is called only via the debug sysctl (ixv_sysctl_debug).
    2224  *  It provides a way to examine important statistics
    2225  *  maintained by the driver and hardware.
   2226  *
   2227  **********************************************************************/
   2228 static void
   2229 ixv_print_debug_info(struct adapter *adapter)
   2230 {
   2231         device_t dev = adapter->dev;
   2232         struct ixgbe_hw         *hw = &adapter->hw;
   2233         struct ix_queue         *que = adapter->queues;
   2234         struct rx_ring          *rxr;
   2235         struct tx_ring          *txr;
   2236 #ifdef LRO
   2237         struct lro_ctrl         *lro;
   2238 #endif /* LRO */
   2239 
   2240         device_printf(dev,"Error Byte Count = %u \n",
   2241             IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2242 
   2243         for (int i = 0; i < adapter->num_queues; i++, que++) {
   2244                 txr = que->txr;
   2245                 rxr = que->rxr;
   2246 #ifdef LRO
   2247                 lro = &rxr->lro;
   2248 #endif /* LRO */
   2249                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
    2250                     que->msix, (unsigned long)que->irqs.ev_count);
   2251                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
   2252                     rxr->me, (long long)rxr->rx_packets.ev_count);
   2253                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
    2254                     rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
   2255 #ifdef LRO
   2256                 device_printf(dev,"RX(%d) LRO Queued= %lld\n",
   2257                     rxr->me, (long long)lro->lro_queued);
   2258                 device_printf(dev,"RX(%d) LRO Flushed= %lld\n",
   2259                     rxr->me, (long long)lro->lro_flushed);
   2260 #endif /* LRO */
   2261                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
    2262                     txr->me, (unsigned long)txr->total_packets.ev_count);
    2263                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
    2264                     txr->me, (unsigned long)txr->no_desc_avail.ev_count);
   2265         }
   2266 
   2267         device_printf(dev,"MBX IRQ Handled: %lu\n",
    2268             (unsigned long)adapter->link_irq.ev_count);
   2269         return;
   2270 }
   2271 
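/*
** Debug sysctl handler: writing 1 to the node (created elsewhere in the
** driver) triggers a one-shot dump of the statistics above via
** ixv_print_debug_info().
*/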
   2272 static int
   2273 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2274 {
   2275 	struct sysctlnode node;
   2276 	int error, result;
   2277 	struct adapter *adapter;
   2278 
   2279 	node = *rnode;
   2280 	adapter = (struct adapter *)node.sysctl_data;
   2281 	node.sysctl_data = &result;
   2282 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2283 
   2284 	if (error)
   2285 		return error;
   2286 
   2287 	if (result == 1)
   2288 		ixv_print_debug_info(adapter);
   2289 
   2290 	return 0;
   2291 }
   2292 
   2293 const struct sysctlnode *
   2294 ixv_sysctl_instance(struct adapter *adapter)
   2295 {
   2296 	const char *dvname;
   2297 	struct sysctllog **log;
   2298 	int rc;
   2299 	const struct sysctlnode *rnode;
   2300 
   2301 	log = &adapter->sysctllog;
   2302 	dvname = device_xname(adapter->dev);
   2303 
   2304 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2305 	    0, CTLTYPE_NODE, dvname,
   2306 	    SYSCTL_DESCR("ixv information and settings"),
   2307 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2308 		goto err;
   2309 
   2310 	return rnode;
   2311 err:
   2312 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2313 	return NULL;
   2314 }
   2315