      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.38 2017/02/07 04:27:43 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixgbe.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.4.6-k";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
     50  *  Used by probe to select devices to load on
     51  *  Last field stores an index into ixv_strings
     52  *  Last entry must be all 0s
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     63 	/* required last entry */
     64 	{0, 0, 0, 0, 0}
     65 };
     66 
     67 /*********************************************************************
     68  *  Table of branding strings
     69  *********************************************************************/
     70 
     71 static const char    *ixv_strings[] = {
     72 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     73 };
     74 
     75 /*********************************************************************
     76  *  Function prototypes
     77  *********************************************************************/
     78 static int      ixv_probe(device_t, cfdata_t, void *);
     79 static void	ixv_attach(device_t, device_t, void *);
     80 static int      ixv_detach(device_t, int);
     81 #if 0
     82 static int      ixv_shutdown(device_t);
     83 #endif
     84 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     85 static int	ixv_init(struct ifnet *);
     86 static void	ixv_init_locked(struct adapter *);
     87 static void     ixv_stop(void *);
     88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     89 static int      ixv_media_change(struct ifnet *);
     90 static void     ixv_identify_hardware(struct adapter *);
     91 static int      ixv_allocate_pci_resources(struct adapter *,
     92 		    const struct pci_attach_args *);
     93 static int      ixv_allocate_msix(struct adapter *,
     94 		    const struct pci_attach_args *);
     95 static int	ixv_setup_msix(struct adapter *);
     96 static void	ixv_free_pci_resources(struct adapter *);
     97 static void     ixv_local_timer(void *);
     98 static void     ixv_local_timer_locked(void *);
     99 static void     ixv_setup_interface(device_t, struct adapter *);
    100 static void     ixv_config_link(struct adapter *);
    101 
    102 static void     ixv_initialize_transmit_units(struct adapter *);
    103 static void     ixv_initialize_receive_units(struct adapter *);
    104 
    105 static void     ixv_enable_intr(struct adapter *);
    106 static void     ixv_disable_intr(struct adapter *);
    107 static void     ixv_set_multi(struct adapter *);
    108 static void     ixv_update_link_status(struct adapter *);
    109 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    110 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    111 static void	ixv_configure_ivars(struct adapter *);
    112 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    113 
    114 static void	ixv_setup_vlan_support(struct adapter *);
    115 #if 0
    116 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    117 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    118 #endif
    119 
    120 static void	ixv_save_stats(struct adapter *);
    121 static void	ixv_init_stats(struct adapter *);
    122 static void	ixv_update_stats(struct adapter *);
    123 static void	ixv_add_stats_sysctls(struct adapter *);
    124 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    125 		    const char *, int *, int);
    126 
    127 /* The MSI/X Interrupt handlers */
    128 static int	ixv_msix_que(void *);
    129 static int	ixv_msix_mbx(void *);
    130 
    131 /* Deferred interrupt tasklets */
    132 static void	ixv_handle_que(void *);
    133 static void	ixv_handle_mbx(void *);
    134 
    135 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    136 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    137 
    138 #ifdef DEV_NETMAP
    139 /*
    140  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    141  * if_ix.c.
    142  */
    143 extern void ixgbe_netmap_attach(struct adapter *adapter);
    144 
    145 #include <net/netmap.h>
    146 #include <sys/selinfo.h>
    147 #include <dev/netmap/netmap_kern.h>
    148 #endif /* DEV_NETMAP */
    149 
    150 /*********************************************************************
    151  *  FreeBSD Device Interface Entry Points
    152  *********************************************************************/
    153 
    154 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    155     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    156     DVF_DETACH_SHUTDOWN);
    157 
    158 #if 0
    159 static device_method_t ixv_methods[] = {
    160 	/* Device interface */
    161 	DEVMETHOD(device_probe, ixv_probe),
    162 	DEVMETHOD(device_attach, ixv_attach),
    163 	DEVMETHOD(device_detach, ixv_detach),
    164 	DEVMETHOD(device_shutdown, ixv_shutdown),
    165 	DEVMETHOD_END
    166 };
    167 #endif
    168 
    169 #if 0
    170 static driver_t ixv_driver = {
    171 	"ixv", ixv_methods, sizeof(struct adapter),
    172 };
    173 
    174 devclass_t ixv_devclass;
    175 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    176 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    177 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    178 #ifdef DEV_NETMAP
    179 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    180 #endif /* DEV_NETMAP */
    181 /* XXX depend on 'ix' ? */
    182 #endif
    183 
    184 /*
    185 ** TUNEABLE PARAMETERS:
    186 */
    187 
    188 /* Number of Queues - do not exceed MSIX vectors - 1 */
    189 static int ixv_num_queues = 1;
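        /*
        ** TUNABLE_INT() is FreeBSD's loader-tunable hook; on NetBSD it is
        ** defined away immediately below, so the TUNABLE_INT() lines in this
        ** file are no-ops (presumably kept to reduce diffs against the
        ** FreeBSD driver).  Where a knob matters it is exposed via sysctl.
        */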
    190 #define	TUNABLE_INT(__x, __y)
    191 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    192 
    193 /*
    194 ** AIM: Adaptive Interrupt Moderation
    195 ** which means that the interrupt rate
    196 ** is varied over time based on the
    197 ** traffic for that interrupt vector
    198 */
    199 static int ixv_enable_aim = FALSE;
    200 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    201 
    202 /* How many packets rxeof tries to clean at a time */
    203 static int ixv_rx_process_limit = 256;
    204 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    205 
    206 /* How many packets txeof tries to clean at a time */
    207 static int ixv_tx_process_limit = 256;
    208 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    209 
    210 /*
    211 ** Number of TX descriptors per ring,
    212 ** set higher than RX as this seems to be
    213 ** the better performing choice.
    214 */
    215 static int ixv_txd = DEFAULT_TXD;
    216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    217 
    218 /* Number of RX descriptors per ring */
    219 static int ixv_rxd = DEFAULT_RXD;
    220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    221 
    222 /*
    223 ** Shadow VFTA table: this is needed because
    224 ** the real filter table gets cleared during
    225 ** a soft reset and we need to repopulate it.
    226 */
    227 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    228 
    229 /*********************************************************************
    230  *  Device identification routine
    231  *
    232  *  ixv_probe determines if the driver should be loaded on
    233  *  the adapter, based on the PCI vendor/device ID of the adapter.
    234  *
    235  *  return 1 on a supported device, 0 otherwise
    236  *********************************************************************/
    237 
    238 static int
    239 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    240 {
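        	/*
        	 * The VF can only operate with MSI-X (see ixv_setup_msix()), so
        	 * do not match at all on platforms without MSI/MSI-X support.
        	 */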
    241 #ifdef __HAVE_PCI_MSI_MSIX
    242 	const struct pci_attach_args *pa = aux;
    243 
    244 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    245 #else
    246 	return 0;
    247 #endif
    248 }
    249 
    250 static ixgbe_vendor_info_t *
    251 ixv_lookup(const struct pci_attach_args *pa)
    252 {
    253 	pcireg_t subid;
    254 	ixgbe_vendor_info_t *ent;
    255 
    256 	INIT_DEBUGOUT("ixv_lookup: begin");
    257 
    258 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    259 		return NULL;
    260 
    261 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    262 
    263 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    264 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    265 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    266 
    267 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    268 		     (ent->subvendor_id == 0)) &&
    269 
    270 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    271 		     (ent->subdevice_id == 0))) {
    272 			return ent;
    273 		}
    274 	}
    275 	return NULL;
    276 }
    277 
    278 
    279 static void
    280 ixv_sysctl_attach(struct adapter *adapter)
    281 {
    282 	struct sysctllog **log;
    283 	const struct sysctlnode *rnode, *cnode;
    284 	device_t dev;
    285 
    286 	dev = adapter->dev;
    287 	log = &adapter->sysctllog;
    288 
    289 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    290 		aprint_error_dev(dev, "could not create sysctl root\n");
    291 		return;
    292 	}
    293 
    294 	if (sysctl_createv(log, 0, &rnode, &cnode,
    295 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    296 	    "debug", SYSCTL_DESCR("Debug Info"),
    297 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    298 		aprint_error_dev(dev, "could not create sysctl\n");
    299 
    300 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    301 	 * XXX It's that way in the FreeBSD driver that this derives from.
    302 	 */
    303 	if (sysctl_createv(log, 0, &rnode, &cnode,
    304 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    305 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    306 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    307 		aprint_error_dev(dev, "could not create sysctl\n");
    308 }
    309 
    310 /*********************************************************************
    311  *  Device initialization routine
    312  *
    313  *  The attach entry point is called when the driver is being loaded.
    314  *  This routine identifies the type of hardware, allocates all resources
    315  *  and initializes the hardware.
    316  *
    317  *  (no return value in this NetBSD port; errors are reported via aprint)
    318  *********************************************************************/
    319 
    320 static void
    321 ixv_attach(device_t parent, device_t dev, void *aux)
    322 {
    323 	struct adapter *adapter;
    324 	struct ixgbe_hw *hw;
    325 	int             error = 0;
    326 	ixgbe_vendor_info_t *ent;
    327 	const struct pci_attach_args *pa = aux;
    328 
    329 	INIT_DEBUGOUT("ixv_attach: begin");
    330 
    331 	/* Allocate, clear, and link in our adapter structure */
    332 	adapter = device_private(dev);
    333 	adapter->dev = dev;
    334 	hw = &adapter->hw;
    335 
    336 #ifdef DEV_NETMAP
    337 	adapter->init_locked = ixv_init_locked;
    338 	adapter->stop_locked = ixv_stop;
    339 #endif
    340 
    341 	adapter->osdep.pc = pa->pa_pc;
    342 	adapter->osdep.tag = pa->pa_tag;
    343 	adapter->osdep.dmat = pa->pa_dmat;
    344 	adapter->osdep.attached = false;
    345 
    346 	ent = ixv_lookup(pa);
    347 
    348 	KASSERT(ent != NULL);
    349 
    350 	aprint_normal(": %s, Version - %s\n",
    351 	    ixv_strings[ent->index], ixv_driver_version);
    352 
    353 	/* Core Lock Init*/
    354 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    355 
    356 	/* SYSCTL APIs */
    357 	ixv_sysctl_attach(adapter);
    358 
    359 	/* Set up the timer callout */
    360 	callout_init(&adapter->timer, 0);
    361 
    362 	/* Determine hardware revision */
    363 	ixv_identify_hardware(adapter);
    364 
    365 	/* Do base PCI setup - map BAR0 */
    366 	if (ixv_allocate_pci_resources(adapter, pa)) {
    367 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    368 		error = ENXIO;
    369 		goto err_out;
    370 	}
    371 
    372 	/* Sysctls for limiting the amount of work done in the taskqueues */
    373 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    374 	    "max number of rx packets to process",
    375 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    376 
    377 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    378 	    "max number of tx packets to process",
    379 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    380 
    381 	/* Do descriptor calc and sanity checks */
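        	/*
        	 * The ring size in bytes must be a multiple of DBA_ALIGN (the
        	 * descriptor base address alignment) and the descriptor count
        	 * must stay within the MIN/MAX limits.
        	 */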
    382 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    383 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    384 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    385 		adapter->num_tx_desc = DEFAULT_TXD;
    386 	} else
    387 		adapter->num_tx_desc = ixv_txd;
    388 
    389 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    390 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    391 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    392 		adapter->num_rx_desc = DEFAULT_RXD;
    393 	} else
    394 		adapter->num_rx_desc = ixv_rxd;
    395 
    396 	/* Allocate our TX/RX Queues */
    397 	if (ixgbe_allocate_queues(adapter)) {
    398 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    399 		error = ENOMEM;
    400 		goto err_out;
    401 	}
    402 
    403 	/*
    404 	** Initialize the shared code; it is
    405 	** at this point that the mac type is set.
    406 	*/
    407 	error = ixgbe_init_shared_code(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    410 		error = EIO;
    411 		goto err_late;
    412 	}
    413 
    414 	/* Setup the mailbox */
    415 	ixgbe_init_mbx_params_vf(hw);
    416 
    417 	/* Reset the hardware; this also resets the mailbox API to 1.0 */
    418 	error = ixgbe_reset_hw(hw);
    419 	if (error == IXGBE_ERR_RESET_FAILED)
    420 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    421 	else if (error)
    422 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    423 	if (error) {
    424 		error = EIO;
    425 		goto err_late;
    426 	}
    427 
    428 	/* Negotiate mailbox API version */
    429 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    430 	if (error)
    431 		aprint_debug_dev(dev,
    432 		    "MBX API 1.1 negotiation failed! Error %d\n", error);
    433 
    434 	error = ixgbe_init_hw(hw);
    435 	if (error) {
    436 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    437 		error = EIO;
    438 		goto err_late;
    439 	}
    440 
    441 	error = ixv_allocate_msix(adapter, pa);
    442 	if (error) {
    443 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    444 		goto err_late;
    445 	}
    446 
    447 	/* If no mac address was assigned, make a random one */
    448 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    449 		u8 addr[ETHER_ADDR_LEN];
    450 		uint64_t rndval = cprng_fast64();
    451 
    452 		memcpy(addr, &rndval, sizeof(addr));
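        		/*
        		 * Clear the multicast bit and set the locally
        		 * administered bit so the result is a valid,
        		 * locally administered unicast MAC address.
        		 */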
    453 		addr[0] &= 0xFE;
    454 		addr[0] |= 0x02;
    455 		bcopy(addr, hw->mac.addr, sizeof(addr));
    456 	}
    457 
    458 	/* Setup OS specific network interface */
    459 	ixv_setup_interface(dev, adapter);
    460 
    461 	/* Do the stats setup */
    462 	ixv_save_stats(adapter);
    463 	ixv_init_stats(adapter);
    464 	ixv_add_stats_sysctls(adapter);
    465 
    466 	/* Register for VLAN events */
    467 #if 0 /* XXX delete after write? */
    468 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    469 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    470 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    471 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    472 #endif
    473 
    474 #ifdef DEV_NETMAP
    475 	ixgbe_netmap_attach(adapter);
    476 #endif /* DEV_NETMAP */
    477 	INIT_DEBUGOUT("ixv_attach: end");
    478 	adapter->osdep.attached = true;
    479 	return;
    480 
    481 err_late:
    482 	ixgbe_free_transmit_structures(adapter);
    483 	ixgbe_free_receive_structures(adapter);
    484 err_out:
    485 	ixv_free_pci_resources(adapter);
    486 	return;
    487 
    488 }
    489 
    490 /*********************************************************************
    491  *  Device removal routine
    492  *
    493  *  The detach entry point is called when the driver is being removed.
    494  *  This routine stops the adapter and deallocates all the resources
    495  *  that were allocated for driver operation.
    496  *
    497  *  return 0 on success, positive on failure
    498  *********************************************************************/
    499 
    500 static int
    501 ixv_detach(device_t dev, int flags)
    502 {
    503 	struct adapter *adapter = device_private(dev);
    504 	struct ix_queue *que = adapter->queues;
    505 
    506 	INIT_DEBUGOUT("ixv_detach: begin");
    507 	if (adapter->osdep.attached == false)
    508 		return 0;
    509 
    510 #if NVLAN > 0
    511 	/* Make sure VLANS are not using driver */
    512 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    513 		;	/* nothing to do: no VLANs */
    514 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    515 		vlan_ifdetach(adapter->ifp);
    516 	else {
    517 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    518 		return EBUSY;
    519 	}
    520 #endif
    521 
    522 	IXGBE_CORE_LOCK(adapter);
    523 	ixv_stop(adapter);
    524 	IXGBE_CORE_UNLOCK(adapter);
    525 
    526 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    527 #ifndef IXGBE_LEGACY_TX
    528 		struct tx_ring *txr = adapter->tx_rings;
    529 
    530 		softint_disestablish(txr->txr_si);
    531 #endif
    532 		softint_disestablish(que->que_si);
    533 	}
    534 
    535 	/* Drain the Mailbox (link) queue */
    536 	softint_disestablish(adapter->link_si);
    537 
    538 	/* Unregister VLAN events */
    539 #if 0 /* XXX msaitoh delete after write? */
    540 	if (adapter->vlan_attach != NULL)
    541 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    542 	if (adapter->vlan_detach != NULL)
    543 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    544 #endif
    545 
    546 	ether_ifdetach(adapter->ifp);
    547 	callout_halt(&adapter->timer, NULL);
    548 #ifdef DEV_NETMAP
    549 	netmap_detach(adapter->ifp);
    550 #endif /* DEV_NETMAP */
    551 	ixv_free_pci_resources(adapter);
    552 #if 0 /* XXX the NetBSD port is probably missing something here */
    553 	bus_generic_detach(dev);
    554 #endif
    555 	if_detach(adapter->ifp);
    556 
    557 	ixgbe_free_transmit_structures(adapter);
    558 	ixgbe_free_receive_structures(adapter);
    559 
    560 	IXGBE_CORE_LOCK_DESTROY(adapter);
    561 	return (0);
    562 }
    563 
    564 /*********************************************************************
    565  *
    566  *  Shutdown entry point
    567  *
    568  **********************************************************************/
    569 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    570 static int
    571 ixv_shutdown(device_t dev)
    572 {
    573 	struct adapter *adapter = device_private(dev);
    574 	IXGBE_CORE_LOCK(adapter);
    575 	ixv_stop(adapter);
    576 	IXGBE_CORE_UNLOCK(adapter);
    577 	return (0);
    578 }
    579 #endif
    580 
    581 static int
    582 ixv_ifflags_cb(struct ethercom *ec)
    583 {
    584 	struct ifnet *ifp = &ec->ec_if;
    585 	struct adapter *adapter = ifp->if_softc;
    586 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    587 
    588 	IXGBE_CORE_LOCK(adapter);
    589 
    590 	if (change != 0)
    591 		adapter->if_flags = ifp->if_flags;
    592 
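        	/*
        	 * Any flag change beyond the CANTCHANGE/DEBUG bits needs a full
        	 * reinitialization, which is signalled by returning ENETRESET.
        	 */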
    593 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    594 		rc = ENETRESET;
    595 
    596 	IXGBE_CORE_UNLOCK(adapter);
    597 
    598 	return rc;
    599 }
    600 
    601 /*********************************************************************
    602  *  Ioctl entry point
    603  *
    604  *  ixv_ioctl is called when the user wants to configure the
    605  *  interface.
    606  *
    607  *  return 0 on success, positive on failure
    608  **********************************************************************/
    609 
    610 static int
    611 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    612 {
    613 	struct adapter	*adapter = ifp->if_softc;
    614 	struct ifcapreq *ifcr = data;
    615 	struct ifreq	*ifr = (struct ifreq *) data;
    616 	int             error = 0;
    617 	int l4csum_en;
    618 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    619 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    620 
    621 	switch (command) {
    622 	case SIOCSIFFLAGS:
    623 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    624 		break;
    625 	case SIOCADDMULTI:
    626 	case SIOCDELMULTI:
    627 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    628 		break;
    629 	case SIOCSIFMEDIA:
    630 	case SIOCGIFMEDIA:
    631 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    632 		break;
    633 	case SIOCSIFCAP:
    634 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    635 		break;
    636 	case SIOCSIFMTU:
    637 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    638 		break;
    639 	default:
    640 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    641 		break;
    642 	}
    643 
    644 	switch (command) {
    645 	case SIOCSIFMEDIA:
    646 	case SIOCGIFMEDIA:
    647 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    648 	case SIOCSIFCAP:
    649 		/* Layer-4 Rx checksum offload has to be turned on and
    650 		 * off as a unit.
    651 		 */
    652 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    653 		if (l4csum_en != l4csum && l4csum_en != 0)
    654 			return EINVAL;
    655 		/*FALLTHROUGH*/
    656 	case SIOCADDMULTI:
    657 	case SIOCDELMULTI:
    658 	case SIOCSIFFLAGS:
    659 	case SIOCSIFMTU:
    660 	default:
    661 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    662 			return error;
    663 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    664 			;
    665 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    666 			IXGBE_CORE_LOCK(adapter);
    667 			ixv_init_locked(adapter);
    668 			IXGBE_CORE_UNLOCK(adapter);
    669 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    670 			/*
    671 			 * Multicast list has changed; set the hardware filter
    672 			 * accordingly.
    673 			 */
    674 			IXGBE_CORE_LOCK(adapter);
    675 			ixv_disable_intr(adapter);
    676 			ixv_set_multi(adapter);
    677 			ixv_enable_intr(adapter);
    678 			IXGBE_CORE_UNLOCK(adapter);
    679 		}
    680 		return 0;
    681 	}
    682 }
    683 
    684 /*********************************************************************
    685  *  Init entry point
    686  *
    687  *  This routine is used in two ways. It is used by the stack as
    688  *  the init entry point in the network interface structure. It is
    689  *  also used by the driver as a hw/sw initialization routine to get
    690  *  to a consistent state.
    691  *
    692  *  return 0 on success, positive on failure
    693  **********************************************************************/
    694 #define IXGBE_MHADD_MFS_SHIFT 16
    695 
    696 static void
    697 ixv_init_locked(struct adapter *adapter)
    698 {
    699 	struct ifnet	*ifp = adapter->ifp;
    700 	device_t 	dev = adapter->dev;
    701 	struct ixgbe_hw *hw = &adapter->hw;
    702 	int error = 0;
    703 
    704 	INIT_DEBUGOUT("ixv_init_locked: begin");
    705 	KASSERT(mutex_owned(&adapter->core_mtx));
    706 	hw->adapter_stopped = FALSE;
    707 	ixgbe_stop_adapter(hw);
    708 	callout_stop(&adapter->timer);
    709 
    710 	/* reprogram the RAR[0] in case the user changed it. */
    711 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    712 
    713 	/* Get the latest MAC address; the user can use an LAA */
    714 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    715 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    716 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    717 	hw->addr_ctrl.rar_used_count = 1;
    718 
    719 	/* Prepare transmit descriptors and buffers */
    720 	if (ixgbe_setup_transmit_structures(adapter)) {
    721 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    722 		ixv_stop(adapter);
    723 		return;
    724 	}
    725 
    726 	/* Reset VF and renegotiate mailbox API version */
    727 	ixgbe_reset_hw(hw);
    728 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    729 	if (error)
    730 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    731 
    732 	ixv_initialize_transmit_units(adapter);
    733 
    734 	/* Setup Multicast table */
    735 	ixv_set_multi(adapter);
    736 
    737 	/*
    738 	** Determine the correct mbuf pool
    739 	** for doing jumbo/headersplit
    740 	*/
    741 	if (ifp->if_mtu > ETHERMTU)
    742 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    743 	else
    744 		adapter->rx_mbuf_sz = MCLBYTES;
    745 
    746 	/* Prepare receive descriptors and buffers */
    747 	if (ixgbe_setup_receive_structures(adapter)) {
    748 		device_printf(dev, "Could not setup receive structures\n");
    749 		ixv_stop(adapter);
    750 		return;
    751 	}
    752 
    753 	/* Configure RX settings */
    754 	ixv_initialize_receive_units(adapter);
    755 
    756 #if 0 /* XXX isn't it required? -- msaitoh  */
    757 	/* Set the various hardware offload abilities */
    758 	ifp->if_hwassist = 0;
    759 	if (ifp->if_capenable & IFCAP_TSO4)
    760 		ifp->if_hwassist |= CSUM_TSO;
    761 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    762 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    763 #if __FreeBSD_version >= 800000
    764 		ifp->if_hwassist |= CSUM_SCTP;
    765 #endif
    766 	}
    767 #endif
    768 
    769 	/* Set up VLAN offload and filter */
    770 	ixv_setup_vlan_support(adapter);
    771 
    772 	/* Set up MSI/X routing */
    773 	ixv_configure_ivars(adapter);
    774 
    775 	/* Set up auto-mask */
    776 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    777 
    778 	/* Set moderation on the Link interrupt */
    779 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    780 
    781 	/* Stats init */
    782 	ixv_init_stats(adapter);
    783 
    784 	/* Config/Enable Link */
    785 	ixv_config_link(adapter);
    786 	hw->mac.get_link_status = TRUE;
    787 
    788 	/* Start watchdog */
    789 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    790 
    791 	/* And now turn on interrupts */
    792 	ixv_enable_intr(adapter);
    793 
    794 	/* Now inform the stack we're ready */
    795 	ifp->if_flags |= IFF_RUNNING;
    796 	ifp->if_flags &= ~IFF_OACTIVE;
    797 
    798 	return;
    799 }
    800 
    801 static int
    802 ixv_init(struct ifnet *ifp)
    803 {
    804 	struct adapter *adapter = ifp->if_softc;
    805 
    806 	IXGBE_CORE_LOCK(adapter);
    807 	ixv_init_locked(adapter);
    808 	IXGBE_CORE_UNLOCK(adapter);
    809 	return 0;
    810 }
    811 
    812 
    813 /*
    814 **
    815 ** MSIX Interrupt Handlers and Tasklets
    816 **
    817 */
    818 
    819 static inline void
    820 ixv_enable_queue(struct adapter *adapter, u32 vector)
    821 {
    822 	struct ixgbe_hw *hw = &adapter->hw;
    823 	u32	queue = 1 << vector;
    824 	u32	mask;
    825 
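        	/* Writing 1 to a VTEIMS bit unmasks (enables) that queue interrupt. */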
    826 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    827 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    828 }
    829 
    830 static inline void
    831 ixv_disable_queue(struct adapter *adapter, u32 vector)
    832 {
    833 	struct ixgbe_hw *hw = &adapter->hw;
    834 	u64	queue = (u64)1 << vector;	/* cast before shifting so the shift is done in 64 bits */
    835 	u32	mask;
    836 
    837 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    838 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    839 }
    840 
    841 static inline void
    842 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    843 {
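        	/* Setting bits in VTEICS forces the corresponding queue interrupts to be raised again. */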
    844 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    845 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    846 }
    847 
    848 
    849 static void
    850 ixv_handle_que(void *context)
    851 {
    852 	struct ix_queue *que = context;
    853 	struct adapter  *adapter = que->adapter;
    854 	struct tx_ring	*txr = que->txr;
    855 	struct ifnet    *ifp = adapter->ifp;
    856 	bool		more;
    857 
    858 	if (ifp->if_flags & IFF_RUNNING) {
    859 		more = ixgbe_rxeof(que);
    860 		IXGBE_TX_LOCK(txr);
    861 		ixgbe_txeof(txr);
    862 #ifndef IXGBE_LEGACY_TX
    863 		if (pcq_peek(txr->txr_interq) != NULL)
    864 			ixgbe_mq_start_locked(ifp, txr);
    865 #else
    866 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
    867 			ixgbe_start_locked(txr, ifp);
    868 #endif
    869 		IXGBE_TX_UNLOCK(txr);
    870 		if (more) {
    871 			adapter->req.ev_count++;
    872 			softint_schedule(que->que_si);
    873 			return;
    874 		}
    875 	}
    876 
    877 	/* Reenable this interrupt */
    878 	ixv_enable_queue(adapter, que->msix);
    879 	return;
    880 }
    881 
    882 /*********************************************************************
    883  *
    884  *  MSI-X Queue Interrupt Service routine
    885  *
    886  **********************************************************************/
    887 int
    888 ixv_msix_que(void *arg)
    889 {
    890 	struct ix_queue	*que = arg;
    891 	struct adapter  *adapter = que->adapter;
    892 	struct ifnet    *ifp = adapter->ifp;
    893 	struct tx_ring	*txr = que->txr;
    894 	struct rx_ring	*rxr = que->rxr;
    895 	bool		more;
    896 	u32		newitr = 0;
    897 
    898 	ixv_disable_queue(adapter, que->msix);
    899 	++que->irqs.ev_count;
    900 
    901 #ifdef __NetBSD__
    902 	/* Don't run ixgbe_rxeof in interrupt context */
    903 	more = true;
    904 #else
    905 	more = ixgbe_rxeof(que);
    906 #endif
    907 
    908 	IXGBE_TX_LOCK(txr);
    909 	ixgbe_txeof(txr);
    910 	/*
    911 	** Make certain that if the stack
    912 	** has anything queued the task gets
    913 	** scheduled to handle it.
    914 	*/
    915 #ifdef IXGBE_LEGACY_TX
    916 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
    917 		ixgbe_start_locked(txr, ifp);
    918 #else
    919 	if (pcq_peek(txr->txr_interq) != NULL)
    920 		ixgbe_mq_start_locked(ifp, txr);
    921 #endif
    922 	IXGBE_TX_UNLOCK(txr);
    923 
    924 	/* Do AIM now? */
    925 
    926 	if (ixv_enable_aim == FALSE)
    927 		goto no_calc;
    928 	/*
    929 	** Do Adaptive Interrupt Moderation:
    930 	**  - Write out last calculated setting
    931 	**  - Calculate based on average size over
    932 	**    the last interval.
    933 	*/
    934 	if (que->eitr_setting)
    935 		IXGBE_WRITE_REG(&adapter->hw,
    936 		    IXGBE_VTEITR(que->msix),
    937 		    que->eitr_setting);
    938 
    939 	que->eitr_setting = 0;
    940 
    941 	/* Idle, do nothing */
    942 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    943 		goto no_calc;
    944 
    945 	if ((txr->bytes) && (txr->packets))
    946 		newitr = txr->bytes/txr->packets;
    947 	if ((rxr->bytes) && (rxr->packets))
    948 		newitr = max(newitr,
    949 		    (rxr->bytes / rxr->packets));
    950 	newitr += 24; /* account for hardware frame, crc */
    951 
    952 	/* set an upper boundary */
    953 	newitr = min(newitr, 3000);
    954 
    955 	/* Be nice to the mid range */
    956 	if ((newitr > 300) && (newitr < 1200))
    957 		newitr = (newitr / 3);
    958 	else
    959 		newitr = (newitr / 2);
    960 
    961 	newitr |= newitr << 16;
    962 
    963 	/* save for next interrupt */
    964 	que->eitr_setting = newitr;
    965 
    966 	/* Reset state */
    967 	txr->bytes = 0;
    968 	txr->packets = 0;
    969 	rxr->bytes = 0;
    970 	rxr->packets = 0;
    971 
    972 no_calc:
    973 	if (more)
    974 		softint_schedule(que->que_si);
    975 	else /* Reenable this interrupt */
    976 		ixv_enable_queue(adapter, que->msix);
    977 	return 1;
    978 }
    979 
    980 static int
    981 ixv_msix_mbx(void *arg)
    982 {
    983 	struct adapter	*adapter = arg;
    984 	struct ixgbe_hw *hw = &adapter->hw;
    985 	u32		reg;
    986 
    987 	++adapter->link_irq.ev_count;
    988 
    989 	/* First get the cause */
    990 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    991 	/* Clear interrupt with write */
    992 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    993 
    994 	/* Link status change */
    995 	if (reg & IXGBE_EICR_LSC)
    996 		softint_schedule(adapter->link_si);
    997 
    998 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    999 	return 1;
   1000 }
   1001 
   1002 /*********************************************************************
   1003  *
   1004  *  Media Ioctl callback
   1005  *
   1006  *  This routine is called whenever the user queries the status of
   1007  *  the interface using ifconfig.
   1008  *
   1009  **********************************************************************/
   1010 static void
   1011 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1012 {
   1013 	struct adapter *adapter = ifp->if_softc;
   1014 
   1015 	INIT_DEBUGOUT("ixv_media_status: begin");
   1016 	IXGBE_CORE_LOCK(adapter);
   1017 	ixv_update_link_status(adapter);
   1018 
   1019 	ifmr->ifm_status = IFM_AVALID;
   1020 	ifmr->ifm_active = IFM_ETHER;
   1021 
   1022 	if (!adapter->link_active) {
   1023 		IXGBE_CORE_UNLOCK(adapter);
   1024 		return;
   1025 	}
   1026 
   1027 	ifmr->ifm_status |= IFM_ACTIVE;
   1028 
   1029 	switch (adapter->link_speed) {
   1030 		case IXGBE_LINK_SPEED_1GB_FULL:
   1031 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1032 			break;
   1033 		case IXGBE_LINK_SPEED_10GB_FULL:
   1034 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1035 			break;
   1036 	}
   1037 
   1038 	IXGBE_CORE_UNLOCK(adapter);
   1039 
   1040 	return;
   1041 }
   1042 
   1043 /*********************************************************************
   1044  *
   1045  *  Media Ioctl callback
   1046  *
   1047  *  This routine is called when the user changes speed/duplex using
   1048  *  the media/mediaopt option with ifconfig.
   1049  *
   1050  **********************************************************************/
   1051 static int
   1052 ixv_media_change(struct ifnet * ifp)
   1053 {
   1054 	struct adapter *adapter = ifp->if_softc;
   1055 	struct ifmedia *ifm = &adapter->media;
   1056 
   1057 	INIT_DEBUGOUT("ixv_media_change: begin");
   1058 
   1059 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1060 		return (EINVAL);
   1061 
   1062 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1063 	case IFM_AUTO:
   1064 		break;
   1065 	default:
   1066 		device_printf(adapter->dev, "Only auto media type\n");
   1067 		return (EINVAL);
   1068 	}
   1069 
   1070 	return (0);
   1071 }
   1072 
   1073 
   1074 /*********************************************************************
   1075  *  Multicast Update
   1076  *
   1077  *  This routine is called whenever multicast address list is updated.
   1078  *
   1079  **********************************************************************/
   1080 #define IXGBE_RAR_ENTRIES 16
   1081 
   1082 static void
   1083 ixv_set_multi(struct adapter *adapter)
   1084 {
   1085 	struct ether_multi *enm;
   1086 	struct ether_multistep step;
   1087 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1088 	u8	*update_ptr;
   1089 	int	mcnt = 0;
   1090 	struct ethercom *ec = &adapter->osdep.ec;
   1091 
   1092 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1093 
   1094 	ETHER_FIRST_MULTI(step, ec, enm);
   1095 	while (enm != NULL) {
   1096 		bcopy(enm->enm_addrlo,
   1097 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1098 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1099 		mcnt++;
   1100 		/* XXX This might be required --msaitoh */
   1101 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1102 			break;
   1103 		ETHER_NEXT_MULTI(step, enm);
   1104 	}
   1105 
   1106 	update_ptr = mta;
   1107 
   1108 	ixgbe_update_mc_addr_list(&adapter->hw,
   1109 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1110 
   1111 	return;
   1112 }
   1113 
   1114 /*
   1115  * This is an iterator function now needed by the multicast
   1116  * shared code. It simply feeds the shared code routine the
   1117  * addresses in the array of ixv_set_multi() one by one.
   1118  */
   1119 static u8 *
   1120 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1121 {
   1122 	u8 *addr = *update_ptr;
   1123 	u8 *newptr;
   1124 	*vmdq = 0;
   1125 
   1126 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1127 	*update_ptr = newptr;
   1128 	return addr;
   1129 }
   1130 
   1131 /*********************************************************************
   1132  *  Timer routine
   1133  *
   1134  *  This routine checks for link status, updates statistics,
   1135  *  and runs the watchdog check.
   1136  *
   1137  **********************************************************************/
   1138 
   1139 static void
   1140 ixv_local_timer(void *arg)
   1141 {
   1142 	struct adapter *adapter = arg;
   1143 
   1144 	IXGBE_CORE_LOCK(adapter);
   1145 	ixv_local_timer_locked(adapter);
   1146 	IXGBE_CORE_UNLOCK(adapter);
   1147 }
   1148 
   1149 static void
   1150 ixv_local_timer_locked(void *arg)
   1151 {
   1152 	struct adapter	*adapter = arg;
   1153 	device_t	dev = adapter->dev;
   1154 	struct ix_queue	*que = adapter->queues;
   1155 	u64		queues = 0;
   1156 	int		hung = 0;
   1157 
   1158 	KASSERT(mutex_owned(&adapter->core_mtx));
   1159 
   1160 	ixv_update_link_status(adapter);
   1161 
   1162 	/* Stats Update */
   1163 	ixv_update_stats(adapter);
   1164 
   1165 	/*
   1166 	** Check the TX queues status
   1167 	**      - mark hung queues so we don't schedule on them
   1168 	**      - watchdog only if all queues show hung
   1169 	*/
   1170 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1171 		/* Keep track of queues with work for soft irq */
   1172 		if (que->txr->busy)
   1173 			queues |= ((u64)1 << que->me);
   1174 		/*
   1175 		** Each time txeof runs without cleaning anything, while
   1176 		** there are still uncleaned descriptors, it increments
   1177 		** busy. If we get to the MAX we declare it hung.
   1178 		*/
   1179 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1180 			++hung;
   1181 			/* Mark the queue as inactive */
   1182 			adapter->active_queues &= ~((u64)1 << que->me);
   1183 			continue;
   1184 		} else {
   1185 			/* Check if we've come back from hung */
   1186 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1187 				adapter->active_queues |= ((u64)1 << que->me);
   1188 		}
   1189 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1190 			device_printf(dev,"Warning queue %d "
   1191 			    "appears to be hung!\n", i);
   1192 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1193 			++hung;
   1194 		}
   1195 
   1196 	}
   1197 
   1198 	/* Only truly watchdog if all queues show hung */
   1199 	if (hung == adapter->num_queues)
   1200 		goto watchdog;
   1201 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1202 		ixv_rearm_queues(adapter, queues);
   1203 	}
   1204 
   1205 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1206 	return;
   1207 
   1208 watchdog:
   1209 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1210 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1211 	adapter->watchdog_events.ev_count++;
   1212 	ixv_init_locked(adapter);
   1213 }
   1214 
   1215 /*
   1216 ** Note: this routine updates the OS on the link state;
   1217 **	the real check of the hardware only happens with
   1218 **	a link interrupt.
   1219 */
   1220 static void
   1221 ixv_update_link_status(struct adapter *adapter)
   1222 {
   1223 	struct ifnet	*ifp = adapter->ifp;
   1224 	device_t dev = adapter->dev;
   1225 
   1226 	if (adapter->link_up){
   1227 		if (adapter->link_active == FALSE) {
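        			/* link_speed 128 == IXGBE_LINK_SPEED_10GB_FULL (0x80) */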
   1228 			if (bootverbose)
   1229 				device_printf(dev,"Link is up %d Gbps %s \n",
   1230 				    ((adapter->link_speed == 128)? 10:1),
   1231 				    "Full Duplex");
   1232 			adapter->link_active = TRUE;
   1233 			if_link_state_change(ifp, LINK_STATE_UP);
   1234 		}
   1235 	} else { /* Link down */
   1236 		if (adapter->link_active == TRUE) {
   1237 			if (bootverbose)
   1238 				device_printf(dev,"Link is Down\n");
   1239 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1240 			adapter->link_active = FALSE;
   1241 		}
   1242 	}
   1243 
   1244 	return;
   1245 }
   1246 
   1247 
   1248 static void
   1249 ixv_ifstop(struct ifnet *ifp, int disable)
   1250 {
   1251 	struct adapter *adapter = ifp->if_softc;
   1252 
   1253 	IXGBE_CORE_LOCK(adapter);
   1254 	ixv_stop(adapter);
   1255 	IXGBE_CORE_UNLOCK(adapter);
   1256 }
   1257 
   1258 /*********************************************************************
   1259  *
   1260  *  This routine disables all traffic on the adapter by issuing a
   1261  *  global reset on the MAC and deallocates TX/RX buffers.
   1262  *
   1263  **********************************************************************/
   1264 
   1265 static void
   1266 ixv_stop(void *arg)
   1267 {
   1268 	struct ifnet   *ifp;
   1269 	struct adapter *adapter = arg;
   1270 	struct ixgbe_hw *hw = &adapter->hw;
   1271 	ifp = adapter->ifp;
   1272 
   1273 	KASSERT(mutex_owned(&adapter->core_mtx));
   1274 
   1275 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1276 	ixv_disable_intr(adapter);
   1277 
   1278 	/* Tell the stack that the interface is no longer active */
   1279 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1280 
   1281 	ixgbe_reset_hw(hw);
   1282 	adapter->hw.adapter_stopped = FALSE;
   1283 	ixgbe_stop_adapter(hw);
   1284 	callout_stop(&adapter->timer);
   1285 
   1286 	/* reprogram the RAR[0] in case user changed it. */
   1287 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1288 
   1289 	return;
   1290 }
   1291 
   1292 
   1293 /*********************************************************************
   1294  *
   1295  *  Determine hardware revision.
   1296  *
   1297  **********************************************************************/
   1298 static void
   1299 ixv_identify_hardware(struct adapter *adapter)
   1300 {
   1301 	pcitag_t tag;
   1302 	pci_chipset_tag_t pc;
   1303 	pcireg_t subid, id;
   1304 	struct ixgbe_hw *hw = &adapter->hw;
   1305 
   1306 	pc = adapter->osdep.pc;
   1307 	tag = adapter->osdep.tag;
   1308 
   1309 	/*
   1310 	** Make sure BUSMASTER is set; on a VM under
   1311 	** KVM it may not be, and that will break things.
   1312 	*/
   1313 	ixgbe_pci_enable_busmaster(pc, tag);
   1314 
   1315 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1316 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1317 
   1318 	/* Save off the information about this board */
   1319 	hw->vendor_id = PCI_VENDOR(id);
   1320 	hw->device_id = PCI_PRODUCT(id);
   1321 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1322 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1323 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1324 
   1325 	/* We need this to determine device-specific things */
   1326 	ixgbe_set_mac_type(hw);
   1327 
   1328 	/* Set the right number of segments */
   1329 	adapter->num_segs = IXGBE_82599_SCATTER;
   1330 
   1331 	return;
   1332 }
   1333 
   1334 /*********************************************************************
   1335  *
   1336  *  Setup MSIX Interrupt resources and handlers
   1337  *
   1338  **********************************************************************/
   1339 static int
   1340 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1341 {
   1342 	device_t	dev = adapter->dev;
   1343 	struct ix_queue *que = adapter->queues;
   1344 	struct		tx_ring *txr = adapter->tx_rings;
   1345 	int 		error, rid, vector = 0;
   1346 	pci_chipset_tag_t pc;
   1347 	pcitag_t	tag;
   1348 	char		intrbuf[PCI_INTRSTR_LEN];
   1349 	char		intr_xname[32];
   1350 	const char	*intrstr = NULL;
   1351 	kcpuset_t	*affinity;
   1352 	int		cpu_id = 0;
   1353 
   1354 	pc = adapter->osdep.pc;
   1355 	tag = adapter->osdep.tag;
   1356 
   1357 	adapter->osdep.nintrs = adapter->num_queues + 1;
   1358 	if (pci_msix_alloc_exact(pa,
   1359 	    &adapter->osdep.intrs, adapter->osdep.nintrs) != 0)
   1360 		return (ENXIO);
   1361 
   1362 	kcpuset_create(&affinity, false);
   1363 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1364 		snprintf(intr_xname, sizeof(intr_xname), "%s TX/RX",
   1365 		    device_xname(dev));
   1366 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1367 		    sizeof(intrbuf));
   1368 #ifdef IXV_MPSAFE
   1369 		pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1370 		    true);
   1371 #endif
   1372 		/* Set the handler function */
   1373 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   1374 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   1375 			intr_xname);
   1376 		if (que->res == NULL) {
   1377 			pci_intr_release(pc, adapter->osdep.intrs,
   1378 			    adapter->osdep.nintrs);
   1379 			aprint_error_dev(dev,
   1380 			    "Failed to register QUE handler");
   1381 			kcpuset_destroy(affinity);
   1382 			return (ENXIO);
   1383 		}
   1384 		que->msix = vector;
   1385 		adapter->active_queues |= (u64)1 << que->msix;
   1386 
   1387 		cpu_id = i;
   1388 		/* Round-robin affinity */
   1389 		kcpuset_zero(affinity);
   1390 		kcpuset_set(affinity, cpu_id % ncpu);
   1391 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1392 		    NULL);
   1393 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1394 		    intrstr);
   1395 		if (error == 0)
   1396 			aprint_normal(", bound queue %d to cpu %d\n",
   1397 			    i, cpu_id);
   1398 		else
   1399 			aprint_normal("\n");
   1400 
   1401 #ifndef IXGBE_LEGACY_TX
   1402 		txr->txr_si = softint_establish(SOFTINT_NET,
   1403 		    ixgbe_deferred_mq_start, txr);
   1404 #endif
   1405 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1406 		    que);
   1407 		if (que->que_si == NULL) {
   1408 			aprint_error_dev(dev,
   1409 			    "could not establish software interrupt\n");
   1410 		}
   1411 	}
   1412 
   1413 	/* and Mailbox */
   1414 	cpu_id++;
   1415 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   1416 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1417 	    sizeof(intrbuf));
   1418 #ifdef IXG_MPSAFE
   1419 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   1420 #endif
   1421 	/* Set the mbx handler function */
   1422 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   1423 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   1424 		intr_xname);
   1425 	if (adapter->osdep.ihs[vector] == NULL) {
   1426 		adapter->res = NULL;
   1427 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1428 		kcpuset_destroy(affinity);
   1429 		return (ENXIO);
   1430 	}
   1431 	/* Round-robin affinity */
   1432 	kcpuset_zero(affinity);
   1433 	kcpuset_set(affinity, cpu_id % ncpu);
   1434 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1435 
   1436 	aprint_normal_dev(dev,
   1437 	    "for link, interrupting at %s, ", intrstr);
   1438 	if (error == 0) {
   1439 		aprint_normal("affinity to cpu %d\n", cpu_id);
   1440 	}
   1441 	adapter->vector = vector;
   1442 	/* Tasklets for Mailbox */
   1443 	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1444 	    adapter);
   1445 	/*
   1446 	** Due to a broken design QEMU will fail to properly
   1447 	** enable the guest for MSIX unless the vectors in
   1448 	** the table are all set up, so we must rewrite the
   1449 	** ENABLE in the MSIX control register again at this
   1450 	** point to cause it to successfully initialize us.
   1451 	*/
   1452 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1453 		int msix_ctrl;
   1454 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1455 		rid += PCI_MSIX_CTL;
   1456 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1457 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1458 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1459 	}
   1460 
   1461 	kcpuset_destroy(affinity);
   1462 	return (0);
   1463 }
   1464 
   1465 /*
   1466  * Set up MSI-X resources; note that the VF
   1467  * device MUST use MSI-X, there is no fallback.
   1468  */
   1469 static int
   1470 ixv_setup_msix(struct adapter *adapter)
   1471 {
   1472 	device_t dev = adapter->dev;
   1473 	int want, queues, msgs;
   1474 
   1475 	/* Must have at least 2 MSIX vectors */
   1476 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1477 	if (msgs < 2) {
   1478 		aprint_error_dev(dev,"MSIX config error\n");
   1479 		return (ENXIO);
   1480 	}
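        	/* Cap at IXG_MAX_NINTR, the most MSI-X vectors this driver will use per device. */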
   1481 	msgs = MIN(msgs, IXG_MAX_NINTR);
   1482 
   1483 	/* Figure out a reasonable auto config value */
   1484 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   1485 
   1486 	if (ixv_num_queues != 0)
   1487 		queues = ixv_num_queues;
   1488 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   1489 		queues = IXGBE_VF_MAX_TX_QUEUES;
   1490 
   1491 	/*
   1492 	** Want vectors for the queues,
   1493 	** plus an additional for mailbox.
   1494 	*/
   1495 	want = queues + 1;
   1496 	if (msgs >= want) {
   1497 		msgs = want;
   1498 	} else {
   1499 		aprint_error_dev(dev,
   1500 		    "MSIX Configuration Problem, "
   1501 		    "%d vectors but %d queues wanted!\n",
   1502 		    msgs, want);
   1503 		return -1;
   1504 	}
   1505 
   1506 	adapter->msix_mem = (void *)1; /* XXX */
   1507 	aprint_normal_dev(dev,
   1508 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1509 	adapter->num_queues = queues;
   1510 	return (msgs);
   1511 }
   1512 
   1513 
   1514 static int
   1515 ixv_allocate_pci_resources(struct adapter *adapter,
   1516     const struct pci_attach_args *pa)
   1517 {
   1518 	pcireg_t	memtype;
   1519 	device_t        dev = adapter->dev;
   1520 	bus_addr_t addr;
   1521 	int flags;
   1522 
   1523 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1524 
   1525 	switch (memtype) {
   1526 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1527 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1528 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1529 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1530 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1531 			goto map_err;
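        		/*
        		 * Register space should not be mapped prefetchable;
        		 * strip the flag if the BAR advertises it.
        		 */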
   1532 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1533 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1534 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1535 		}
   1536 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1537 		     adapter->osdep.mem_size, flags,
   1538 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1539 map_err:
   1540 			adapter->osdep.mem_size = 0;
   1541 			aprint_error_dev(dev, "unable to map BAR0\n");
   1542 			return ENXIO;
   1543 		}
   1544 		break;
   1545 	default:
   1546 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1547 		return ENXIO;
   1548 	}
   1549 
   1550 	/* Pick up the tuneable queues */
   1551 	adapter->num_queues = ixv_num_queues;
   1552 	adapter->hw.back = adapter;
   1553 
   1554 	/*
   1555 	** Now set up MSI-X, which should
   1556 	** return the number of
   1557 	** configured vectors.
   1558 	*/
   1559 	adapter->msix = ixv_setup_msix(adapter);
   1560 	if (adapter->msix == ENXIO)
   1561 		return (ENXIO);
   1562 	else
   1563 		return (0);
   1564 }
   1565 
   1566 static void
   1567 ixv_free_pci_resources(struct adapter * adapter)
   1568 {
   1569 	struct 		ix_queue *que = adapter->queues;
   1570 	int		rid;
   1571 
   1572 	/*
   1573 	**  Release all msix queue resources:
   1574 	*/
   1575 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1576 		rid = que->msix + 1;
   1577 		if (que->res != NULL)
   1578 			pci_intr_disestablish(adapter->osdep.pc,
   1579 			    adapter->osdep.ihs[i]);
   1580 	}
   1581 
   1582 
   1583 	/* Clean the Legacy or Link interrupt last */
   1584 	if (adapter->vector) /* we are doing MSIX */
   1585 		rid = adapter->vector + 1;
   1586 	else
   1587 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
   1588 
   1589 	if (adapter->osdep.ihs[rid] != NULL)
   1590 		pci_intr_disestablish(adapter->osdep.pc,
   1591 		    adapter->osdep.ihs[rid]);
   1592 	adapter->osdep.ihs[rid] = NULL;
   1593 
   1594 #if defined(NETBSD_MSI_OR_MSIX)
   1595 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1596 	    adapter->osdep.nintrs);
   1597 #endif
   1598 
   1599 	if (adapter->osdep.mem_size != 0) {
   1600 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1601 		    adapter->osdep.mem_bus_space_handle,
   1602 		    adapter->osdep.mem_size);
   1603 	}
   1604 
   1605 	return;
   1606 }
   1607 
   1608 /*********************************************************************
   1609  *
   1610  *  Setup networking device structure and register an interface.
   1611  *
   1612  **********************************************************************/
   1613 static void
   1614 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1615 {
   1616 	struct ethercom *ec = &adapter->osdep.ec;
   1617 	struct ifnet   *ifp;
   1618 
   1619 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1620 
   1621 	ifp = adapter->ifp = &ec->ec_if;
   1622 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1623 	ifp->if_baudrate = 1000000000;
   1624 	ifp->if_init = ixv_init;
   1625 	ifp->if_stop = ixv_ifstop;
   1626 	ifp->if_softc = adapter;
   1627 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1628 	ifp->if_ioctl = ixv_ioctl;
   1629 #ifndef IXGBE_LEGACY_TX
   1630 	ifp->if_transmit = ixgbe_mq_start;
   1631 #endif
   1632 	ifp->if_start = ixgbe_start;
   1633 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1634 
   1635 	if_initialize(ifp);
   1636 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1637 #ifndef IXGBE_LEGACY_TX
   1638 #if 0	/* We use per TX queue softint */
   1639 	if_deferred_start_init(ifp, ixgbe_deferred_mq_start);
   1640 #endif
   1641 #endif
   1642 	if_register(ifp);
   1643 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1644 
   1645 	adapter->max_frame_size =
   1646 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
   1647 
   1648 	/*
   1649 	 * Tell the upper layer(s) we support long frames.
   1650 	 */
   1651 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1652 
   1653 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1654 	ifp->if_capenable = 0;
   1655 
   1656 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1657 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1658 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1659 	    | ETHERCAP_VLAN_MTU;
   1660 	ec->ec_capenable = ec->ec_capabilities;
   1661 
   1662 	/* Don't enable LRO by default */
   1663 	ifp->if_capabilities |= IFCAP_LRO;
   1664 #if 0
   1665 	ifp->if_capenable = ifp->if_capabilities;
   1666 #endif
   1667 
   1668 	/*
   1669 	** Don't turn this on by default. If vlans are
   1670 	** created on another pseudo device (e.g. lagg),
   1671 	** vlan events are not passed through, which breaks
   1672 	** operation, but with HW FILTER off it works. If
   1673 	** you use vlans directly on this driver you can
   1674 	** enable this and get full hardware tag filtering.
   1675 	*/
   1676 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1677 
   1678 	/*
   1679 	 * Specify the media types supported by this adapter and register
   1680 	 * callbacks to update media and link information
   1681 	 */
   1682 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1683 		     ixv_media_status);
   1684 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1685 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1686 
   1687 	return;
   1688 }
   1689 
   1690 static void
   1691 ixv_config_link(struct adapter *adapter)
   1692 {
   1693 	struct ixgbe_hw *hw = &adapter->hw;
   1694 
   1695 	if (hw->mac.ops.check_link)
   1696 		hw->mac.ops.check_link(hw, &adapter->link_speed,
   1697 		    &adapter->link_up, FALSE);
   1698 }
   1699 
   1700 
   1701 /*********************************************************************
   1702  *
   1703  *  Enable transmit unit.
   1704  *
   1705  **********************************************************************/
   1706 static void
   1707 ixv_initialize_transmit_units(struct adapter *adapter)
   1708 {
   1709 	struct tx_ring	*txr = adapter->tx_rings;
   1710 	struct ixgbe_hw	*hw = &adapter->hw;
   1711 
   1712 
   1713 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1714 		u64	tdba = txr->txdma.dma_paddr;
   1715 		u32	txctrl, txdctl;
   1716 
   1717 		/* Set WTHRESH to 8, burst writeback */
   1718 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1719 		txdctl |= (8 << 16);
   1720 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1721 
   1722 		/* Set the HW Tx Head and Tail indices */
   1723 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1724 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1725 
   1726 		/* Set Tx Tail register */
   1727 		txr->tail = IXGBE_VFTDT(i);
   1728 
   1729 		/* Set Ring parameters */
   1730 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1731 		       (tdba & 0x00000000ffffffffULL));
   1732 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1733 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1734 		    adapter->num_tx_desc *
   1735 		    sizeof(struct ixgbe_legacy_tx_desc));
   1736 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1737 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1738 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1739 
   1740 		/* Now enable */
   1741 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1742 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1743 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1744 	}
   1745 
   1746 	return;
   1747 }
   1748 
   1749 
   1750 /*********************************************************************
   1751  *
   1752  *  Setup receive registers and features.
   1753  *
   1754  **********************************************************************/
   1755 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1756 
   1757 static void
   1758 ixv_initialize_receive_units(struct adapter *adapter)
   1759 {
   1760 	struct	rx_ring	*rxr = adapter->rx_rings;
   1761 	struct ixgbe_hw	*hw = &adapter->hw;
   1762 	struct ifnet	*ifp = adapter->ifp;
   1763 	u32		bufsz, rxcsum, psrtype;
   1764 
   1765 	if (ifp->if_mtu > ETHERMTU)
   1766 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1767 	else
   1768 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
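	/*
	** bufsz above is the receive packet buffer size shifted into the
	** units of the SRRCTL BSIZEPKT field (1 KB granularity), i.e.
	** 4 KB buffers for jumbo MTUs and 2 KB buffers otherwise.
	*/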
   1769 
   1770 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1771 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1772 	    IXGBE_PSRTYPE_L2HDR;
   1773 
   1774 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1775 
   1776 	/* Tell PF our max_frame size */
   1777 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1778 
   1779 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1780 		u64 rdba = rxr->rxdma.dma_paddr;
   1781 		u32 reg, rxdctl;
   1782 
   1783 		/* Disable the queue */
   1784 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1785 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1786 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1787 		for (int j = 0; j < 10; j++) {
   1788 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1789 			    IXGBE_RXDCTL_ENABLE)
   1790 				msec_delay(1);
   1791 			else
   1792 				break;
   1793 		}
   1794 		wmb();
   1795 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1796 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1797 		    (rdba & 0x00000000ffffffffULL));
   1798 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1799 		    (rdba >> 32));
   1800 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1801 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1802 
   1803 		/* Reset the ring indices */
   1804 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1805 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1806 
   1807 		/* Set up the SRRCTL register */
   1808 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1809 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1810 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1811 		reg |= bufsz;
   1812 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1813 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1814 
   1815 		/* Capture Rx Tail index */
   1816 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1817 
   1818 		/* Do the queue enabling last */
   1819 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1820 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1821 		for (int k = 0; k < 10; k++) {
   1822 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1823 			    IXGBE_RXDCTL_ENABLE)
   1824 				break;
   1825 			else
   1826 				msec_delay(1);
   1827 		}
   1828 		wmb();
   1829 
   1830 		/* Set the Tail Pointer */
   1831 #ifdef DEV_NETMAP
   1832 		/*
   1833 		 * In netmap mode, we must preserve the buffers made
   1834 		 * available to userspace before the if_init()
   1835 		 * (this is true by default on the TX side, because
   1836 		 * init makes all buffers available to userspace).
   1837 		 *
   1838 		 * netmap_reset() and the device specific routines
   1839 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1840 		 * buffers at the end of the NIC ring, so here we
   1841 		 * must set the RDT (tail) register to make sure
   1842 		 * they are not overwritten.
   1843 		 *
   1844 		 * In this driver the NIC ring starts at RDH = 0,
   1845 		 * RDT points to the last slot available for reception (?),
   1846 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1847 		 */
   1848 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1849 			struct netmap_adapter *na = NA(adapter->ifp);
   1850 			struct netmap_kring *kring = &na->rx_rings[i];
   1851 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1852 
   1853 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1854 		} else
   1855 #endif /* DEV_NETMAP */
   1856 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1857 			    adapter->num_rx_desc - 1);
   1858 	}
   1859 
   1860 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1861 
   1862 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1863 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1864 
   1865 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1866 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1867 
   1868 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1869 
   1870 	return;
   1871 }
   1872 
   1873 static void
   1874 ixv_setup_vlan_support(struct adapter *adapter)
   1875 {
   1876 	struct ixgbe_hw *hw = &adapter->hw;
   1877 	u32		ctrl, vid, vfta, retry;
   1878 	struct rx_ring	*rxr;
   1879 
   1880 	/*
   1881 	** We get here through init_locked, meaning
   1882 	** a soft reset, which has already cleared
   1883 	** the VFTA and other state, so if no
   1884 	** vlans have been registered, do nothing.
   1885 	*/
   1886 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1887 		return;
   1888 
   1889 	/* Enable the queues */
   1890 	for (int i = 0; i < adapter->num_queues; i++) {
   1891 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1892 		ctrl |= IXGBE_RXDCTL_VME;
   1893 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1894 		/*
   1895 		 * Let Rx path know that it needs to store VLAN tag
   1896 		 * as part of extra mbuf info.
   1897 		 */
   1898 		rxr = &adapter->rx_rings[i];
   1899 		rxr->vtag_strip = TRUE;
   1900 	}
   1901 
   1902 	/*
   1903 	** A soft reset zeroes out the VFTA, so
   1904 	** we need to repopulate it now.
   1905 	*/
   1906 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1907 		if (ixv_shadow_vfta[i] == 0)
   1908 			continue;
   1909 		vfta = ixv_shadow_vfta[i];
   1910 		/*
   1911 		** Reconstruct the vlan ids
   1912 		** from the bits set in each
   1913 		** 32-bit word of the array.
   1914 		*/
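		/*
		** For example, bit 4 of shadow VFTA word 3
		** corresponds to vlan id 3 * 32 + 4 = 100.
		*/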
   1915 		for (int j = 0; j < 32; j++) {
   1916 			retry = 0;
   1917 			if ((vfta & (1 << j)) == 0)
   1918 				continue;
   1919 			vid = (i * 32) + j;
   1920 			/* Call the shared code mailbox routine */
   1921 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1922 				if (++retry > 5)
   1923 					break;
   1924 			}
   1925 		}
   1926 	}
   1927 }
   1928 
   1929 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1930 /*
   1931 ** This routine is run via a vlan config EVENT;
   1932 ** it enables us to use the HW filter table since
   1933 ** we can get the vlan id. This just creates the
   1934 ** entry in the soft version of the VFTA; init will
   1935 ** repopulate the real table.
   1936 */
   1937 static void
   1938 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1939 {
   1940 	struct adapter	*adapter = ifp->if_softc;
   1941 	u16		index, bit;
   1942 
   1943 	if (ifp->if_softc != arg) /* Not our event */
   1944 		return;
   1945 
   1946 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1947 		return;
   1948 
   1949 	IXGBE_CORE_LOCK(adapter);
   1950 	index = (vtag >> 5) & 0x7F;
   1951 	bit = vtag & 0x1F;
   1952 	ixv_shadow_vfta[index] |= (1 << bit);
   1953 	/* Re-init to load the changes */
   1954 	ixv_init_locked(adapter);
   1955 	IXGBE_CORE_UNLOCK(adapter);
   1956 }
   1957 
   1958 /*
   1959 ** This routine is run via a vlan
   1960 ** unconfig EVENT; it removes our
   1961 ** entry from the soft VFTA.
   1962 */
   1963 static void
   1964 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1965 {
   1966 	struct adapter	*adapter = ifp->if_softc;
   1967 	u16		index, bit;
   1968 
   1969 	if (ifp->if_softc !=  arg)
   1970 		return;
   1971 
   1972 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1973 		return;
   1974 
   1975 	IXGBE_CORE_LOCK(adapter);
   1976 	index = (vtag >> 5) & 0x7F;
   1977 	bit = vtag & 0x1F;
   1978 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1979 	/* Re-init to load the changes */
   1980 	ixv_init_locked(adapter);
   1981 	IXGBE_CORE_UNLOCK(adapter);
   1982 }
   1983 #endif
   1984 
   1985 static void
   1986 ixv_enable_intr(struct adapter *adapter)
   1987 {
   1988 	struct ixgbe_hw *hw = &adapter->hw;
   1989 	struct ix_queue *que = adapter->queues;
   1990 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1991 
   1992 
   1993 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1994 
   1995 	mask = IXGBE_EIMS_ENABLE_MASK;
   1996 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1997 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1998 
   1999 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2000 		ixv_enable_queue(adapter, que->msix);
   2001 
   2002 	IXGBE_WRITE_FLUSH(hw);
   2003 
   2004 	return;
   2005 }
   2006 
   2007 static void
   2008 ixv_disable_intr(struct adapter *adapter)
   2009 {
   2010 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2011 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   2012 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2013 	return;
   2014 }
   2015 
   2016 /*
   2017 ** Setup the correct IVAR register for a particular MSIX interrupt
   2018 **  - entry is the register array entry
   2019 **  - vector is the MSIX vector for this queue
   2020 **  - type is RX/TX/MISC
   2021 */
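/*
** For example (following the computation below): the RX mapping for
** queue entry 3 (type 0) lands in bits [23:16] of VTIVAR(1) and the
** matching TX mapping (type 1) in bits [31:24] of the same register,
** while the MISC vector always occupies the low byte of VTIVAR_MISC.
*/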
   2022 static void
   2023 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2024 {
   2025 	struct ixgbe_hw *hw = &adapter->hw;
   2026 	u32 ivar, index;
   2027 
   2028 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2029 
   2030 	if (type == -1) { /* MISC IVAR */
   2031 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2032 		ivar &= ~0xFF;
   2033 		ivar |= vector;
   2034 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2035 	} else {	/* RX/TX IVARS */
   2036 		index = (16 * (entry & 1)) + (8 * type);
   2037 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2038 		ivar &= ~(0xFF << index);
   2039 		ivar |= (vector << index);
   2040 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2041 	}
   2042 }
   2043 
   2044 static void
   2045 ixv_configure_ivars(struct adapter *adapter)
   2046 {
   2047 	struct  ix_queue *que = adapter->queues;
   2048 
   2049 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2050 		/* First the RX queue entry */
   2051 		ixv_set_ivar(adapter, i, que->msix, 0);
   2052 		/* ... and the TX */
   2053 		ixv_set_ivar(adapter, i, que->msix, 1);
   2054 		/* Set an initial value in EITR */
   2055 		IXGBE_WRITE_REG(&adapter->hw,
   2056 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   2057 	}
   2058 
   2059 	/* For the mailbox interrupt */
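	/* (ixv_set_ivar() ignores its entry argument when type is -1) */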
   2060 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2061 }
   2062 
   2063 
   2064 /*
   2065 ** Tasklet handler for MSI-X mailbox (MBX) interrupts
   2066 **  - run outside the interrupt context since it might sleep
   2067 */
   2068 static void
   2069 ixv_handle_mbx(void *context)
   2070 {
   2071 	struct adapter  *adapter = context;
   2072 
   2073 	ixgbe_check_link(&adapter->hw,
   2074 	    &adapter->link_speed, &adapter->link_up, 0);
   2075 	ixv_update_link_status(adapter);
   2076 }
   2077 
   2078 /*
   2079 ** The VF stats registers never have a truly virgin
   2080 ** starting point, so this routine tries to make an
   2081 ** artificial one, marking ground zero on attach as
   2082 ** it were.
   2083 */
   2084 static void
   2085 ixv_save_stats(struct adapter *adapter)
   2086 {
   2087 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2088 
   2089 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2090 		stats->saved_reset_vfgprc +=
   2091 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2092 		stats->saved_reset_vfgptc +=
   2093 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2094 		stats->saved_reset_vfgorc +=
   2095 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2096 		stats->saved_reset_vfgotc +=
   2097 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2098 		stats->saved_reset_vfmprc +=
   2099 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2100 	}
   2101 }
   2102 
   2103 static void
   2104 ixv_init_stats(struct adapter *adapter)
   2105 {
   2106 	struct ixgbe_hw *hw = &adapter->hw;
   2107 
   2108 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2109 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2110 	adapter->stats.vf.last_vfgorc |=
   2111 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2112 
   2113 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2114 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2115 	adapter->stats.vf.last_vfgotc |=
   2116 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2117 
   2118 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2119 
   2120 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2121 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2122 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2123 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2124 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2125 }
   2126 
   2127 #define UPDATE_STAT_32(reg, last, count)		\
   2128 {							\
   2129 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2130 	if (current < last)				\
   2131 		count.ev_count += 0x100000000LL;	\
   2132 	last = current;					\
   2133 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2134 	count.ev_count |= current;			\
   2135 }
   2136 
   2137 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2138 {							\
   2139 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2140 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2141 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2142 	if (current < last)				\
   2143 		count.ev_count += 0x1000000000LL;	\
   2144 	last = current;					\
   2145 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2146 	count.ev_count |= current;			\
   2147 }
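/*
** Example of the wrap handling above: if a 32-bit register such as
** VFGPRC last read 0xFFFFFFF0 and now reads 0x00000010, the hardware
** counter wrapped, so 2^32 is added to the accumulated event count
** before its low 32 bits are replaced with the new reading.  The
** 36-bit octet counters (VFGORC/VFGOTC) are handled the same way,
** except that a wrap adds 2^36.
*/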
   2148 
   2149 /*
   2150 ** ixv_update_stats - Update the board statistics counters.
   2151 */
   2152 void
   2153 ixv_update_stats(struct adapter *adapter)
   2154 {
   2155 	struct ixgbe_hw *hw = &adapter->hw;
   2156 
   2157 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
   2158 	    adapter->stats.vf.vfgprc);
   2159 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
   2160 	    adapter->stats.vf.vfgptc);
   2161 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
   2162 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
   2163 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
   2164 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
   2165 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
   2166 	    adapter->stats.vf.vfmprc);
   2167 }
   2168 
   2169 /*
   2170  * Add statistics (event counters) for the VF.
   2171  */
   2172 static void
   2173 ixv_add_stats_sysctls(struct adapter *adapter)
   2174 {
   2175 	device_t dev = adapter->dev;
   2176 	struct ix_queue *que = &adapter->queues[0];
   2177 	struct tx_ring *txr = que->txr;
   2178 	struct rx_ring *rxr = que->rxr;
   2179 
   2180 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2181 
   2182 	const char *xname = device_xname(dev);
   2183 
   2184 	/* Driver Statistics */
   2185 	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
   2186 	    NULL, xname, "Driver dropped packets");
   2187 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2188 	    NULL, xname, "m_defrag() failed");
   2189 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2190 	    NULL, xname, "Watchdog timeouts");
   2191 
   2192 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2193 	    xname, "Good Packets Received");
   2194 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2195 	    xname, "Good Octets Received");
   2196 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2197 	    xname, "Multicast Packets Received");
   2198 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2199 	    xname, "Good Packets Transmitted");
   2200 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2201 	    xname, "Good Octets Transmitted");
   2202 	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
   2203 	    xname, "IRQs on queue");
   2204 	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
   2205 	    xname, "RX irqs on queue");
   2206 	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
   2207 	    xname, "RX packets");
   2208 	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
   2209 	    xname, "RX bytes");
   2210 	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
   2211 	    xname, "Discarded RX packets");
   2212 	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
   2213 	    xname, "TX Packets");
   2214 	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
   2215 	    xname, "# of times not enough descriptors were available during TX");
   2216 	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
   2217 	    xname, "TX TSO");
   2218 }
   2219 
   2220 static void
   2221 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2222 	const char *description, int *limit, int value)
   2223 {
   2224 	device_t dev =  adapter->dev;
   2225 	struct sysctllog **log;
   2226 	const struct sysctlnode *rnode, *cnode;
   2227 
   2228 	log = &adapter->sysctllog;
   2229 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2230 		aprint_error_dev(dev, "could not create sysctl root\n");
   2231 		return;
   2232 	}
   2233 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2234 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2235 	    name, SYSCTL_DESCR(description),
   2236 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2237 		aprint_error_dev(dev, "could not create sysctl\n");
   2238 	*limit = value;
   2239 }
   2240 
   2241 /**********************************************************************
   2242  *
   2243  *  This routine is called when the driver's debug sysctl is set to 1
   2244  *  (see ixv_sysctl_debug() below).  It provides a way to take a look
   2245  *  at important statistics maintained by the driver and hardware.
   2246  *
   2247  **********************************************************************/
   2248 static void
   2249 ixv_print_debug_info(struct adapter *adapter)
   2250 {
   2251         device_t dev = adapter->dev;
   2252         struct ixgbe_hw         *hw = &adapter->hw;
   2253         struct ix_queue         *que = adapter->queues;
   2254         struct rx_ring          *rxr;
   2255         struct tx_ring          *txr;
   2256 #ifdef LRO
   2257         struct lro_ctrl         *lro;
   2258 #endif /* LRO */
   2259 
   2260         device_printf(dev,"Error Byte Count = %u \n",
   2261             IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2262 
   2263         for (int i = 0; i < adapter->num_queues; i++, que++) {
   2264                 txr = que->txr;
   2265                 rxr = que->rxr;
   2266 #ifdef LRO
   2267                 lro = &rxr->lro;
   2268 #endif /* LRO */
   2269                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
   2270                     que->msix, (long)que->irqs.ev_count);
   2271                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
   2272                     rxr->me, (long long)rxr->rx_packets.ev_count);
   2273                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
   2274                     rxr->me, (long)rxr->rx_bytes.ev_count);
   2275 #ifdef LRO
   2276                 device_printf(dev,"RX(%d) LRO Queued= %lld\n",
   2277                     rxr->me, (long long)lro->lro_queued);
   2278                 device_printf(dev,"RX(%d) LRO Flushed= %lld\n",
   2279                     rxr->me, (long long)lro->lro_flushed);
   2280 #endif /* LRO */
   2281                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
   2282                     txr->me, (long)txr->total_packets.ev_count);
   2283                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
   2284                     txr->me, (long)txr->no_desc_avail.ev_count);
   2285         }
   2286 
   2287         device_printf(dev,"MBX IRQ Handled: %lu\n",
   2288             (long)adapter->link_irq.ev_count);
   2289         return;
   2290 }
   2291 
   2292 static int
   2293 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2294 {
   2295 	struct sysctlnode node;
   2296 	int error, result;
   2297 	struct adapter *adapter;
   2298 
   2299 	node = *rnode;
   2300 	adapter = (struct adapter *)node.sysctl_data;
   2301 	node.sysctl_data = &result;
   2302 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2303 
   2304 	if (error)
   2305 		return error;
   2306 
   2307 	if (result == 1)
   2308 		ixv_print_debug_info(adapter);
   2309 
   2310 	return 0;
   2311 }
   2312 
   2313 const struct sysctlnode *
   2314 ixv_sysctl_instance(struct adapter *adapter)
   2315 {
   2316 	const char *dvname;
   2317 	struct sysctllog **log;
   2318 	int rc;
   2319 	const struct sysctlnode *rnode;
   2320 
   2321 	log = &adapter->sysctllog;
   2322 	dvname = device_xname(adapter->dev);
   2323 
   2324 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2325 	    0, CTLTYPE_NODE, dvname,
   2326 	    SYSCTL_DESCR("ixv information and settings"),
   2327 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2328 		goto err;
   2329 
   2330 	return rnode;
   2331 err:
   2332 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2333 	return NULL;
   2334 }
   2335