      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.40 2017/02/08 04:05:13 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixgbe.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.4.6-k";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
     50  *  Used by probe to select devices to load on
     51  *  Last field stores an index into ixv_strings
     52  *  Last entry must be all 0s
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     63 	/* required last entry */
     64 	{0, 0, 0, 0, 0}
     65 };
     66 
     67 /*********************************************************************
     68  *  Table of branding strings
     69  *********************************************************************/
     70 
     71 static const char    *ixv_strings[] = {
     72 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     73 };
     74 
     75 /*********************************************************************
     76  *  Function prototypes
     77  *********************************************************************/
     78 static int      ixv_probe(device_t, cfdata_t, void *);
     79 static void	ixv_attach(device_t, device_t, void *);
     80 static int      ixv_detach(device_t, int);
     81 #if 0
     82 static int      ixv_shutdown(device_t);
     83 #endif
     84 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     85 static int	ixv_init(struct ifnet *);
     86 static void	ixv_init_locked(struct adapter *);
     87 static void     ixv_stop(void *);
     88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     89 static int      ixv_media_change(struct ifnet *);
     90 static void     ixv_identify_hardware(struct adapter *);
     91 static int      ixv_allocate_pci_resources(struct adapter *,
     92 		    const struct pci_attach_args *);
     93 static int      ixv_allocate_msix(struct adapter *,
     94 		    const struct pci_attach_args *);
     95 static int	ixv_setup_msix(struct adapter *);
     96 static void	ixv_free_pci_resources(struct adapter *);
     97 static void     ixv_local_timer(void *);
     98 static void     ixv_local_timer_locked(void *);
     99 static void     ixv_setup_interface(device_t, struct adapter *);
    100 static void     ixv_config_link(struct adapter *);
    101 
    102 static void     ixv_initialize_transmit_units(struct adapter *);
    103 static void     ixv_initialize_receive_units(struct adapter *);
    104 
    105 static void     ixv_enable_intr(struct adapter *);
    106 static void     ixv_disable_intr(struct adapter *);
    107 static void     ixv_set_multi(struct adapter *);
    108 static void     ixv_update_link_status(struct adapter *);
    109 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    110 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    111 static void	ixv_configure_ivars(struct adapter *);
    112 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    113 
    114 static void	ixv_setup_vlan_support(struct adapter *);
    115 #if 0
    116 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    117 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    118 #endif
    119 
    120 static void	ixv_save_stats(struct adapter *);
    121 static void	ixv_init_stats(struct adapter *);
    122 static void	ixv_update_stats(struct adapter *);
    123 static void	ixv_add_stats_sysctls(struct adapter *);
    124 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    125 		    const char *, int *, int);
    126 
    127 /* The MSI/X Interrupt handlers */
    128 static int	ixv_msix_que(void *);
    129 static int	ixv_msix_mbx(void *);
    130 
    131 /* Deferred interrupt tasklets */
    132 static void	ixv_handle_que(void *);
    133 static void	ixv_handle_mbx(void *);
    134 
    135 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    136 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    137 
    138 #ifdef DEV_NETMAP
    139 /*
    140  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    141  * if_ix.c.
    142  */
    143 extern void ixgbe_netmap_attach(struct adapter *adapter);
    144 
    145 #include <net/netmap.h>
    146 #include <sys/selinfo.h>
    147 #include <dev/netmap/netmap_kern.h>
    148 #endif /* DEV_NETMAP */
    149 
    150 /*********************************************************************
    151  *  FreeBSD Device Interface Entry Points
    152  *********************************************************************/
    153 
    154 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    155     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    156     DVF_DETACH_SHUTDOWN);
    157 
    158 #if 0
    159 static device_method_t ixv_methods[] = {
    160 	/* Device interface */
    161 	DEVMETHOD(device_probe, ixv_probe),
    162 	DEVMETHOD(device_attach, ixv_attach),
    163 	DEVMETHOD(device_detach, ixv_detach),
    164 	DEVMETHOD(device_shutdown, ixv_shutdown),
    165 	DEVMETHOD_END
    166 };
    167 #endif
    168 
    169 #if 0
    170 static driver_t ixv_driver = {
    171 	"ixv", ixv_methods, sizeof(struct adapter),
    172 };
    173 
    174 devclass_t ixv_devclass;
    175 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    176 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    177 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    178 #ifdef DEV_NETMAP
    179 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    180 #endif /* DEV_NETMAP */
    181 /* XXX depend on 'ix' ? */
    182 #endif
    183 
    184 /*
    185 ** TUNEABLE PARAMETERS:
    186 */
    187 
    188 /* Number of Queues - do not exceed MSIX vectors - 1 */
    189 static int ixv_num_queues = 1;
    190 #define	TUNABLE_INT(__x, __y)
    191 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    192 
    193 /*
    194 ** AIM: Adaptive Interrupt Moderation
    195 ** which means that the interrupt rate
    196 ** is varied over time based on the
    197 ** traffic for that interrupt vector
    198 */
    199 static int ixv_enable_aim = FALSE;
    200 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    201 
    202 /* How many packets rxeof tries to clean at a time */
    203 static int ixv_rx_process_limit = 256;
    204 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    205 
    206 /* How many packets txeof tries to clean at a time */
    207 static int ixv_tx_process_limit = 256;
    208 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    209 
    210 /*
    211 ** Number of TX descriptors per ring,
    212 ** setting higher than RX as this seems
    213 ** the better performing choice.
    214 */
    215 static int ixv_txd = DEFAULT_TXD;
    216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    217 
    218 /* Number of RX descriptors per ring */
    219 static int ixv_rxd = DEFAULT_RXD;
    220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    221 
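/*
** Example usage (illustrative): TUNABLE_INT() is defined away above, so
** on NetBSD these are not boot-time tunables; the run-time knobs are the
** sysctl nodes created via ixv_sysctl_attach()/ixv_set_sysctl_value(),
** e.g. (node names assumed for a hypothetical ixv0 instance):
**
**	sysctl -w hw.ixv0.enable_aim=1
**	sysctl -w hw.ixv0.rx_processing_limit=512
*/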
    222 /*
    223 ** Shadow VFTA table, this is needed because
    224 ** the real filter table gets cleared during
    225 ** a soft reset and we need to repopulate it.
    226 */
    227 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    228 
    229 /*********************************************************************
    230  *  Device identification routine
    231  *
    232  *  ixv_probe determines if the driver should be loaded on the
    233  *  adapter based on the PCI vendor/device ID of the adapter.
    234  *
    235  *  return 1 on success, 0 on failure
    236  *********************************************************************/
    237 
    238 static int
    239 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    240 {
    241 #ifdef __HAVE_PCI_MSI_MSIX
    242 	const struct pci_attach_args *pa = aux;
    243 
    244 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    245 #else
    246 	return 0;
    247 #endif
    248 }
    249 
    250 static ixgbe_vendor_info_t *
    251 ixv_lookup(const struct pci_attach_args *pa)
    252 {
    253 	pcireg_t subid;
    254 	ixgbe_vendor_info_t *ent;
    255 
    256 	INIT_DEBUGOUT("ixv_lookup: begin");
    257 
    258 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    259 		return NULL;
    260 
    261 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    262 
    263 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    264 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    265 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    266 
    267 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    268 		     (ent->subvendor_id == 0)) &&
    269 
    270 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    271 		     (ent->subdevice_id == 0))) {
    272 			return ent;
    273 		}
    274 	}
    275 	return NULL;
    276 }
    277 
    278 
    279 static void
    280 ixv_sysctl_attach(struct adapter *adapter)
    281 {
    282 	struct sysctllog **log;
    283 	const struct sysctlnode *rnode, *cnode;
    284 	device_t dev;
    285 
    286 	dev = adapter->dev;
    287 	log = &adapter->sysctllog;
    288 
    289 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    290 		aprint_error_dev(dev, "could not create sysctl root\n");
    291 		return;
    292 	}
    293 
    294 	if (sysctl_createv(log, 0, &rnode, &cnode,
    295 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    296 	    "debug", SYSCTL_DESCR("Debug Info"),
    297 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    298 		aprint_error_dev(dev, "could not create sysctl\n");
    299 
    300 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    301 	 * XXX It's that way in the FreeBSD driver that this derives from.
    302 	 */
    303 	if (sysctl_createv(log, 0, &rnode, &cnode,
    304 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    305 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    306 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    307 		aprint_error_dev(dev, "could not create sysctl\n");
    308 }
    309 
    310 /*********************************************************************
    311  *  Device initialization routine
    312  *
    313  *  The attach entry point is called when the driver is being loaded.
    314  *  This routine identifies the type of hardware, allocates all resources
    315  *  and initializes the hardware.
    316  *
    317  *  return 0 on success, positive on failure
    318  *********************************************************************/
    319 
    320 static void
    321 ixv_attach(device_t parent, device_t dev, void *aux)
    322 {
    323 	struct adapter *adapter;
    324 	struct ixgbe_hw *hw;
    325 	int             error = 0;
    326 	ixgbe_vendor_info_t *ent;
    327 	const struct pci_attach_args *pa = aux;
    328 
    329 	INIT_DEBUGOUT("ixv_attach: begin");
    330 
    331 	/* Allocate, clear, and link in our adapter structure */
    332 	adapter = device_private(dev);
    333 	adapter->dev = dev;
    334 	hw = &adapter->hw;
    335 
    336 #ifdef DEV_NETMAP
    337 	adapter->init_locked = ixv_init_locked;
    338 	adapter->stop_locked = ixv_stop;
    339 #endif
    340 
    341 	adapter->osdep.pc = pa->pa_pc;
    342 	adapter->osdep.tag = pa->pa_tag;
    343 	adapter->osdep.dmat = pa->pa_dmat;
    344 	adapter->osdep.attached = false;
    345 
    346 	ent = ixv_lookup(pa);
    347 
    348 	KASSERT(ent != NULL);
    349 
    350 	aprint_normal(": %s, Version - %s\n",
    351 	    ixv_strings[ent->index], ixv_driver_version);
    352 
    353 	/* Core Lock Init*/
    354 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    355 
    356 	/* SYSCTL APIs */
    357 	ixv_sysctl_attach(adapter);
    358 
    359 	/* Set up the timer callout */
    360 	callout_init(&adapter->timer, 0);
    361 
    362 	/* Determine hardware revision */
    363 	ixv_identify_hardware(adapter);
    364 
    365 	/* Do base PCI setup - map BAR0 */
    366 	if (ixv_allocate_pci_resources(adapter, pa)) {
    367 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    368 		error = ENXIO;
    369 		goto err_out;
    370 	}
    371 
    372 	/* Sysctls for limiting the amount of work done in the taskqueues */
    373 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    374 	    "max number of rx packets to process",
    375 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    376 
    377 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    378 	    "max number of tx packets to process",
    379 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    380 
    381 	/* Do descriptor calc and sanity checks */
    382 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    383 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    384 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    385 		adapter->num_tx_desc = DEFAULT_TXD;
    386 	} else
    387 		adapter->num_tx_desc = ixv_txd;
    388 
    389 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    390 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    391 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    392 		adapter->num_rx_desc = DEFAULT_RXD;
    393 	} else
    394 		adapter->num_rx_desc = ixv_rxd;
    395 
    396 	/* Allocate our TX/RX Queues */
    397 	if (ixgbe_allocate_queues(adapter)) {
    398 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    399 		error = ENOMEM;
    400 		goto err_out;
    401 	}
    402 
    403 	/*
    404 	** Initialize the shared code: at this
    405 	** point the mac type is set.
    406 	*/
    407 	error = ixgbe_init_shared_code(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    410 		error = EIO;
    411 		goto err_late;
    412 	}
    413 
    414 	/* Setup the mailbox */
    415 	ixgbe_init_mbx_params_vf(hw);
    416 
    417 	/* Reset mbox api to 1.0 */
    418 	error = ixgbe_reset_hw(hw);
    419 	if (error == IXGBE_ERR_RESET_FAILED)
    420 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    421 	else if (error)
    422 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    423 	if (error) {
    424 		error = EIO;
    425 		goto err_late;
    426 	}
    427 
    428 	/* Negotiate mailbox API version */
    429 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    430 	if (error)
    431 		aprint_debug_dev(dev,
    432 		    "MBX API 1.1 negotiation failed! Error %d\n", error);
    433 
    434 	error = ixgbe_init_hw(hw);
    435 	if (error) {
    436 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    437 		error = EIO;
    438 		goto err_late;
    439 	}
    440 
    441 	error = ixv_allocate_msix(adapter, pa);
    442 	if (error) {
    443 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    444 		goto err_late;
    445 	}
    446 
    447 	/* If no mac address was assigned, make a random one */
    448 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    449 		u8 addr[ETHER_ADDR_LEN];
    450 		uint64_t rndval = cprng_fast64();
    451 
    452 		memcpy(addr, &rndval, sizeof(addr));
    453 		addr[0] &= 0xFE;
    454 		addr[0] |= 0x02;
    455 		bcopy(addr, hw->mac.addr, sizeof(addr));
    456 	}
    457 
    458 	/* Setup OS specific network interface */
    459 	ixv_setup_interface(dev, adapter);
    460 
    461 	/* Do the stats setup */
    462 	ixv_save_stats(adapter);
    463 	ixv_init_stats(adapter);
    464 	ixv_add_stats_sysctls(adapter);
    465 
    466 	/* Register for VLAN events */
    467 #if 0 /* XXX delete after write? */
    468 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    469 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    470 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    471 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    472 #endif
    473 
    474 #ifdef DEV_NETMAP
    475 	ixgbe_netmap_attach(adapter);
    476 #endif /* DEV_NETMAP */
    477 	INIT_DEBUGOUT("ixv_attach: end");
    478 	adapter->osdep.attached = true;
    479 	return;
    480 
    481 err_late:
    482 	ixgbe_free_transmit_structures(adapter);
    483 	ixgbe_free_receive_structures(adapter);
    484 err_out:
    485 	ixv_free_pci_resources(adapter);
    486 	return;
    487 
    488 }
    489 
    490 /*********************************************************************
    491  *  Device removal routine
    492  *
    493  *  The detach entry point is called when the driver is being removed.
    494  *  This routine stops the adapter and deallocates all the resources
    495  *  that were allocated for driver operation.
    496  *
    497  *  return 0 on success, positive on failure
    498  *********************************************************************/
    499 
    500 static int
    501 ixv_detach(device_t dev, int flags)
    502 {
    503 	struct adapter *adapter = device_private(dev);
    504 	struct ix_queue *que = adapter->queues;
    505 
    506 	INIT_DEBUGOUT("ixv_detach: begin");
    507 	if (adapter->osdep.attached == false)
    508 		return 0;
    509 
    510 #if NVLAN > 0
    511 	/* Make sure VLANS are not using driver */
    512 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    513 		;	/* nothing to do: no VLANs */
    514 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    515 		vlan_ifdetach(adapter->ifp);
    516 	else {
    517 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    518 		return EBUSY;
    519 	}
    520 #endif
    521 
    522 	IXGBE_CORE_LOCK(adapter);
    523 	ixv_stop(adapter);
    524 	IXGBE_CORE_UNLOCK(adapter);
    525 
    526 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    527 #ifndef IXGBE_LEGACY_TX
    528 		struct tx_ring *txr = adapter->tx_rings;
    529 
    530 		softint_disestablish(txr->txr_si);
    531 #endif
    532 		softint_disestablish(que->que_si);
    533 	}
    534 
    535 	/* Drain the Mailbox(link) queue */
    536 	softint_disestablish(adapter->link_si);
    537 
    538 	/* Unregister VLAN events */
    539 #if 0 /* XXX msaitoh delete after write? */
    540 	if (adapter->vlan_attach != NULL)
    541 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    542 	if (adapter->vlan_detach != NULL)
    543 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    544 #endif
    545 
    546 	ether_ifdetach(adapter->ifp);
    547 	callout_halt(&adapter->timer, NULL);
    548 #ifdef DEV_NETMAP
    549 	netmap_detach(adapter->ifp);
    550 #endif /* DEV_NETMAP */
    551 	ixv_free_pci_resources(adapter);
    552 #if 0 /* XXX the NetBSD port is probably missing something here */
    553 	bus_generic_detach(dev);
    554 #endif
    555 	if_detach(adapter->ifp);
    556 
    557 	ixgbe_free_transmit_structures(adapter);
    558 	ixgbe_free_receive_structures(adapter);
    559 
    560 	IXGBE_CORE_LOCK_DESTROY(adapter);
    561 	return (0);
    562 }
    563 
    564 /*********************************************************************
    565  *
    566  *  Shutdown entry point
    567  *
    568  **********************************************************************/
    569 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    570 static int
    571 ixv_shutdown(device_t dev)
    572 {
    573 	struct adapter *adapter = device_private(dev);
    574 	IXGBE_CORE_LOCK(adapter);
    575 	ixv_stop(adapter);
    576 	IXGBE_CORE_UNLOCK(adapter);
    577 	return (0);
    578 }
    579 #endif
    580 
    581 static int
    582 ixv_ifflags_cb(struct ethercom *ec)
    583 {
    584 	struct ifnet *ifp = &ec->ec_if;
    585 	struct adapter *adapter = ifp->if_softc;
    586 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    587 
    588 	IXGBE_CORE_LOCK(adapter);
    589 
    590 	if (change != 0)
    591 		adapter->if_flags = ifp->if_flags;
    592 
    593 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    594 		rc = ENETRESET;
    595 
    596 	IXGBE_CORE_UNLOCK(adapter);
    597 
    598 	return rc;
    599 }
    600 
    601 /*********************************************************************
    602  *  Ioctl entry point
    603  *
    604  *  ixv_ioctl is called when the user wants to configure the
    605  *  interface.
    606  *
    607  *  return 0 on success, positive on failure
    608  **********************************************************************/
    609 
    610 static int
    611 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    612 {
    613 	struct adapter	*adapter = ifp->if_softc;
    614 	struct ifcapreq *ifcr = data;
    615 	struct ifreq	*ifr = (struct ifreq *) data;
    616 	int             error = 0;
    617 	int l4csum_en;
    618 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    619 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    620 
    621 	switch (command) {
    622 	case SIOCSIFFLAGS:
    623 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    624 		break;
    625 	case SIOCADDMULTI:
    626 	case SIOCDELMULTI:
    627 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    628 		break;
    629 	case SIOCSIFMEDIA:
    630 	case SIOCGIFMEDIA:
    631 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    632 		break;
    633 	case SIOCSIFCAP:
    634 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    635 		break;
    636 	case SIOCSIFMTU:
    637 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    638 		break;
    639 	default:
    640 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    641 		break;
    642 	}
    643 
    644 	switch (command) {
    645 	case SIOCSIFMEDIA:
    646 	case SIOCGIFMEDIA:
    647 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    648 	case SIOCSIFCAP:
    649 		/* Layer-4 Rx checksum offload has to be turned on and
    650 		 * off as a unit.
    651 		 */
    652 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    653 		if (l4csum_en != l4csum && l4csum_en != 0)
    654 			return EINVAL;
    655 		/*FALLTHROUGH*/
    656 	case SIOCADDMULTI:
    657 	case SIOCDELMULTI:
    658 	case SIOCSIFFLAGS:
    659 	case SIOCSIFMTU:
    660 	default:
    661 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    662 			return error;
    663 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    664 			;
    665 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    666 			IXGBE_CORE_LOCK(adapter);
    667 			ixv_init_locked(adapter);
    668 			IXGBE_CORE_UNLOCK(adapter);
    669 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    670 			/*
    671 			 * Multicast list has changed; set the hardware filter
    672 			 * accordingly.
    673 			 */
    674 			IXGBE_CORE_LOCK(adapter);
    675 			ixv_disable_intr(adapter);
    676 			ixv_set_multi(adapter);
    677 			ixv_enable_intr(adapter);
    678 			IXGBE_CORE_UNLOCK(adapter);
    679 		}
    680 		return 0;
    681 	}
    682 }
    683 
    684 /*********************************************************************
    685  *  Init entry point
    686  *
    687  *  This routine is used in two ways. It is used by the stack as the
    688  *  init entry point in the network interface structure. It is also used
    689  *  by the driver as a hw/sw initialization routine to get to a
    690  *  consistent state.
    691  *
    692  *  return 0 on success, positive on failure
    693  **********************************************************************/
    694 #define IXGBE_MHADD_MFS_SHIFT 16
    695 
    696 static void
    697 ixv_init_locked(struct adapter *adapter)
    698 {
    699 	struct ifnet	*ifp = adapter->ifp;
    700 	device_t 	dev = adapter->dev;
    701 	struct ixgbe_hw *hw = &adapter->hw;
    702 	int error = 0;
    703 
    704 	INIT_DEBUGOUT("ixv_init_locked: begin");
    705 	KASSERT(mutex_owned(&adapter->core_mtx));
    706 	hw->adapter_stopped = FALSE;
    707 	ixgbe_stop_adapter(hw);
    708         callout_stop(&adapter->timer);
    709 
    710         /* reprogram the RAR[0] in case user changed it. */
    711         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    712 
    713 	/* Get the latest mac address, User can use a LAA */
    714 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    715 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    716         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    717 	hw->addr_ctrl.rar_used_count = 1;
    718 
    719 	/* Prepare transmit descriptors and buffers */
    720 	if (ixgbe_setup_transmit_structures(adapter)) {
    721 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    722 		ixv_stop(adapter);
    723 		return;
    724 	}
    725 
    726 	/* Reset VF and renegotiate mailbox API version */
    727 	ixgbe_reset_hw(hw);
    728 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    729 	if (error)
    730 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    731 
    732 	ixv_initialize_transmit_units(adapter);
    733 
    734 	/* Setup Multicast table */
    735 	ixv_set_multi(adapter);
    736 
    737 	/*
    738 	** Determine the correct mbuf pool
    739 	** for doing jumbo/headersplit
    740 	*/
    741 	if (ifp->if_mtu > ETHERMTU)
    742 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    743 	else
    744 		adapter->rx_mbuf_sz = MCLBYTES;
    745 
    746 	/* Prepare receive descriptors and buffers */
    747 	if (ixgbe_setup_receive_structures(adapter)) {
    748 		device_printf(dev, "Could not setup receive structures\n");
    749 		ixv_stop(adapter);
    750 		return;
    751 	}
    752 
    753 	/* Configure RX settings */
    754 	ixv_initialize_receive_units(adapter);
    755 
    756 #if 0 /* XXX isn't it required? -- msaitoh  */
    757 	/* Set the various hardware offload abilities */
    758 	ifp->if_hwassist = 0;
    759 	if (ifp->if_capenable & IFCAP_TSO4)
    760 		ifp->if_hwassist |= CSUM_TSO;
    761 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    762 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    763 #if __FreeBSD_version >= 800000
    764 		ifp->if_hwassist |= CSUM_SCTP;
    765 #endif
    766 	}
    767 #endif
    768 
    769 	/* Set up VLAN offload and filter */
    770 	ixv_setup_vlan_support(adapter);
    771 
    772 	/* Set up MSI/X routing */
    773 	ixv_configure_ivars(adapter);
    774 
    775 	/* Set up auto-mask */
    776 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    777 
    778         /* Set moderation on the Link interrupt */
    779         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    780 
    781 	/* Stats init */
    782 	ixv_init_stats(adapter);
    783 
    784 	/* Config/Enable Link */
    785 	ixv_config_link(adapter);
    786 	hw->mac.get_link_status = TRUE;
    787 
    788 	/* Start watchdog */
    789 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    790 
    791 	/* And now turn on interrupts */
    792 	ixv_enable_intr(adapter);
    793 
    794 	/* Now inform the stack we're ready */
    795 	ifp->if_flags |= IFF_RUNNING;
    796 	ifp->if_flags &= ~IFF_OACTIVE;
    797 
    798 	return;
    799 }
    800 
    801 static int
    802 ixv_init(struct ifnet *ifp)
    803 {
    804 	struct adapter *adapter = ifp->if_softc;
    805 
    806 	IXGBE_CORE_LOCK(adapter);
    807 	ixv_init_locked(adapter);
    808 	IXGBE_CORE_UNLOCK(adapter);
    809 	return 0;
    810 }
    811 
    812 
    813 /*
    814 **
    815 ** MSIX Interrupt Handlers and Tasklets
    816 **
    817 */
    818 
    819 static inline void
    820 ixv_enable_queue(struct adapter *adapter, u32 vector)
    821 {
    822 	struct ixgbe_hw *hw = &adapter->hw;
    823 	u32	queue = 1 << vector;
    824 	u32	mask;
    825 
    826 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    827 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    828 }
    829 
    830 static inline void
    831 ixv_disable_queue(struct adapter *adapter, u32 vector)
    832 {
    833 	struct ixgbe_hw *hw = &adapter->hw;
    834 	u64	queue = (u64)(1 << vector);
    835 	u32	mask;
    836 
    837 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    838 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    839 }
    840 
    841 static inline void
    842 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    843 {
    844 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    845 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    846 }
    847 
    848 
    849 static void
    850 ixv_handle_que(void *context)
    851 {
    852 	struct ix_queue *que = context;
    853 	struct adapter  *adapter = que->adapter;
    854 	struct tx_ring	*txr = que->txr;
    855 	struct ifnet    *ifp = adapter->ifp;
    856 	bool		more;
    857 
    858 	if (ifp->if_flags & IFF_RUNNING) {
    859 		more = ixgbe_rxeof(que);
    860 		IXGBE_TX_LOCK(txr);
    861 		ixgbe_txeof(txr);
    862 #ifndef IXGBE_LEGACY_TX
    863 		if (pcq_peek(txr->txr_interq) != NULL)
    864 			ixgbe_mq_start_locked(ifp, txr);
    865 #else
    866 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
    867 			ixgbe_start_locked(txr, ifp);
    868 #endif
    869 		IXGBE_TX_UNLOCK(txr);
    870 		if (more) {
    871 			adapter->req.ev_count++;
    872 			softint_schedule(que->que_si);
    873 			return;
    874 		}
    875 	}
    876 
    877 	/* Reenable this interrupt */
    878 	ixv_enable_queue(adapter, que->msix);
    879 	return;
    880 }
    881 
    882 /*********************************************************************
    883  *
    884  *  MSIX Queue Interrupt Service routine
    885  *
    886  **********************************************************************/
    887 int
    888 ixv_msix_que(void *arg)
    889 {
    890 	struct ix_queue	*que = arg;
    891 	struct adapter  *adapter = que->adapter;
    892 	struct ifnet    *ifp = adapter->ifp;
    893 	struct tx_ring	*txr = que->txr;
    894 	struct rx_ring	*rxr = que->rxr;
    895 	bool		more;
    896 	u32		newitr = 0;
    897 
    898 	ixv_disable_queue(adapter, que->msix);
    899 	++que->irqs.ev_count;
    900 
    901 #ifdef __NetBSD__
    902 	/* Don't run ixgbe_rxeof in interrupt context */
    903 	more = true;
    904 #else
    905 	more = ixgbe_rxeof(que);
    906 #endif
    907 
    908 	IXGBE_TX_LOCK(txr);
    909 	ixgbe_txeof(txr);
    910 	/*
    911 	** Make certain that if the stack
    912 	** has anything queued the task gets
    913 	** scheduled to handle it.
    914 	*/
    915 #ifdef IXGBE_LEGACY_TX
    916 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
    917 		ixgbe_start_locked(txr, ifp);
    918 #else
    919 	if (pcq_peek(txr->txr_interq) != NULL)
    920 		ixgbe_mq_start_locked(ifp, txr);
    921 #endif
    922 	IXGBE_TX_UNLOCK(txr);
    923 
    924 	/* Do AIM now? */
    925 
    926 	if (ixv_enable_aim == FALSE)
    927 		goto no_calc;
    928 	/*
    929 	** Do Adaptive Interrupt Moderation:
    930         **  - Write out last calculated setting
    931 	**  - Calculate based on average size over
    932 	**    the last interval.
    933 	*/
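	/*
	** Worked example (illustrative figures, based on the code below):
	** if the last interval saw 90000 rx bytes in 60 packets and no tx
	** traffic, newitr = 90000/60 + 24 = 1524; that is under the 3000
	** cap and outside the 300..1200 midrange, so it is halved to 762,
	** mirrored into the upper 16 bits, and saved in que->eitr_setting
	** for the next write to VTEITR.
	*/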
    934         if (que->eitr_setting)
    935                 IXGBE_WRITE_REG(&adapter->hw,
    936                     IXGBE_VTEITR(que->msix),
    937 		    que->eitr_setting);
    938 
    939         que->eitr_setting = 0;
    940 
    941         /* Idle, do nothing */
    942         if ((txr->bytes == 0) && (rxr->bytes == 0))
    943                 goto no_calc;
    944 
    945 	if ((txr->bytes) && (txr->packets))
    946                	newitr = txr->bytes/txr->packets;
    947 	if ((rxr->bytes) && (rxr->packets))
    948 		newitr = max(newitr,
    949 		    (rxr->bytes / rxr->packets));
    950 	newitr += 24; /* account for hardware frame, crc */
    951 
    952 	/* set an upper boundary */
    953 	newitr = min(newitr, 3000);
    954 
    955 	/* Be nice to the mid range */
    956 	if ((newitr > 300) && (newitr < 1200))
    957 		newitr = (newitr / 3);
    958 	else
    959 		newitr = (newitr / 2);
    960 
    961 	newitr |= newitr << 16;
    962 
    963         /* save for next interrupt */
    964         que->eitr_setting = newitr;
    965 
    966         /* Reset state */
    967         txr->bytes = 0;
    968         txr->packets = 0;
    969         rxr->bytes = 0;
    970         rxr->packets = 0;
    971 
    972 no_calc:
    973 	if (more)
    974 		softint_schedule(que->que_si);
    975 	else /* Reenable this interrupt */
    976 		ixv_enable_queue(adapter, que->msix);
    977 	return 1;
    978 }
    979 
    980 static int
    981 ixv_msix_mbx(void *arg)
    982 {
    983 	struct adapter	*adapter = arg;
    984 	struct ixgbe_hw *hw = &adapter->hw;
    985 	u32		reg;
    986 
    987 	++adapter->link_irq.ev_count;
    988 
    989 	/* First get the cause */
    990 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    991 	/* Clear interrupt with write */
    992 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    993 
    994 	/* Link status change */
    995 	if (reg & IXGBE_EICR_LSC)
    996 		softint_schedule(adapter->link_si);
    997 
    998 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    999 	return 1;
   1000 }
   1001 
   1002 /*********************************************************************
   1003  *
   1004  *  Media Ioctl callback
   1005  *
   1006  *  This routine is called whenever the user queries the status of
   1007  *  the interface using ifconfig.
   1008  *
   1009  **********************************************************************/
   1010 static void
   1011 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1012 {
   1013 	struct adapter *adapter = ifp->if_softc;
   1014 
   1015 	INIT_DEBUGOUT("ixv_media_status: begin");
   1016 	IXGBE_CORE_LOCK(adapter);
   1017 	ixv_update_link_status(adapter);
   1018 
   1019 	ifmr->ifm_status = IFM_AVALID;
   1020 	ifmr->ifm_active = IFM_ETHER;
   1021 
   1022 	if (!adapter->link_active) {
   1023 		ifmr->ifm_active |= IFM_NONE;
   1024 		IXGBE_CORE_UNLOCK(adapter);
   1025 		return;
   1026 	}
   1027 
   1028 	ifmr->ifm_status |= IFM_ACTIVE;
   1029 
   1030 	switch (adapter->link_speed) {
   1031 		case IXGBE_LINK_SPEED_1GB_FULL:
   1032 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1033 			break;
   1034 		case IXGBE_LINK_SPEED_10GB_FULL:
   1035 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1036 			break;
   1037 	}
   1038 
   1039 	IXGBE_CORE_UNLOCK(adapter);
   1040 
   1041 	return;
   1042 }
   1043 
   1044 /*********************************************************************
   1045  *
   1046  *  Media Ioctl callback
   1047  *
   1048  *  This routine is called when the user changes speed/duplex using
   1049  *  media/mediaopt option with ifconfig.
   1050  *
   1051  **********************************************************************/
   1052 static int
   1053 ixv_media_change(struct ifnet * ifp)
   1054 {
   1055 	struct adapter *adapter = ifp->if_softc;
   1056 	struct ifmedia *ifm = &adapter->media;
   1057 
   1058 	INIT_DEBUGOUT("ixv_media_change: begin");
   1059 
   1060 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1061 		return (EINVAL);
   1062 
   1063         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1064         case IFM_AUTO:
   1065                 break;
   1066         default:
   1067                 device_printf(adapter->dev, "Only auto media type\n");
   1068 		return (EINVAL);
   1069         }
   1070 
   1071 	return (0);
   1072 }
   1073 
   1074 
   1075 /*********************************************************************
   1076  *  Multicast Update
   1077  *
   1078  *  This routine is called whenever the multicast address list is updated.
   1079  *
   1080  **********************************************************************/
   1081 #define IXGBE_RAR_ENTRIES 16
   1082 
   1083 static void
   1084 ixv_set_multi(struct adapter *adapter)
   1085 {
   1086 	struct ether_multi *enm;
   1087 	struct ether_multistep step;
   1088 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1089 	u8	*update_ptr;
   1090 	int	mcnt = 0;
   1091 	struct ethercom *ec = &adapter->osdep.ec;
   1092 
   1093 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1094 
   1095 	ETHER_FIRST_MULTI(step, ec, enm);
   1096 	while (enm != NULL) {
   1097 		bcopy(enm->enm_addrlo,
   1098 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1099 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1100 		mcnt++;
   1101 		/* XXX This might be required --msaitoh */
   1102 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1103 			break;
   1104 		ETHER_NEXT_MULTI(step, enm);
   1105 	}
   1106 
   1107 	update_ptr = mta;
   1108 
   1109 	ixgbe_update_mc_addr_list(&adapter->hw,
   1110 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1111 
   1112 	return;
   1113 }
   1114 
   1115 /*
   1116  * This is an iterator function now needed by the multicast
   1117  * shared code. It simply feeds the shared code routine the
   1118  * addresses in the array of ixv_set_multi() one by one.
   1119  */
   1120 static u8 *
   1121 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1122 {
   1123 	u8 *addr = *update_ptr;
   1124 	u8 *newptr;
   1125 	*vmdq = 0;
   1126 
   1127 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1128 	*update_ptr = newptr;
   1129 	return addr;
   1130 }
   1131 
   1132 /*********************************************************************
   1133  *  Timer routine
   1134  *
   1135  *  This routine checks for link status, updates statistics,
   1136  *  and runs the watchdog check.
   1137  *
   1138  **********************************************************************/
   1139 
   1140 static void
   1141 ixv_local_timer(void *arg)
   1142 {
   1143 	struct adapter *adapter = arg;
   1144 
   1145 	IXGBE_CORE_LOCK(adapter);
   1146 	ixv_local_timer_locked(adapter);
   1147 	IXGBE_CORE_UNLOCK(adapter);
   1148 }
   1149 
   1150 static void
   1151 ixv_local_timer_locked(void *arg)
   1152 {
   1153 	struct adapter	*adapter = arg;
   1154 	device_t	dev = adapter->dev;
   1155 	struct ix_queue	*que = adapter->queues;
   1156 	u64		queues = 0;
   1157 	int		hung = 0;
   1158 
   1159 	KASSERT(mutex_owned(&adapter->core_mtx));
   1160 
   1161 	ixv_update_link_status(adapter);
   1162 
   1163 	/* Stats Update */
   1164 	ixv_update_stats(adapter);
   1165 
   1166 	/*
   1167 	** Check the TX queues status
   1168 	**      - mark hung queues so we don't schedule on them
   1169 	**      - watchdog only if all queues show hung
   1170 	*/
   1171 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1172 		/* Keep track of queues with work for soft irq */
   1173 		if (que->txr->busy)
   1174 			queues |= ((u64)1 << que->me);
   1175 		/*
   1176 		** Each time txeof runs without cleaning while there
   1177 		** are still uncleaned descriptors, it increments busy.
   1178 		** If busy reaches the MAX we declare the queue hung.
   1179 		*/
   1180 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1181 			++hung;
   1182 			/* Mark the queue as inactive */
   1183 			adapter->active_queues &= ~((u64)1 << que->me);
   1184 			continue;
   1185 		} else {
   1186 			/* Check if we've come back from hung */
   1187 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1188                                 adapter->active_queues |= ((u64)1 << que->me);
   1189 		}
   1190 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1191 			device_printf(dev,"Warning queue %d "
   1192 			    "appears to be hung!\n", i);
   1193 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1194 			++hung;
   1195 		}
   1196 
   1197 	}
   1198 
   1199 	/* Only truly watchdog if all queues show hung */
   1200 	if (hung == adapter->num_queues)
   1201 		goto watchdog;
   1202 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1203 		ixv_rearm_queues(adapter, queues);
   1204 	}
   1205 
   1206 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1207 	return;
   1208 
   1209 watchdog:
   1210 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1211 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1212 	adapter->watchdog_events.ev_count++;
   1213 	ixv_init_locked(adapter);
   1214 }
   1215 
   1216 /*
   1217 ** Note: this routine updates the OS on the link state;
   1218 **	the real check of the hardware only happens with
   1219 **	a link interrupt.
   1220 */
   1221 static void
   1222 ixv_update_link_status(struct adapter *adapter)
   1223 {
   1224 	struct ifnet	*ifp = adapter->ifp;
   1225 	device_t dev = adapter->dev;
   1226 
   1227 	if (adapter->link_up){
   1228 		if (adapter->link_active == FALSE) {
   1229 			if (bootverbose)
   1230 				device_printf(dev,"Link is up %d Gbps %s \n",
   1231 				    ((adapter->link_speed == 128)? 10:1),
   1232 				    "Full Duplex");
   1233 			adapter->link_active = TRUE;
   1234 			if_link_state_change(ifp, LINK_STATE_UP);
   1235 		}
   1236 	} else { /* Link down */
   1237 		if (adapter->link_active == TRUE) {
   1238 			if (bootverbose)
   1239 				device_printf(dev,"Link is Down\n");
   1240 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1241 			adapter->link_active = FALSE;
   1242 		}
   1243 	}
   1244 
   1245 	return;
   1246 }
   1247 
   1248 
   1249 static void
   1250 ixv_ifstop(struct ifnet *ifp, int disable)
   1251 {
   1252 	struct adapter *adapter = ifp->if_softc;
   1253 
   1254 	IXGBE_CORE_LOCK(adapter);
   1255 	ixv_stop(adapter);
   1256 	IXGBE_CORE_UNLOCK(adapter);
   1257 }
   1258 
   1259 /*********************************************************************
   1260  *
   1261  *  This routine disables all traffic on the adapter by issuing a
   1262  *  global reset on the MAC and deallocates TX/RX buffers.
   1263  *
   1264  **********************************************************************/
   1265 
   1266 static void
   1267 ixv_stop(void *arg)
   1268 {
   1269 	struct ifnet   *ifp;
   1270 	struct adapter *adapter = arg;
   1271 	struct ixgbe_hw *hw = &adapter->hw;
   1272 	ifp = adapter->ifp;
   1273 
   1274 	KASSERT(mutex_owned(&adapter->core_mtx));
   1275 
   1276 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1277 	ixv_disable_intr(adapter);
   1278 
   1279 	/* Tell the stack that the interface is no longer active */
   1280 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1281 
   1282 	ixgbe_reset_hw(hw);
   1283 	adapter->hw.adapter_stopped = FALSE;
   1284 	ixgbe_stop_adapter(hw);
   1285 	callout_stop(&adapter->timer);
   1286 
   1287 	/* reprogram the RAR[0] in case user changed it. */
   1288 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1289 
   1290 	return;
   1291 }
   1292 
   1293 
   1294 /*********************************************************************
   1295  *
   1296  *  Determine hardware revision.
   1297  *
   1298  **********************************************************************/
   1299 static void
   1300 ixv_identify_hardware(struct adapter *adapter)
   1301 {
   1302 	pcitag_t tag;
   1303 	pci_chipset_tag_t pc;
   1304 	pcireg_t subid, id;
   1305 	struct ixgbe_hw *hw = &adapter->hw;
   1306 
   1307 	pc = adapter->osdep.pc;
   1308 	tag = adapter->osdep.tag;
   1309 
   1310 	/*
   1311 	** Make sure BUSMASTER is set; on a VM under
   1312 	** KVM it may not be, and that will break things.
   1313 	*/
   1314 	ixgbe_pci_enable_busmaster(pc, tag);
   1315 
   1316 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1317 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1318 
   1319 	/* Save off the information about this board */
   1320 	hw->vendor_id = PCI_VENDOR(id);
   1321 	hw->device_id = PCI_PRODUCT(id);
   1322 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1323 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1324 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1325 
   1326 	/* We need this to determine device-specific things */
   1327 	ixgbe_set_mac_type(hw);
   1328 
   1329 	/* Set the right number of segments */
   1330 	adapter->num_segs = IXGBE_82599_SCATTER;
   1331 
   1332 	return;
   1333 }
   1334 
   1335 /*********************************************************************
   1336  *
   1337  *  Setup MSIX Interrupt resources and handlers
   1338  *
   1339  **********************************************************************/
   1340 static int
   1341 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1342 {
   1343 	device_t	dev = adapter->dev;
   1344 	struct ix_queue *que = adapter->queues;
   1345 	struct		tx_ring *txr = adapter->tx_rings;
   1346 	int 		error, rid, vector = 0;
   1347 	pci_chipset_tag_t pc;
   1348 	pcitag_t	tag;
   1349 	char		intrbuf[PCI_INTRSTR_LEN];
   1350 	char		intr_xname[32];
   1351 	const char	*intrstr = NULL;
   1352 	kcpuset_t	*affinity;
   1353 	int		cpu_id = 0;
   1354 
   1355 	pc = adapter->osdep.pc;
   1356 	tag = adapter->osdep.tag;
   1357 
   1358 	adapter->osdep.nintrs = adapter->num_queues + 1;
   1359 	if (pci_msix_alloc_exact(pa,
   1360 	    &adapter->osdep.intrs, adapter->osdep.nintrs) != 0)
   1361 		return (ENXIO);
   1362 
   1363 	kcpuset_create(&affinity, false);
   1364 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1365 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   1366 		    device_xname(dev), i);
   1367 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1368 		    sizeof(intrbuf));
   1369 #ifdef IXV_MPSAFE
   1370 		pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1371 		    true);
   1372 #endif
   1373 		/* Set the handler function */
   1374 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   1375 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   1376 			intr_xname);
   1377 		if (que->res == NULL) {
   1378 			pci_intr_release(pc, adapter->osdep.intrs,
   1379 			    adapter->osdep.nintrs);
   1380 			aprint_error_dev(dev,
   1381 			    "Failed to register QUE handler");
   1382 			kcpuset_destroy(affinity);
   1383 			return (ENXIO);
   1384 		}
   1385 		que->msix = vector;
   1386         	adapter->active_queues |= (u64)(1 << que->msix);
   1387 
   1388 		cpu_id = i;
   1389 		/* Round-robin affinity */
   1390 		kcpuset_zero(affinity);
   1391 		kcpuset_set(affinity, cpu_id % ncpu);
   1392 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1393 		    NULL);
   1394 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1395 		    intrstr);
   1396 		if (error == 0)
   1397 			aprint_normal(", bound queue %d to cpu %d\n",
   1398 			    i, cpu_id);
   1399 		else
   1400 			aprint_normal("\n");
   1401 
   1402 #ifndef IXGBE_LEGACY_TX
   1403 		txr->txr_si = softint_establish(SOFTINT_NET,
   1404 		    ixgbe_deferred_mq_start, txr);
   1405 #endif
   1406 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1407 		    que);
   1408 		if (que->que_si == NULL) {
   1409 			aprint_error_dev(dev,
   1410 			    "could not establish software interrupt\n");
   1411 		}
   1412 	}
   1413 
   1414 	/* and Mailbox */
   1415 	cpu_id++;
   1416 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   1417 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1418 	    sizeof(intrbuf));
   1419 #ifdef IXG_MPSAFE
   1420 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   1421 #endif
   1422 	/* Set the mbx handler function */
   1423 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   1424 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   1425 		intr_xname);
   1426 	if (adapter->osdep.ihs[vector] == NULL) {
   1427 		adapter->res = NULL;
   1428 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1429 		kcpuset_destroy(affinity);
   1430 		return (ENXIO);
   1431 	}
   1432 	/* Round-robin affinity */
   1433 	kcpuset_zero(affinity);
   1434 	kcpuset_set(affinity, cpu_id % ncpu);
   1435 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1436 
   1437 	aprint_normal_dev(dev,
   1438 	    "for link, interrupting at %s, ", intrstr);
   1439 	if (error == 0) {
   1440 		aprint_normal("affinity to cpu %d\n", cpu_id);
   1441 	}
   1442 	adapter->vector = vector;
   1443 	/* Tasklets for Mailbox */
   1444 	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1445 	    adapter);
   1446 	/*
   1447 	** Due to a broken design QEMU will fail to properly
   1448 	** enable the guest for MSIX unless the vectors in
   1449 	** the table are all set up, so we must rewrite the
   1450 	** ENABLE in the MSIX control register again at this
   1451 	** point to cause it to successfully initialize us.
   1452 	*/
   1453 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1454 		int msix_ctrl;
   1455 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1456 		rid += PCI_MSIX_CTL;
   1457 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1458 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1459 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1460 	}
   1461 
   1462 	kcpuset_destroy(affinity);
   1463 	return (0);
   1464 }
   1465 
   1466 /*
   1467  * Set up MSIX resources; note that the VF
   1468  * device MUST use MSIX, there is no fallback.
   1469  */
   1470 static int
   1471 ixv_setup_msix(struct adapter *adapter)
   1472 {
   1473 	device_t dev = adapter->dev;
   1474 	int want, queues, msgs;
   1475 
   1476 	/* Must have at least 2 MSIX vectors */
   1477 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1478 	if (msgs < 2) {
   1479 		aprint_error_dev(dev,"MSIX config error\n");
   1480 		return (ENXIO);
   1481 	}
   1482 	msgs = MIN(msgs, IXG_MAX_NINTR);
   1483 
   1484 	/* Figure out a reasonable auto config value */
   1485 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   1486 
   1487 	if (ixv_num_queues != 0)
   1488 		queues = ixv_num_queues;
   1489 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   1490 		queues = IXGBE_VF_MAX_TX_QUEUES;
   1491 
   1492 	/*
   1493 	** Want vectors for the queues,
   1494 	** plus an additional for mailbox.
   1495 	*/
   1496 	want = queues + 1;
   1497 	if (msgs >= want) {
   1498 		msgs = want;
   1499 	} else {
   1500                	aprint_error_dev(dev,
   1501 		    "MSIX Configuration Problem, "
   1502 		    "%d vectors but %d queues wanted!\n",
   1503 		    msgs, want);
   1504 		return (ENXIO);
   1505 	}
   1506 
   1507 	adapter->msix_mem = (void *)1; /* XXX */
   1508 	aprint_normal_dev(dev,
   1509 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1510 	adapter->num_queues = queues;
   1511 	return (msgs);
   1512 }
   1513 
   1514 
   1515 static int
   1516 ixv_allocate_pci_resources(struct adapter *adapter,
   1517     const struct pci_attach_args *pa)
   1518 {
   1519 	pcireg_t	memtype;
   1520 	device_t        dev = adapter->dev;
   1521 	bus_addr_t addr;
   1522 	int flags;
   1523 
   1524 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1525 
   1526 	switch (memtype) {
   1527 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1528 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1529 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1530 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1531 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1532 			goto map_err;
   1533 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1534 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1535 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1536 		}
   1537 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1538 		     adapter->osdep.mem_size, flags,
   1539 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1540 map_err:
   1541 			adapter->osdep.mem_size = 0;
   1542 			aprint_error_dev(dev, "unable to map BAR0\n");
   1543 			return ENXIO;
   1544 		}
   1545 		break;
   1546 	default:
   1547 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1548 		return ENXIO;
   1549 	}
   1550 
   1551 	/* Pick up the tuneable queues */
   1552 	adapter->num_queues = ixv_num_queues;
   1553 	adapter->hw.back = adapter;
   1554 
   1555 	/*
   1556 	** Now setup MSI/X, should
   1557 	** return us the number of
   1558 	** configured vectors.
   1559 	*/
   1560 	adapter->msix = ixv_setup_msix(adapter);
   1561 	if (adapter->msix == ENXIO)
   1562 		return (ENXIO);
   1563 	else
   1564 		return (0);
   1565 }
   1566 
   1567 static void
   1568 ixv_free_pci_resources(struct adapter * adapter)
   1569 {
   1570 	struct 		ix_queue *que = adapter->queues;
   1571 	int		rid;
   1572 
   1573 	/*
   1574 	**  Release all msix queue resources:
   1575 	*/
   1576 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1577 		rid = que->msix + 1;
   1578 		if (que->res != NULL)
   1579 			pci_intr_disestablish(adapter->osdep.pc,
   1580 			    adapter->osdep.ihs[i]);
   1581 	}
   1582 
   1583 
   1584 	/* Clean the Legacy or Link interrupt last */
   1585 	if (adapter->vector) /* we are doing MSIX */
   1586 		rid = adapter->vector + 1;
   1587 	else
   1588 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
   1589 
   1590 	if (adapter->osdep.ihs[rid] != NULL)
   1591 		pci_intr_disestablish(adapter->osdep.pc,
   1592 		    adapter->osdep.ihs[rid]);
   1593 	adapter->osdep.ihs[rid] = NULL;
   1594 
   1595 #if defined(NETBSD_MSI_OR_MSIX)
   1596 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1597 	    adapter->osdep.nintrs);
   1598 #endif
   1599 
   1600 	if (adapter->osdep.mem_size != 0) {
   1601 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1602 		    adapter->osdep.mem_bus_space_handle,
   1603 		    adapter->osdep.mem_size);
   1604 	}
   1605 
   1606 	return;
   1607 }
   1608 
   1609 /*********************************************************************
   1610  *
   1611  *  Setup networking device structure and register an interface.
   1612  *
   1613  **********************************************************************/
   1614 static void
   1615 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1616 {
   1617 	struct ethercom *ec = &adapter->osdep.ec;
   1618 	struct ifnet   *ifp;
   1619 
   1620 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1621 
   1622 	ifp = adapter->ifp = &ec->ec_if;
   1623 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1624 	ifp->if_baudrate = 1000000000;
   1625 	ifp->if_init = ixv_init;
   1626 	ifp->if_stop = ixv_ifstop;
   1627 	ifp->if_softc = adapter;
   1628 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1629 	ifp->if_ioctl = ixv_ioctl;
   1630 #ifndef IXGBE_LEGACY_TX
   1631 	ifp->if_transmit = ixgbe_mq_start;
   1632 #endif
   1633 	ifp->if_start = ixgbe_start;
   1634 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1635 
   1636 	if_initialize(ifp);
   1637 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1638 #ifndef IXGBE_LEGACY_TX
   1639 #if 0	/* We use per TX queue softint */
   1640 	if_deferred_start_init(ifp, ixgbe_deferred_mq_start);
   1641 #endif
   1642 #endif
   1643 	if_register(ifp);
   1644 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1645 
   1646 	adapter->max_frame_size =
   1647 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
   1648 
   1649 	/*
   1650 	 * Tell the upper layer(s) we support long frames.
   1651 	 */
   1652 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1653 
   1654 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1655 	ifp->if_capenable = 0;
   1656 
   1657 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1658 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1659 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
    1660 	    | ETHERCAP_VLAN_MTU;
   1661 	ec->ec_capenable = ec->ec_capabilities;
   1662 
   1663 	/* Don't enable LRO by default */
   1664 	ifp->if_capabilities |= IFCAP_LRO;
   1665 #if 0
   1666 	ifp->if_capenable = ifp->if_capabilities;
   1667 #endif
   1668 
   1669 	/*
    1670 	** Don't turn this on by default. If vlans are
    1671 	** created on another pseudo device (e.g. lagg),
    1672 	** vlan events are not passed through, breaking
    1673 	** operation; with HW FILTER off it still works.
    1674 	** If using vlans directly on this driver you
    1675 	** can enable this and get full hardware tag filtering.
   1676 	*/
   1677 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1678 
   1679 	/*
   1680 	 * Specify the media types supported by this adapter and register
   1681 	 * callbacks to update media and link information
   1682 	 */
   1683 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1684 		     ixv_media_status);
   1685 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1686 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1687 
   1688 	return;
   1689 }
   1690 
   1691 static void
   1692 ixv_config_link(struct adapter *adapter)
   1693 {
   1694 	struct ixgbe_hw *hw = &adapter->hw;
   1695 
   1696 	if (hw->mac.ops.check_link)
   1697 		hw->mac.ops.check_link(hw, &adapter->link_speed,
   1698 		    &adapter->link_up, FALSE);
   1699 }
   1700 
   1701 
   1702 /*********************************************************************
   1703  *
   1704  *  Enable transmit unit.
   1705  *
   1706  **********************************************************************/
   1707 static void
   1708 ixv_initialize_transmit_units(struct adapter *adapter)
   1709 {
   1710 	struct tx_ring	*txr = adapter->tx_rings;
   1711 	struct ixgbe_hw	*hw = &adapter->hw;
   1712 
   1713 
   1714 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1715 		u64	tdba = txr->txdma.dma_paddr;
   1716 		u32	txctrl, txdctl;
   1717 
   1718 		/* Set WTHRESH to 8, burst writeback */
   1719 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1720 		txdctl |= (8 << 16);
   1721 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1722 
   1723 		/* Set the HW Tx Head and Tail indices */
    1724 		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
    1725 		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
   1726 
   1727 		/* Set Tx Tail register */
   1728 		txr->tail = IXGBE_VFTDT(i);
   1729 
   1730 		/* Set Ring parameters */
   1731 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1732 		       (tdba & 0x00000000ffffffffULL));
   1733 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1734 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1735 		    adapter->num_tx_desc *
   1736 		    sizeof(struct ixgbe_legacy_tx_desc));
   1737 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1738 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1739 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1740 
   1741 		/* Now enable */
   1742 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1743 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1744 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1745 	}
   1746 
   1747 	return;
   1748 }
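/*
 * Illustrative sketch, not part of the driver and kept under #if 0 so it
 * is never compiled: how the 64-bit descriptor ring DMA address and the
 * ring size set up above map onto the VFTDBAL/VFTDBAH/VFTDLEN values.
 * The helper name is hypothetical.
 */
#if 0
static void
example_tx_ring_regvals(u64 tdba, int num_tx_desc)
{
	u32 bal = (u32)(tdba & 0x00000000ffffffffULL);	/* -> VFTDBAL */
	u32 bah = (u32)(tdba >> 32);			/* -> VFTDBAH */
	u32 len = num_tx_desc *
	    sizeof(struct ixgbe_legacy_tx_desc);	/* -> VFTDLEN, in bytes */

	printf("TDBAL=0x%08x TDBAH=0x%08x TDLEN=%u\n", bal, bah, len);
}
#endif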
   1749 
   1750 
   1751 /*********************************************************************
   1752  *
   1753  *  Setup receive registers and features.
   1754  *
   1755  **********************************************************************/
   1756 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1757 
   1758 static void
   1759 ixv_initialize_receive_units(struct adapter *adapter)
   1760 {
   1761 	struct	rx_ring	*rxr = adapter->rx_rings;
   1762 	struct ixgbe_hw	*hw = &adapter->hw;
   1763 	struct ifnet	*ifp = adapter->ifp;
   1764 	u32		bufsz, rxcsum, psrtype;
   1765 
   1766 	if (ifp->if_mtu > ETHERMTU)
   1767 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1768 	else
   1769 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1770 
   1771 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1772 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1773 	    IXGBE_PSRTYPE_L2HDR;
   1774 
   1775 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1776 
   1777 	/* Tell PF our max_frame size */
   1778 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1779 
   1780 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1781 		u64 rdba = rxr->rxdma.dma_paddr;
   1782 		u32 reg, rxdctl;
   1783 
   1784 		/* Disable the queue */
   1785 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1786 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1787 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1788 		for (int j = 0; j < 10; j++) {
   1789 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1790 			    IXGBE_RXDCTL_ENABLE)
   1791 				msec_delay(1);
   1792 			else
   1793 				break;
   1794 		}
   1795 		wmb();
   1796 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1797 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1798 		    (rdba & 0x00000000ffffffffULL));
   1799 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1800 		    (rdba >> 32));
   1801 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1802 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1803 
   1804 		/* Reset the ring indices */
   1805 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1806 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1807 
   1808 		/* Set up the SRRCTL register */
   1809 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1810 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1811 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1812 		reg |= bufsz;
   1813 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1814 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1815 
   1816 		/* Capture Rx Tail index */
   1817 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1818 
   1819 		/* Do the queue enabling last */
   1820 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1821 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1822 		for (int k = 0; k < 10; k++) {
   1823 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1824 			    IXGBE_RXDCTL_ENABLE)
   1825 				break;
   1826 			else
   1827 				msec_delay(1);
   1828 		}
   1829 		wmb();
   1830 
   1831 		/* Set the Tail Pointer */
   1832 #ifdef DEV_NETMAP
   1833 		/*
   1834 		 * In netmap mode, we must preserve the buffers made
   1835 		 * available to userspace before the if_init()
   1836 		 * (this is true by default on the TX side, because
   1837 		 * init makes all buffers available to userspace).
   1838 		 *
   1839 		 * netmap_reset() and the device specific routines
   1840 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1841 		 * buffers at the end of the NIC ring, so here we
   1842 		 * must set the RDT (tail) register to make sure
   1843 		 * they are not overwritten.
   1844 		 *
   1845 		 * In this driver the NIC ring starts at RDH = 0,
   1846 		 * RDT points to the last slot available for reception (?),
   1847 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1848 		 */
   1849 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1850 			struct netmap_adapter *na = NA(adapter->ifp);
   1851 			struct netmap_kring *kring = &na->rx_rings[i];
   1852 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1853 
   1854 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1855 		} else
   1856 #endif /* DEV_NETMAP */
   1857 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1858 			    adapter->num_rx_desc - 1);
   1859 	}
   1860 
   1861 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1862 
   1863 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1864 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1865 
   1866 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1867 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1868 
   1869 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1870 
   1871 	return;
   1872 }
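/*
 * Illustrative sketch, not compiled: how the receive buffer size chosen
 * above is encoded for the SRRCTL BSIZEPKT field.  This assumes the field
 * simply holds the buffer size in bytes shifted right by
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT, matching the "bufsz" computation in
 * ixv_initialize_receive_units().  The helper name is hypothetical.
 */
#if 0
static u32
example_srrctl_bsizepkt(u32 buf_bytes)
{
	/* e.g. 2048 and 4096 byte clusters become small field values */
	return buf_bytes >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
}
#endif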
   1873 
   1874 static void
   1875 ixv_setup_vlan_support(struct adapter *adapter)
   1876 {
   1877 	struct ixgbe_hw *hw = &adapter->hw;
   1878 	u32		ctrl, vid, vfta, retry;
   1879 	struct rx_ring	*rxr;
   1880 
   1881 	/*
    1882 	** We get here through init_locked, meaning
    1883 	** a soft reset; that has already cleared
    1884 	** the VFTA and other state, so if no vlans
    1885 	** have been registered there is nothing to do.
   1886 	*/
   1887 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1888 		return;
   1889 
   1890 	/* Enable the queues */
   1891 	for (int i = 0; i < adapter->num_queues; i++) {
   1892 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1893 		ctrl |= IXGBE_RXDCTL_VME;
   1894 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1895 		/*
   1896 		 * Let Rx path know that it needs to store VLAN tag
   1897 		 * as part of extra mbuf info.
   1898 		 */
   1899 		rxr = &adapter->rx_rings[i];
   1900 		rxr->vtag_strip = TRUE;
   1901 	}
   1902 
   1903 	/*
    1904 	** A soft reset zeroes out the VFTA, so
    1905 	** we need to repopulate it now.
   1906 	*/
   1907 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1908 		if (ixv_shadow_vfta[i] == 0)
   1909 			continue;
   1910 		vfta = ixv_shadow_vfta[i];
   1911 		/*
    1912 		** Reconstruct the vlan IDs
    1913 		** from the bits set in each
    1914 		** 32-bit entry of the array.
   1915 		*/
   1916 		for (int j = 0; j < 32; j++) {
   1917 			retry = 0;
   1918 			if ((vfta & (1 << j)) == 0)
   1919 				continue;
   1920 			vid = (i * 32) + j;
   1921 			/* Call the shared code mailbox routine */
   1922 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1923 				if (++retry > 5)
   1924 					break;
   1925 			}
   1926 		}
   1927 	}
   1928 }
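/*
 * Illustrative sketch, not compiled: the shadow VFTA packs the 4096
 * possible VLAN IDs into 128 32-bit words.  Setting and testing a bit is
 * the inverse of the reconstruction loop above (vid = i * 32 + j) and
 * mirrors the index/bit arithmetic used by the disabled
 * ixv_register_vlan()/ixv_unregister_vlan() handlers below.  Helper
 * names are hypothetical.
 */
#if 0
static void
example_shadow_vfta_set(u32 *shadow_vfta, u16 vid)
{
	u16 index = (vid >> 5) & 0x7F;	/* which 32-bit word */
	u16 bit = vid & 0x1F;		/* which bit within it */

	shadow_vfta[index] |= (1 << bit);
}

static bool
example_shadow_vfta_test(const u32 *shadow_vfta, u16 vid)
{
	return (shadow_vfta[(vid >> 5) & 0x7F] & (1 << (vid & 0x1F))) != 0;
}
#endif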
   1929 
   1930 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1931 /*
    1932 ** This routine is run via a vlan config EVENT;
    1933 ** it enables us to use the HW Filter table since
    1934 ** we can get the vlan id. This just creates the
    1935 ** entry in the soft version of the VFTA; init will
    1936 ** repopulate the real table.
   1937 */
   1938 static void
   1939 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1940 {
   1941 	struct adapter	*adapter = ifp->if_softc;
   1942 	u16		index, bit;
   1943 
   1944 	if (ifp->if_softc != arg) /* Not our event */
   1945 		return;
   1946 
   1947 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1948 		return;
   1949 
   1950 	IXGBE_CORE_LOCK(adapter);
   1951 	index = (vtag >> 5) & 0x7F;
   1952 	bit = vtag & 0x1F;
   1953 	ixv_shadow_vfta[index] |= (1 << bit);
   1954 	/* Re-init to load the changes */
   1955 	ixv_init_locked(adapter);
   1956 	IXGBE_CORE_UNLOCK(adapter);
   1957 }
   1958 
   1959 /*
    1960 ** This routine is run via a vlan
    1961 ** unconfig EVENT; it removes our entry
    1962 ** from the soft VFTA.
   1963 */
   1964 static void
   1965 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1966 {
   1967 	struct adapter	*adapter = ifp->if_softc;
   1968 	u16		index, bit;
   1969 
   1970 	if (ifp->if_softc !=  arg)
   1971 		return;
   1972 
   1973 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1974 		return;
   1975 
   1976 	IXGBE_CORE_LOCK(adapter);
   1977 	index = (vtag >> 5) & 0x7F;
   1978 	bit = vtag & 0x1F;
   1979 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1980 	/* Re-init to load the changes */
   1981 	ixv_init_locked(adapter);
   1982 	IXGBE_CORE_UNLOCK(adapter);
   1983 }
   1984 #endif
   1985 
   1986 static void
   1987 ixv_enable_intr(struct adapter *adapter)
   1988 {
   1989 	struct ixgbe_hw *hw = &adapter->hw;
   1990 	struct ix_queue *que = adapter->queues;
   1991 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1992 
   1993 
   1994 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1995 
   1996 	mask = IXGBE_EIMS_ENABLE_MASK;
   1997 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1998 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1999 
    2000 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2001 		ixv_enable_queue(adapter, que->msix);
   2002 
   2003 	IXGBE_WRITE_FLUSH(hw);
   2004 
   2005 	return;
   2006 }
   2007 
   2008 static void
   2009 ixv_disable_intr(struct adapter *adapter)
   2010 {
   2011 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2012 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   2013 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2014 	return;
   2015 }
   2016 
   2017 /*
    2018 ** Set up the correct IVAR register for a particular MSIX interrupt
    2019 **  - entry is the register array entry
    2020 **  - vector is the MSIX vector for this queue
    2021 **  - type is 0 for RX, 1 for TX and -1 for MISC (mailbox)
   2022 */
   2023 static void
   2024 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2025 {
   2026 	struct ixgbe_hw *hw = &adapter->hw;
   2027 	u32 ivar, index;
   2028 
   2029 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2030 
   2031 	if (type == -1) { /* MISC IVAR */
   2032 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2033 		ivar &= ~0xFF;
   2034 		ivar |= vector;
   2035 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2036 	} else {	/* RX/TX IVARS */
   2037 		index = (16 * (entry & 1)) + (8 * type);
   2038 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2039 		ivar &= ~(0xFF << index);
   2040 		ivar |= (vector << index);
   2041 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2042 	}
   2043 }
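/*
 * Illustrative sketch, not compiled: how ixv_set_ivar() addresses the
 * IVAR table for the RX/TX case.  Each 32-bit VTIVAR register carries
 * four 8-bit entries (RX and TX for two queues), so "entry >> 1" selects
 * the register and 16 * (entry & 1) + 8 * type selects the byte within
 * it.  The helper name is hypothetical.
 */
#if 0
static void
example_ivar_slot(u8 entry, s8 type, u32 *reg_index, u32 *bit_offset)
{
	*reg_index = entry >> 1;			/* which VTIVAR register */
	*bit_offset = (16 * (entry & 1)) + (8 * type);	/* byte within it */
}
#endif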
   2044 
   2045 static void
   2046 ixv_configure_ivars(struct adapter *adapter)
   2047 {
   2048 	struct  ix_queue *que = adapter->queues;
   2049 
    2050 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    2051 		/* First the RX queue entry */
    2052 		ixv_set_ivar(adapter, i, que->msix, 0);
    2053 		/* ... and the TX */
    2054 		ixv_set_ivar(adapter, i, que->msix, 1);
    2055 		/* Set an initial value in EITR */
    2056 		IXGBE_WRITE_REG(&adapter->hw,
    2057 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
    2058 	}
    2059 
    2060 	/* For the mailbox interrupt */
    2061 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2062 }
   2063 
   2064 
   2065 /*
   2066 ** Tasklet handler for MSIX MBX interrupts
    2067 **  - done outside the interrupt handler since it might sleep
   2068 */
   2069 static void
   2070 ixv_handle_mbx(void *context)
   2071 {
   2072 	struct adapter  *adapter = context;
   2073 
   2074 	ixgbe_check_link(&adapter->hw,
   2075 	    &adapter->link_speed, &adapter->link_up, 0);
   2076 	ixv_update_link_status(adapter);
   2077 }
   2078 
   2079 /*
    2080 ** The VF stats registers never have a truly virgin
    2081 ** starting point, so this routine establishes an
    2082 ** artificial one, marking ground zero at attach
    2083 ** time.
   2084 */
   2085 static void
   2086 ixv_save_stats(struct adapter *adapter)
   2087 {
   2088 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2089 
   2090 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2091 		stats->saved_reset_vfgprc +=
   2092 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2093 		stats->saved_reset_vfgptc +=
   2094 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2095 		stats->saved_reset_vfgorc +=
   2096 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2097 		stats->saved_reset_vfgotc +=
   2098 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2099 		stats->saved_reset_vfmprc +=
   2100 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2101 	}
   2102 }
   2103 
   2104 static void
   2105 ixv_init_stats(struct adapter *adapter)
   2106 {
   2107 	struct ixgbe_hw *hw = &adapter->hw;
   2108 
   2109 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2110 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2111 	adapter->stats.vf.last_vfgorc |=
   2112 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2113 
   2114 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2115 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2116 	adapter->stats.vf.last_vfgotc |=
   2117 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2118 
   2119 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2120 
   2121 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2122 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2123 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2124 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2125 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2126 }
   2127 
   2128 #define UPDATE_STAT_32(reg, last, count)		\
   2129 {							\
   2130 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2131 	if (current < last)				\
   2132 		count.ev_count += 0x100000000LL;	\
   2133 	last = current;					\
   2134 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2135 	count.ev_count |= current;			\
   2136 }
   2137 
   2138 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2139 {							\
   2140 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2141 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2142 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2143 	if (current < last)				\
   2144 		count.ev_count += 0x1000000000LL;	\
   2145 	last = current;					\
   2146 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2147 	count.ev_count |= current;			\
   2148 }
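/*
 * Illustrative sketch, not compiled: the wraparound handling performed by
 * UPDATE_STAT_32 above.  The hardware register is a free-running 32-bit
 * counter; whenever the newly read value is below the previously sampled
 * one, a full 2^32 wrap is credited before the low 32 bits are replaced.
 * UPDATE_STAT_36 does the same for the 36-bit LSB/MSB octet counters.
 */
#if 0
static void
example_extend_stat32(u32 current, u32 *last, u64 *count)
{
	if (current < *last)
		*count += 0x100000000ULL;	/* counter wrapped */
	*last = current;
	*count = (*count & 0xFFFFFFFF00000000ULL) | current;
}
#endif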
   2149 
   2150 /*
   2151 ** ixv_update_stats - Update the board statistics counters.
   2152 */
   2153 void
   2154 ixv_update_stats(struct adapter *adapter)
   2155 {
    2156 	struct ixgbe_hw *hw = &adapter->hw;
    2157 
    2158 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
    2159 	    adapter->stats.vf.vfgprc);
    2160 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
    2161 	    adapter->stats.vf.vfgptc);
    2162 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
    2163 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
    2164 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
    2165 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
    2166 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
    2167 	    adapter->stats.vf.vfmprc);
   2168 }
   2169 
   2170 /*
   2171  * Add statistic sysctls for the VF.
   2172  */
   2173 static void
   2174 ixv_add_stats_sysctls(struct adapter *adapter)
   2175 {
   2176 	device_t dev = adapter->dev;
   2177 	struct ix_queue *que = &adapter->queues[0];
   2178 	struct tx_ring *txr = que->txr;
   2179 	struct rx_ring *rxr = que->rxr;
   2180 
   2181 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2182 
   2183 	const char *xname = device_xname(dev);
   2184 
   2185 	/* Driver Statistics */
   2186 	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
   2187 	    NULL, xname, "Driver dropped packets");
   2188 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2189 	    NULL, xname, "m_defrag() failed");
   2190 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2191 	    NULL, xname, "Watchdog timeouts");
   2192 
   2193 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2194 	    xname, "Good Packets Received");
   2195 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2196 	    xname, "Good Octets Received");
   2197 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2198 	    xname, "Multicast Packets Received");
   2199 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2200 	    xname, "Good Packets Transmitted");
   2201 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2202 	    xname, "Good Octets Transmitted");
   2203 	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
   2204 	    xname, "IRQs on queue");
   2205 	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
   2206 	    xname, "RX irqs on queue");
   2207 	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
   2208 	    xname, "RX packets");
   2209 	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
   2210 	    xname, "RX bytes");
   2211 	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
   2212 	    xname, "Discarded RX packets");
   2213 	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
   2214 	    xname, "TX Packets");
   2215 	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
   2216 	    xname, "# of times not enough descriptors were available during TX");
   2217 	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
   2218 	    xname, "TX TSO");
   2219 }
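/*
 * Illustrative sketch, not compiled: the event-counter pattern used
 * above.  A counter is attached once with evcnt_attach_dynamic(), bumped
 * in the data path, and detached at teardown; userland can inspect it
 * with "vmstat -e".  The names here are hypothetical.
 */
#if 0
static struct evcnt example_evcnt;

static void
example_evcnt_usage(const char *xname)
{
	evcnt_attach_dynamic(&example_evcnt, EVCNT_TYPE_MISC, NULL,
	    xname, "Example events");
	example_evcnt.ev_count++;		/* hot path */
	evcnt_detach(&example_evcnt);		/* teardown */
}
#endif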
   2220 
   2221 static void
   2222 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2223 	const char *description, int *limit, int value)
   2224 {
   2225 	device_t dev =  adapter->dev;
   2226 	struct sysctllog **log;
   2227 	const struct sysctlnode *rnode, *cnode;
   2228 
   2229 	log = &adapter->sysctllog;
   2230 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2231 		aprint_error_dev(dev, "could not create sysctl root\n");
   2232 		return;
   2233 	}
   2234 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2235 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2236 	    name, SYSCTL_DESCR(description),
   2237 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2238 		aprint_error_dev(dev, "could not create sysctl\n");
   2239 	*limit = value;
   2240 }
   2241 
   2242 /**********************************************************************
   2243  *
    2244  *  This routine is called only when the debug sysctl is set.
    2245  *  It provides a way to examine important statistics
    2246  *  maintained by the driver and hardware.
   2247  *
   2248  **********************************************************************/
   2249 static void
   2250 ixv_print_debug_info(struct adapter *adapter)
   2251 {
    2252 	device_t dev = adapter->dev;
    2253 	struct ixgbe_hw	*hw = &adapter->hw;
    2254 	struct ix_queue	*que = adapter->queues;
    2255 	struct rx_ring	*rxr;
    2256 	struct tx_ring	*txr;
    2257 #ifdef LRO
    2258 	struct lro_ctrl	*lro;
    2259 #endif /* LRO */
    2260 
    2261 	device_printf(dev, "Error Byte Count = %u\n",
    2262 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
    2263 
    2264 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    2265 		txr = que->txr;
    2266 		rxr = que->rxr;
    2267 #ifdef LRO
    2268 		lro = &rxr->lro;
    2269 #endif /* LRO */
    2270 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
    2271 		    que->msix, (unsigned long)que->irqs.ev_count);
    2272 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
    2273 		    rxr->me, (long long)rxr->rx_packets.ev_count);
    2274 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
    2275 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
    2276 #ifdef LRO
    2277 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
    2278 		    rxr->me, (long long)lro->lro_queued);
    2279 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
    2280 		    rxr->me, (long long)lro->lro_flushed);
    2281 #endif /* LRO */
    2282 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
    2283 		    txr->me, (unsigned long)txr->total_packets.ev_count);
    2284 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
    2285 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
    2286 	}
    2287 
    2288 	device_printf(dev, "MBX IRQ Handled: %lu\n",
    2289 	    (unsigned long)adapter->link_irq.ev_count);
   2290         return;
   2291 }
   2292 
   2293 static int
   2294 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2295 {
   2296 	struct sysctlnode node;
   2297 	int error, result;
   2298 	struct adapter *adapter;
   2299 
   2300 	node = *rnode;
   2301 	adapter = (struct adapter *)node.sysctl_data;
   2302 	node.sysctl_data = &result;
   2303 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2304 
   2305 	if (error)
   2306 		return error;
   2307 
   2308 	if (result == 1)
   2309 		ixv_print_debug_info(adapter);
   2310 
   2311 	return 0;
   2312 }
   2313 
   2314 const struct sysctlnode *
   2315 ixv_sysctl_instance(struct adapter *adapter)
   2316 {
   2317 	const char *dvname;
   2318 	struct sysctllog **log;
   2319 	int rc;
   2320 	const struct sysctlnode *rnode;
   2321 
   2322 	log = &adapter->sysctllog;
   2323 	dvname = device_xname(adapter->dev);
   2324 
   2325 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2326 	    0, CTLTYPE_NODE, dvname,
   2327 	    SYSCTL_DESCR("ixv information and settings"),
   2328 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2329 		goto err;
   2330 
   2331 	return rnode;
   2332 err:
   2333 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2334 	return NULL;
   2335 }
   2336