ixv.c revision 1.43
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.43 2017/02/08 04:24:44 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixgbe.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.4.6-k";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
     50  *  Used by probe to select devices to load on
     51  *  Last field stores an index into ixv_strings
     52  *  Last entry must be all 0s
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     63 	/* required last entry */
     64 	{0, 0, 0, 0, 0}
     65 };
     66 
     67 /*********************************************************************
     68  *  Table of branding strings
     69  *********************************************************************/
     70 
     71 static const char    *ixv_strings[] = {
     72 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     73 };
     74 
     75 /*********************************************************************
     76  *  Function prototypes
     77  *********************************************************************/
     78 static int      ixv_probe(device_t, cfdata_t, void *);
     79 static void	ixv_attach(device_t, device_t, void *);
     80 static int      ixv_detach(device_t, int);
     81 #if 0
     82 static int      ixv_shutdown(device_t);
     83 #endif
     84 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     85 static int	ixv_init(struct ifnet *);
     86 static void	ixv_init_locked(struct adapter *);
     87 static void     ixv_stop(void *);
     88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     89 static int      ixv_media_change(struct ifnet *);
     90 static void     ixv_identify_hardware(struct adapter *);
     91 static int      ixv_allocate_pci_resources(struct adapter *,
     92 		    const struct pci_attach_args *);
     93 static int      ixv_allocate_msix(struct adapter *,
     94 		    const struct pci_attach_args *);
     95 static int	ixv_setup_msix(struct adapter *);
     96 static void	ixv_free_pci_resources(struct adapter *);
     97 static void     ixv_local_timer(void *);
     98 static void     ixv_local_timer_locked(void *);
     99 static void     ixv_setup_interface(device_t, struct adapter *);
    100 static void     ixv_config_link(struct adapter *);
    101 
    102 static void     ixv_initialize_transmit_units(struct adapter *);
    103 static void     ixv_initialize_receive_units(struct adapter *);
    104 
    105 static void     ixv_enable_intr(struct adapter *);
    106 static void     ixv_disable_intr(struct adapter *);
    107 static void     ixv_set_multi(struct adapter *);
    108 static void     ixv_update_link_status(struct adapter *);
    109 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    110 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    111 static void	ixv_configure_ivars(struct adapter *);
    112 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    113 
    114 static void	ixv_setup_vlan_support(struct adapter *);
    115 #if 0
    116 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    117 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    118 #endif
    119 
    120 static void	ixv_save_stats(struct adapter *);
    121 static void	ixv_init_stats(struct adapter *);
    122 static void	ixv_update_stats(struct adapter *);
    123 static void	ixv_add_stats_sysctls(struct adapter *);
    124 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    125 		    const char *, int *, int);
    126 
    127 /* The MSI/X Interrupt handlers */
    128 static int	ixv_msix_que(void *);
    129 static int	ixv_msix_mbx(void *);
    130 
    131 /* Deferred interrupt tasklets */
    132 static void	ixv_handle_que(void *);
    133 static void	ixv_handle_mbx(void *);
    134 
    135 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    136 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    137 
    138 #ifdef DEV_NETMAP
    139 /*
    140  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    141  * if_ix.c.
    142  */
    143 extern void ixgbe_netmap_attach(struct adapter *adapter);
    144 
    145 #include <net/netmap.h>
    146 #include <sys/selinfo.h>
    147 #include <dev/netmap/netmap_kern.h>
    148 #endif /* DEV_NETMAP */
    149 
    150 /*********************************************************************
     151  *  Device Interface Entry Points
    152  *********************************************************************/
    153 
    154 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    155     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    156     DVF_DETACH_SHUTDOWN);
    157 
    158 # if 0
    159 static device_method_t ixv_methods[] = {
    160 	/* Device interface */
    161 	DEVMETHOD(device_probe, ixv_probe),
    162 	DEVMETHOD(device_attach, ixv_attach),
    163 	DEVMETHOD(device_detach, ixv_detach),
    164 	DEVMETHOD(device_shutdown, ixv_shutdown),
    165 	DEVMETHOD_END
    166 };
    167 #endif
    168 
    169 #if 0
    170 static driver_t ixv_driver = {
    171 	"ixv", ixv_methods, sizeof(struct adapter),
    172 };
    173 
    174 devclass_t ixv_devclass;
    175 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    176 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    177 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    178 #ifdef DEV_NETMAP
    179 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    180 #endif /* DEV_NETMAP */
    181 /* XXX depend on 'ix' ? */
    182 #endif
    183 
    184 /*
    185 ** TUNEABLE PARAMETERS:
    186 */
    187 
    188 /* Number of Queues - do not exceed MSIX vectors - 1 */
    189 static int ixv_num_queues = 1;
    190 #define	TUNABLE_INT(__x, __y)
    191 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    192 
    193 /*
     194 ** AIM: Adaptive Interrupt Moderation,
     195 ** which means that the interrupt rate
     196 ** is varied over time based on the
     197 ** traffic for that interrupt vector.
    198 */
    199 static int ixv_enable_aim = FALSE;
    200 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    201 
    202 /* How many packets rxeof tries to clean at a time */
    203 static int ixv_rx_process_limit = 256;
    204 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    205 
    206 /* How many packets txeof tries to clean at a time */
    207 static int ixv_tx_process_limit = 256;
    208 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    209 
    210 /*
     211 ** Number of TX descriptors per ring;
     212 ** set higher than RX as this seems to be
     213 ** the better-performing choice.
    214 */
    215 static int ixv_txd = DEFAULT_TXD;
    216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    217 
    218 /* Number of RX descriptors per ring */
    219 static int ixv_rxd = DEFAULT_RXD;
    220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    221 
    222 /*
     223 ** Shadow VFTA table; this is needed because
    224 ** the real filter table gets cleared during
    225 ** a soft reset and we need to repopulate it.
    226 */
    227 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    228 
    229 /*********************************************************************
    230  *  Device identification routine
    231  *
    232  *  ixv_probe determines if the driver should be loaded on
     233  *  an adapter based on the PCI vendor/device ID of the adapter.
    234  *
    235  *  return 1 on success, 0 on failure
    236  *********************************************************************/
    237 
    238 static int
    239 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    240 {
    241 #ifdef __HAVE_PCI_MSI_MSIX
    242 	const struct pci_attach_args *pa = aux;
    243 
    244 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    245 #else
    246 	return 0;
    247 #endif
    248 }
    249 
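/*
 * Scan ixv_vendor_info_array for an entry matching the PCI vendor,
 * device and (optionally) subsystem IDs of the given device.
 * Returns the matching entry, or NULL if the device is not supported.
 */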
    250 static ixgbe_vendor_info_t *
    251 ixv_lookup(const struct pci_attach_args *pa)
    252 {
    253 	pcireg_t subid;
    254 	ixgbe_vendor_info_t *ent;
    255 
    256 	INIT_DEBUGOUT("ixv_lookup: begin");
    257 
    258 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    259 		return NULL;
    260 
    261 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    262 
    263 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    264 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    265 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    266 
    267 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    268 		     (ent->subvendor_id == 0)) &&
    269 
    270 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    271 		     (ent->subdevice_id == 0))) {
    272 			return ent;
    273 		}
    274 	}
    275 	return NULL;
    276 }
    277 
    278 
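/*
 * Attach the driver's private sysctl nodes: a per-instance "debug"
 * node and an "enable_aim" node that toggles the (global) adaptive
 * interrupt moderation setting.
 */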
    279 static void
    280 ixv_sysctl_attach(struct adapter *adapter)
    281 {
    282 	struct sysctllog **log;
    283 	const struct sysctlnode *rnode, *cnode;
    284 	device_t dev;
    285 
    286 	dev = adapter->dev;
    287 	log = &adapter->sysctllog;
    288 
    289 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    290 		aprint_error_dev(dev, "could not create sysctl root\n");
    291 		return;
    292 	}
    293 
    294 	if (sysctl_createv(log, 0, &rnode, &cnode,
    295 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    296 	    "debug", SYSCTL_DESCR("Debug Info"),
    297 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    298 		aprint_error_dev(dev, "could not create sysctl\n");
    299 
    300 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    301 	 * XXX It's that way in the FreeBSD driver that this derives from.
    302 	 */
    303 	if (sysctl_createv(log, 0, &rnode, &cnode,
    304 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    305 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    306 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    307 		aprint_error_dev(dev, "could not create sysctl\n");
    308 }
    309 
    310 /*********************************************************************
    311  *  Device initialization routine
    312  *
    313  *  The attach entry point is called when the driver is being loaded.
    314  *  This routine identifies the type of hardware, allocates all resources
    315  *  and initializes the hardware.
    316  *
    317  *  return 0 on success, positive on failure
    318  *********************************************************************/
    319 
    320 static void
    321 ixv_attach(device_t parent, device_t dev, void *aux)
    322 {
    323 	struct adapter *adapter;
    324 	struct ixgbe_hw *hw;
    325 	int             error = 0;
    326 	ixgbe_vendor_info_t *ent;
    327 	const struct pci_attach_args *pa = aux;
    328 
    329 	INIT_DEBUGOUT("ixv_attach: begin");
    330 
    331 	/* Allocate, clear, and link in our adapter structure */
    332 	adapter = device_private(dev);
    333 	adapter->dev = dev;
    334 	hw = &adapter->hw;
    335 
    336 #ifdef DEV_NETMAP
    337 	adapter->init_locked = ixv_init_locked;
    338 	adapter->stop_locked = ixv_stop;
    339 #endif
    340 
    341 	adapter->osdep.pc = pa->pa_pc;
    342 	adapter->osdep.tag = pa->pa_tag;
    343 	if (pci_dma64_available(pa))
    344 		adapter->osdep.dmat = pa->pa_dmat64;
    345 	else
    346 		adapter->osdep.dmat = pa->pa_dmat;
    347 	adapter->osdep.attached = false;
    348 
    349 	ent = ixv_lookup(pa);
    350 
    351 	KASSERT(ent != NULL);
    352 
    353 	aprint_normal(": %s, Version - %s\n",
    354 	    ixv_strings[ent->index], ixv_driver_version);
    355 
    356 	/* Core Lock Init*/
    357 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    358 
    359 	/* SYSCTL APIs */
    360 	ixv_sysctl_attach(adapter);
    361 
    362 	/* Set up the timer callout */
    363 	callout_init(&adapter->timer, 0);
    364 
    365 	/* Determine hardware revision */
    366 	ixv_identify_hardware(adapter);
    367 
    368 	/* Do base PCI setup - map BAR0 */
    369 	if (ixv_allocate_pci_resources(adapter, pa)) {
    370 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    371 		error = ENXIO;
    372 		goto err_out;
    373 	}
    374 
    375 	/* Sysctls for limiting the amount of work done in the taskqueues */
    376 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    377 	    "max number of rx packets to process",
    378 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    379 
    380 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    381 	    "max number of tx packets to process",
    382 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    383 
    384 	/* Do descriptor calc and sanity checks */
    385 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    386 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    387 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    388 		adapter->num_tx_desc = DEFAULT_TXD;
    389 	} else
    390 		adapter->num_tx_desc = ixv_txd;
    391 
    392 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    393 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    394 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    395 		adapter->num_rx_desc = DEFAULT_RXD;
    396 	} else
    397 		adapter->num_rx_desc = ixv_rxd;
    398 
    399 	/* Allocate our TX/RX Queues */
    400 	if (ixgbe_allocate_queues(adapter)) {
    401 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    402 		error = ENOMEM;
    403 		goto err_out;
    404 	}
    405 
    406 	/*
     407 	** Initialize the shared code; it is
     408 	** at this point that the mac type is set.
    409 	*/
    410 	error = ixgbe_init_shared_code(hw);
    411 	if (error) {
    412 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    413 		error = EIO;
    414 		goto err_late;
    415 	}
    416 
    417 	/* Setup the mailbox */
    418 	ixgbe_init_mbx_params_vf(hw);
    419 
    420 	/* Reset mbox api to 1.0 */
    421 	error = ixgbe_reset_hw(hw);
    422 	if (error == IXGBE_ERR_RESET_FAILED)
    423 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    424 	else if (error)
    425 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    426 	if (error) {
    427 		error = EIO;
    428 		goto err_late;
    429 	}
    430 
    431 	/* Negotiate mailbox API version */
    432 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    433 	if (error)
    434 		aprint_debug_dev(dev,
    435 		    "MBX API 1.1 negotiation failed! Error %d\n", error);
    436 
    437 	error = ixgbe_init_hw(hw);
    438 	if (error) {
    439 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    440 		error = EIO;
    441 		goto err_late;
    442 	}
    443 
    444 	error = ixv_allocate_msix(adapter, pa);
    445 	if (error) {
    446 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    447 		goto err_late;
    448 	}
    449 
    450 	/* If no mac address was assigned, make a random one */
    451 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    452 		u8 addr[ETHER_ADDR_LEN];
    453 		uint64_t rndval = cprng_fast64();
    454 
    455 		memcpy(addr, &rndval, sizeof(addr));
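		/* Clear the group (multicast) bit and set the locally
		 * administered bit in the randomly generated address. */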
    456 		addr[0] &= 0xFE;
    457 		addr[0] |= 0x02;
    458 		bcopy(addr, hw->mac.addr, sizeof(addr));
    459 	}
    460 
    461 	/* Setup OS specific network interface */
    462 	ixv_setup_interface(dev, adapter);
    463 
    464 	/* Do the stats setup */
    465 	ixv_save_stats(adapter);
    466 	ixv_init_stats(adapter);
    467 	ixv_add_stats_sysctls(adapter);
    468 
    469 	/* Register for VLAN events */
    470 #if 0 /* XXX delete after write? */
    471 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    472 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    473 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    474 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    475 #endif
    476 
    477 #ifdef DEV_NETMAP
    478 	ixgbe_netmap_attach(adapter);
    479 #endif /* DEV_NETMAP */
    480 	INIT_DEBUGOUT("ixv_attach: end");
    481 	adapter->osdep.attached = true;
    482 	return;
    483 
    484 err_late:
    485 	ixgbe_free_transmit_structures(adapter);
    486 	ixgbe_free_receive_structures(adapter);
    487 err_out:
    488 	ixv_free_pci_resources(adapter);
    489 	return;
    490 
    491 }
    492 
    493 /*********************************************************************
    494  *  Device removal routine
    495  *
    496  *  The detach entry point is called when the driver is being removed.
    497  *  This routine stops the adapter and deallocates all the resources
    498  *  that were allocated for driver operation.
    499  *
    500  *  return 0 on success, positive on failure
    501  *********************************************************************/
    502 
    503 static int
    504 ixv_detach(device_t dev, int flags)
    505 {
    506 	struct adapter *adapter = device_private(dev);
    507 	struct ix_queue *que = adapter->queues;
    508 	struct tx_ring *txr = adapter->tx_rings;
    509 
    510 	INIT_DEBUGOUT("ixv_detach: begin");
    511 	if (adapter->osdep.attached == false)
    512 		return 0;
    513 
    514 #if NVLAN > 0
    515 	/* Make sure VLANS are not using driver */
    516 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    517 		;	/* nothing to do: no VLANs */
    518 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    519 		vlan_ifdetach(adapter->ifp);
    520 	else {
    521 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    522 		return EBUSY;
    523 	}
    524 #endif
    525 
    526 	IXGBE_CORE_LOCK(adapter);
    527 	ixv_stop(adapter);
    528 	IXGBE_CORE_UNLOCK(adapter);
    529 
    530 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    531 #ifndef IXGBE_LEGACY_TX
    532 		softint_disestablish(txr->txr_si);
    533 #endif
    534 		softint_disestablish(que->que_si);
    535 	}
    536 
    537 	/* Drain the Mailbox(link) queue */
    538 	softint_disestablish(adapter->link_si);
    539 
    540 	/* Unregister VLAN events */
    541 #if 0 /* XXX msaitoh delete after write? */
    542 	if (adapter->vlan_attach != NULL)
    543 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    544 	if (adapter->vlan_detach != NULL)
    545 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    546 #endif
    547 
    548 	ether_ifdetach(adapter->ifp);
    549 	callout_halt(&adapter->timer, NULL);
    550 #ifdef DEV_NETMAP
    551 	netmap_detach(adapter->ifp);
    552 #endif /* DEV_NETMAP */
    553 	ixv_free_pci_resources(adapter);
    554 #if 0 /* XXX the NetBSD port is probably missing something here */
    555 	bus_generic_detach(dev);
    556 #endif
    557 	if_detach(adapter->ifp);
    558 
    559 	sysctl_teardown(&adapter->sysctllog);
    560 
    561 	ixgbe_free_transmit_structures(adapter);
    562 	ixgbe_free_receive_structures(adapter);
    563 
    564 	IXGBE_CORE_LOCK_DESTROY(adapter);
    565 	return (0);
    566 }
    567 
    568 /*********************************************************************
    569  *
    570  *  Shutdown entry point
    571  *
    572  **********************************************************************/
    573 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    574 static int
    575 ixv_shutdown(device_t dev)
    576 {
    577 	struct adapter *adapter = device_private(dev);
    578 	IXGBE_CORE_LOCK(adapter);
    579 	ixv_stop(adapter);
    580 	IXGBE_CORE_UNLOCK(adapter);
    581 	return (0);
    582 }
    583 #endif
    584 
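/*
 * Interface flags change callback: record the new flags and return
 * ENETRESET when a change (other than IFF_CANTCHANGE/IFF_DEBUG bits)
 * requires the interface to be reinitialized.
 */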
    585 static int
    586 ixv_ifflags_cb(struct ethercom *ec)
    587 {
    588 	struct ifnet *ifp = &ec->ec_if;
    589 	struct adapter *adapter = ifp->if_softc;
    590 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    591 
    592 	IXGBE_CORE_LOCK(adapter);
    593 
    594 	if (change != 0)
    595 		adapter->if_flags = ifp->if_flags;
    596 
    597 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    598 		rc = ENETRESET;
    599 
    600 	IXGBE_CORE_UNLOCK(adapter);
    601 
    602 	return rc;
    603 }
    604 
    605 /*********************************************************************
    606  *  Ioctl entry point
    607  *
    608  *  ixv_ioctl is called when the user wants to configure the
    609  *  interface.
    610  *
    611  *  return 0 on success, positive on failure
    612  **********************************************************************/
    613 
    614 static int
    615 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    616 {
    617 	struct adapter	*adapter = ifp->if_softc;
    618 	struct ifcapreq *ifcr = data;
    619 	struct ifreq	*ifr = (struct ifreq *) data;
    620 	int             error = 0;
    621 	int l4csum_en;
    622 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    623 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    624 
    625 	switch (command) {
    626 	case SIOCSIFFLAGS:
    627 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    628 		break;
    629 	case SIOCADDMULTI:
    630 	case SIOCDELMULTI:
    631 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    632 		break;
    633 	case SIOCSIFMEDIA:
    634 	case SIOCGIFMEDIA:
    635 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    636 		break;
    637 	case SIOCSIFCAP:
    638 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    639 		break;
    640 	case SIOCSIFMTU:
    641 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    642 		break;
    643 	default:
    644 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    645 		break;
    646 	}
    647 
    648 	switch (command) {
    649 	case SIOCSIFMEDIA:
    650 	case SIOCGIFMEDIA:
    651 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    652 	case SIOCSIFCAP:
    653 		/* Layer-4 Rx checksum offload has to be turned on and
    654 		 * off as a unit.
    655 		 */
    656 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    657 		if (l4csum_en != l4csum && l4csum_en != 0)
    658 			return EINVAL;
    659 		/*FALLTHROUGH*/
    660 	case SIOCADDMULTI:
    661 	case SIOCDELMULTI:
    662 	case SIOCSIFFLAGS:
    663 	case SIOCSIFMTU:
    664 	default:
    665 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    666 			return error;
    667 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    668 			;
    669 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    670 			IXGBE_CORE_LOCK(adapter);
    671 			ixv_init_locked(adapter);
    672 			IXGBE_CORE_UNLOCK(adapter);
    673 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    674 			/*
    675 			 * Multicast list has changed; set the hardware filter
    676 			 * accordingly.
    677 			 */
    678 			IXGBE_CORE_LOCK(adapter);
    679 			ixv_disable_intr(adapter);
    680 			ixv_set_multi(adapter);
    681 			ixv_enable_intr(adapter);
    682 			IXGBE_CORE_UNLOCK(adapter);
    683 		}
    684 		return 0;
    685 	}
    686 }
    687 
    688 /*********************************************************************
    689  *  Init entry point
    690  *
    691  *  This routine is used in two ways. It is used by the stack as
    692  *  init entry point in network interface structure. It is also used
    693  *  by the driver as a hw/sw initialization routine to get to a
    694  *  consistent state.
    695  *
    696  *  return 0 on success, positive on failure
    697  **********************************************************************/
    698 #define IXGBE_MHADD_MFS_SHIFT 16
    699 
    700 static void
    701 ixv_init_locked(struct adapter *adapter)
    702 {
    703 	struct ifnet	*ifp = adapter->ifp;
    704 	device_t 	dev = adapter->dev;
    705 	struct ixgbe_hw *hw = &adapter->hw;
    706 	int error = 0;
    707 
    708 	INIT_DEBUGOUT("ixv_init_locked: begin");
    709 	KASSERT(mutex_owned(&adapter->core_mtx));
    710 	hw->adapter_stopped = FALSE;
    711 	ixgbe_stop_adapter(hw);
     712 	callout_stop(&adapter->timer);
     713 
     714 	/* reprogram the RAR[0] in case user changed it. */
     715 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
     716 
     717 	/* Get the latest mac address; user can use an LAA */
     718 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
     719 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
     720 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    721 	hw->addr_ctrl.rar_used_count = 1;
    722 
    723 	/* Prepare transmit descriptors and buffers */
    724 	if (ixgbe_setup_transmit_structures(adapter)) {
    725 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    726 		ixv_stop(adapter);
    727 		return;
    728 	}
    729 
    730 	/* Reset VF and renegotiate mailbox API version */
    731 	ixgbe_reset_hw(hw);
    732 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    733 	if (error)
    734 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    735 
    736 	ixv_initialize_transmit_units(adapter);
    737 
    738 	/* Setup Multicast table */
    739 	ixv_set_multi(adapter);
    740 
    741 	/*
    742 	** Determine the correct mbuf pool
    743 	** for doing jumbo/headersplit
    744 	*/
    745 	if (ifp->if_mtu > ETHERMTU)
    746 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    747 	else
    748 		adapter->rx_mbuf_sz = MCLBYTES;
    749 
    750 	/* Prepare receive descriptors and buffers */
    751 	if (ixgbe_setup_receive_structures(adapter)) {
    752 		device_printf(dev, "Could not setup receive structures\n");
    753 		ixv_stop(adapter);
    754 		return;
    755 	}
    756 
    757 	/* Configure RX settings */
    758 	ixv_initialize_receive_units(adapter);
    759 
    760 #if 0 /* XXX isn't it required? -- msaitoh  */
    761 	/* Set the various hardware offload abilities */
    762 	ifp->if_hwassist = 0;
    763 	if (ifp->if_capenable & IFCAP_TSO4)
    764 		ifp->if_hwassist |= CSUM_TSO;
    765 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    766 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    767 #if __FreeBSD_version >= 800000
    768 		ifp->if_hwassist |= CSUM_SCTP;
    769 #endif
    770 	}
    771 #endif
    772 
    773 	/* Set up VLAN offload and filter */
    774 	ixv_setup_vlan_support(adapter);
    775 
    776 	/* Set up MSI/X routing */
    777 	ixv_configure_ivars(adapter);
    778 
    779 	/* Set up auto-mask */
    780 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    781 
     782 	/* Set moderation on the Link interrupt */
     783 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    784 
    785 	/* Stats init */
    786 	ixv_init_stats(adapter);
    787 
    788 	/* Config/Enable Link */
    789 	ixv_config_link(adapter);
    790 	hw->mac.get_link_status = TRUE;
    791 
    792 	/* Start watchdog */
    793 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    794 
    795 	/* And now turn on interrupts */
    796 	ixv_enable_intr(adapter);
    797 
    798 	/* Now inform the stack we're ready */
    799 	ifp->if_flags |= IFF_RUNNING;
    800 	ifp->if_flags &= ~IFF_OACTIVE;
    801 
    802 	return;
    803 }
    804 
    805 static int
    806 ixv_init(struct ifnet *ifp)
    807 {
    808 	struct adapter *adapter = ifp->if_softc;
    809 
    810 	IXGBE_CORE_LOCK(adapter);
    811 	ixv_init_locked(adapter);
    812 	IXGBE_CORE_UNLOCK(adapter);
    813 	return 0;
    814 }
    815 
    816 
    817 /*
    818 **
    819 ** MSIX Interrupt Handlers and Tasklets
    820 **
    821 */
    822 
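/*
 * Per-queue interrupt helpers: writing VTEIMS unmasks (enables) a
 * vector, VTEIMC masks (disables) it, and VTEICS triggers a software
 * interrupt for the queues in the given bitmap.
 */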
    823 static inline void
    824 ixv_enable_queue(struct adapter *adapter, u32 vector)
    825 {
    826 	struct ixgbe_hw *hw = &adapter->hw;
    827 	u32	queue = 1 << vector;
    828 	u32	mask;
    829 
    830 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    831 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    832 }
    833 
    834 static inline void
    835 ixv_disable_queue(struct adapter *adapter, u32 vector)
    836 {
    837 	struct ixgbe_hw *hw = &adapter->hw;
    838 	u64	queue = (u64)(1 << vector);
    839 	u32	mask;
    840 
    841 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    842 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    843 }
    844 
    845 static inline void
    846 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    847 {
    848 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    849 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    850 }
    851 
    852 
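/*
 * Deferred (softint) queue handler: completes RX and TX work outside
 * of hard interrupt context, restarts transmission if packets are
 * queued, and either reschedules itself while RX work remains or
 * re-enables the queue interrupt.
 */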
    853 static void
    854 ixv_handle_que(void *context)
    855 {
    856 	struct ix_queue *que = context;
    857 	struct adapter  *adapter = que->adapter;
    858 	struct tx_ring	*txr = que->txr;
    859 	struct ifnet    *ifp = adapter->ifp;
    860 	bool		more;
    861 
    862 	if (ifp->if_flags & IFF_RUNNING) {
    863 		more = ixgbe_rxeof(que);
    864 		IXGBE_TX_LOCK(txr);
    865 		ixgbe_txeof(txr);
    866 #ifndef IXGBE_LEGACY_TX
    867 		if (pcq_peek(txr->txr_interq) != NULL)
    868 			ixgbe_mq_start_locked(ifp, txr);
    869 #else
    870 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
    871 			ixgbe_start_locked(txr, ifp);
    872 #endif
    873 		IXGBE_TX_UNLOCK(txr);
    874 		if (more) {
    875 			adapter->req.ev_count++;
    876 			softint_schedule(que->que_si);
    877 			return;
    878 		}
    879 	}
    880 
    881 	/* Reenable this interrupt */
    882 	ixv_enable_queue(adapter, que->msix);
    883 	return;
    884 }
    885 
    886 /*********************************************************************
    887  *
     888  *  MSIX Queue Interrupt Service routine
    889  *
    890  **********************************************************************/
    891 int
    892 ixv_msix_que(void *arg)
    893 {
    894 	struct ix_queue	*que = arg;
    895 	struct adapter  *adapter = que->adapter;
    896 	struct ifnet    *ifp = adapter->ifp;
    897 	struct tx_ring	*txr = que->txr;
    898 	struct rx_ring	*rxr = que->rxr;
    899 	bool		more;
    900 	u32		newitr = 0;
    901 
    902 	ixv_disable_queue(adapter, que->msix);
    903 	++que->irqs.ev_count;
    904 
    905 #ifdef __NetBSD__
    906 	/* Don't run ixgbe_rxeof in interrupt context */
    907 	more = true;
    908 #else
    909 	more = ixgbe_rxeof(que);
    910 #endif
    911 
    912 	IXGBE_TX_LOCK(txr);
    913 	ixgbe_txeof(txr);
    914 	/*
    915 	** Make certain that if the stack
    916 	** has anything queued the task gets
    917 	** scheduled to handle it.
    918 	*/
    919 #ifdef IXGBE_LEGACY_TX
    920 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
    921 		ixgbe_start_locked(txr, ifp);
    922 #else
    923 	if (pcq_peek(txr->txr_interq) != NULL)
    924 		ixgbe_mq_start_locked(ifp, txr);
    925 #endif
    926 	IXGBE_TX_UNLOCK(txr);
    927 
    928 	/* Do AIM now? */
    929 
    930 	if (ixv_enable_aim == FALSE)
    931 		goto no_calc;
     932 	/*
     933 	** Do Adaptive Interrupt Moderation:
     934 	**  - Write out last calculated setting
     935 	**  - Calculate based on average size over
     936 	**    the last interval.
     937 	*/
     938 	if (que->eitr_setting)
     939 		IXGBE_WRITE_REG(&adapter->hw,
     940 		    IXGBE_VTEITR(que->msix),
     941 		    que->eitr_setting);
     942 
     943 	que->eitr_setting = 0;
     944 
     945 	/* Idle, do nothing */
     946 	if ((txr->bytes == 0) && (rxr->bytes == 0))
     947 		goto no_calc;
     948 
     949 	if ((txr->bytes) && (txr->packets))
     950 		newitr = txr->bytes / txr->packets;
    951 	if ((rxr->bytes) && (rxr->packets))
    952 		newitr = max(newitr,
    953 		    (rxr->bytes / rxr->packets));
    954 	newitr += 24; /* account for hardware frame, crc */
    955 
    956 	/* set an upper boundary */
    957 	newitr = min(newitr, 3000);
    958 
    959 	/* Be nice to the mid range */
    960 	if ((newitr > 300) && (newitr < 1200))
    961 		newitr = (newitr / 3);
    962 	else
    963 		newitr = (newitr / 2);
    964 
    965 	newitr |= newitr << 16;
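	/*
	 * Example: an average frame of 576 bytes gives newitr = 600,
	 * which is in the mid range and is scaled down to 200 before
	 * being mirrored into the upper 16 bits of the value.
	 */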
    966 
     967 	/* save for next interrupt */
     968 	que->eitr_setting = newitr;
     969 
     970 	/* Reset state */
     971 	txr->bytes = 0;
     972 	txr->packets = 0;
     973 	rxr->bytes = 0;
     974 	rxr->packets = 0;
    975 
    976 no_calc:
    977 	if (more)
    978 		softint_schedule(que->que_si);
    979 	else /* Reenable this interrupt */
    980 		ixv_enable_queue(adapter, que->msix);
    981 	return 1;
    982 }
    983 
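/*
 * Mailbox/link MSIX handler: read and clear the interrupt cause,
 * schedule the link softint on a link status change, and re-enable
 * the "other" (non-queue) interrupt.
 */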
    984 static int
    985 ixv_msix_mbx(void *arg)
    986 {
    987 	struct adapter	*adapter = arg;
    988 	struct ixgbe_hw *hw = &adapter->hw;
    989 	u32		reg;
    990 
    991 	++adapter->link_irq.ev_count;
    992 
    993 	/* First get the cause */
    994 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    995 	/* Clear interrupt with write */
    996 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    997 
    998 	/* Link status change */
    999 	if (reg & IXGBE_EICR_LSC)
   1000 		softint_schedule(adapter->link_si);
   1001 
   1002 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1003 	return 1;
   1004 }
   1005 
   1006 /*********************************************************************
   1007  *
   1008  *  Media Ioctl callback
   1009  *
   1010  *  This routine is called whenever the user queries the status of
   1011  *  the interface using ifconfig.
   1012  *
   1013  **********************************************************************/
   1014 static void
   1015 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1016 {
   1017 	struct adapter *adapter = ifp->if_softc;
   1018 
   1019 	INIT_DEBUGOUT("ixv_media_status: begin");
   1020 	IXGBE_CORE_LOCK(adapter);
   1021 	ixv_update_link_status(adapter);
   1022 
   1023 	ifmr->ifm_status = IFM_AVALID;
   1024 	ifmr->ifm_active = IFM_ETHER;
   1025 
   1026 	if (!adapter->link_active) {
   1027 		ifmr->ifm_active |= IFM_NONE;
   1028 		IXGBE_CORE_UNLOCK(adapter);
   1029 		return;
   1030 	}
   1031 
   1032 	ifmr->ifm_status |= IFM_ACTIVE;
   1033 
   1034 	switch (adapter->link_speed) {
   1035 		case IXGBE_LINK_SPEED_10GB_FULL:
   1036 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1037 			break;
   1038 		case IXGBE_LINK_SPEED_1GB_FULL:
   1039 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1040 			break;
   1041 		case IXGBE_LINK_SPEED_100_FULL:
   1042 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1043 			break;
   1044 	}
   1045 
   1046 	IXGBE_CORE_UNLOCK(adapter);
   1047 
   1048 	return;
   1049 }
   1050 
   1051 /*********************************************************************
   1052  *
   1053  *  Media Ioctl callback
   1054  *
   1055  *  This routine is called when the user changes speed/duplex using
    1056  *  media/mediaopt options with ifconfig.
   1057  *
   1058  **********************************************************************/
   1059 static int
   1060 ixv_media_change(struct ifnet * ifp)
   1061 {
   1062 	struct adapter *adapter = ifp->if_softc;
   1063 	struct ifmedia *ifm = &adapter->media;
   1064 
   1065 	INIT_DEBUGOUT("ixv_media_change: begin");
   1066 
   1067 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1068 		return (EINVAL);
   1069 
    1070 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    1071 	case IFM_AUTO:
    1072 		break;
    1073 	default:
    1074 		device_printf(adapter->dev, "Only auto media type\n");
    1075 		return (EINVAL);
    1076 	}
   1077 
   1078 	return (0);
   1079 }
   1080 
   1081 
   1082 /*********************************************************************
   1083  *  Multicast Update
   1084  *
   1085  *  This routine is called whenever multicast address list is updated.
   1086  *
   1087  **********************************************************************/
   1088 #define IXGBE_RAR_ENTRIES 16
   1089 
   1090 static void
   1091 ixv_set_multi(struct adapter *adapter)
   1092 {
   1093 	struct ether_multi *enm;
   1094 	struct ether_multistep step;
   1095 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1096 	u8	*update_ptr;
   1097 	int	mcnt = 0;
   1098 	struct ethercom *ec = &adapter->osdep.ec;
   1099 
   1100 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1101 
   1102 	ETHER_FIRST_MULTI(step, ec, enm);
   1103 	while (enm != NULL) {
   1104 		bcopy(enm->enm_addrlo,
   1105 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1106 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1107 		mcnt++;
   1108 		/* XXX This might be required --msaitoh */
   1109 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1110 			break;
   1111 		ETHER_NEXT_MULTI(step, enm);
   1112 	}
   1113 
   1114 	update_ptr = mta;
   1115 
   1116 	ixgbe_update_mc_addr_list(&adapter->hw,
   1117 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1118 
   1119 	return;
   1120 }
   1121 
   1122 /*
   1123  * This is an iterator function now needed by the multicast
   1124  * shared code. It simply feeds the shared code routine the
   1125  * addresses in the array of ixv_set_multi() one by one.
   1126  */
   1127 static u8 *
   1128 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1129 {
   1130 	u8 *addr = *update_ptr;
   1131 	u8 *newptr;
   1132 	*vmdq = 0;
   1133 
   1134 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1135 	*update_ptr = newptr;
   1136 	return addr;
   1137 }
   1138 
   1139 /*********************************************************************
   1140  *  Timer routine
   1141  *
    1142  *  This routine checks the link status, updates statistics,
   1143  *  and runs the watchdog check.
   1144  *
   1145  **********************************************************************/
   1146 
   1147 static void
   1148 ixv_local_timer(void *arg)
   1149 {
   1150 	struct adapter *adapter = arg;
   1151 
   1152 	IXGBE_CORE_LOCK(adapter);
   1153 	ixv_local_timer_locked(adapter);
   1154 	IXGBE_CORE_UNLOCK(adapter);
   1155 }
   1156 
   1157 static void
   1158 ixv_local_timer_locked(void *arg)
   1159 {
   1160 	struct adapter	*adapter = arg;
   1161 	device_t	dev = adapter->dev;
   1162 	struct ix_queue	*que = adapter->queues;
   1163 	u64		queues = 0;
   1164 	int		hung = 0;
   1165 
   1166 	KASSERT(mutex_owned(&adapter->core_mtx));
   1167 
   1168 	ixv_update_link_status(adapter);
   1169 
   1170 	/* Stats Update */
   1171 	ixv_update_stats(adapter);
   1172 
   1173 	/*
   1174 	** Check the TX queues status
   1175 	**      - mark hung queues so we don't schedule on them
   1176 	**      - watchdog only if all queues show hung
   1177 	*/
   1178 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1179 		/* Keep track of queues with work for soft irq */
   1180 		if (que->txr->busy)
   1181 			queues |= ((u64)1 << que->me);
   1182 		/*
    1183 		** Each time txeof runs without cleaning but there
    1184 		** are uncleaned descriptors, it increments busy. If
    1185 		** we get to the MAX we declare the queue hung.
   1186 		*/
   1187 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1188 			++hung;
   1189 			/* Mark the queue as inactive */
   1190 			adapter->active_queues &= ~((u64)1 << que->me);
   1191 			continue;
   1192 		} else {
   1193 			/* Check if we've come back from hung */
   1194 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
    1195 				adapter->active_queues |= ((u64)1 << que->me);
   1196 		}
   1197 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1198 			device_printf(dev,"Warning queue %d "
   1199 			    "appears to be hung!\n", i);
   1200 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1201 			++hung;
   1202 		}
   1203 
   1204 	}
   1205 
   1206 	/* Only truly watchdog if all queues show hung */
   1207 	if (hung == adapter->num_queues)
   1208 		goto watchdog;
   1209 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1210 		ixv_rearm_queues(adapter, queues);
   1211 	}
   1212 
   1213 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1214 	return;
   1215 
   1216 watchdog:
   1217 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1218 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1219 	adapter->watchdog_events.ev_count++;
   1220 	ixv_init_locked(adapter);
   1221 }
   1222 
   1223 /*
    1224 ** Note: this routine updates the OS on the link state;
    1225 **	the real check of the hardware only happens with
   1226 **	a link interrupt.
   1227 */
   1228 static void
   1229 ixv_update_link_status(struct adapter *adapter)
   1230 {
   1231 	struct ifnet	*ifp = adapter->ifp;
   1232 	device_t dev = adapter->dev;
   1233 
   1234 	if (adapter->link_up){
   1235 		if (adapter->link_active == FALSE) {
   1236 			if (bootverbose) {
   1237 				const char *bpsmsg;
   1238 
   1239 				switch (adapter->link_speed) {
   1240 				case IXGBE_LINK_SPEED_10GB_FULL:
   1241 					bpsmsg = "10 Gbps";
   1242 					break;
   1243 				case IXGBE_LINK_SPEED_1GB_FULL:
   1244 					bpsmsg = "1 Gbps";
   1245 					break;
   1246 				case IXGBE_LINK_SPEED_100_FULL:
   1247 					bpsmsg = "100 Mbps";
   1248 					break;
   1249 				default:
   1250 					bpsmsg = "unknown speed";
   1251 					break;
   1252 				}
   1253 				device_printf(dev,"Link is up %s %s \n",
   1254 				    bpsmsg, "Full Duplex");
   1255 			}
   1256 			adapter->link_active = TRUE;
   1257 			if_link_state_change(ifp, LINK_STATE_UP);
   1258 		}
   1259 	} else { /* Link down */
   1260 		if (adapter->link_active == TRUE) {
   1261 			if (bootverbose)
   1262 				device_printf(dev,"Link is Down\n");
   1263 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1264 			adapter->link_active = FALSE;
   1265 		}
   1266 	}
   1267 
   1268 	return;
   1269 }
   1270 
   1271 
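/*
 * if_stop entry point: wraps ixv_stop() with the core lock held.
 */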
   1272 static void
   1273 ixv_ifstop(struct ifnet *ifp, int disable)
   1274 {
   1275 	struct adapter *adapter = ifp->if_softc;
   1276 
   1277 	IXGBE_CORE_LOCK(adapter);
   1278 	ixv_stop(adapter);
   1279 	IXGBE_CORE_UNLOCK(adapter);
   1280 }
   1281 
   1282 /*********************************************************************
   1283  *
   1284  *  This routine disables all traffic on the adapter by issuing a
   1285  *  global reset on the MAC and deallocates TX/RX buffers.
   1286  *
   1287  **********************************************************************/
   1288 
   1289 static void
   1290 ixv_stop(void *arg)
   1291 {
   1292 	struct ifnet   *ifp;
   1293 	struct adapter *adapter = arg;
   1294 	struct ixgbe_hw *hw = &adapter->hw;
   1295 	ifp = adapter->ifp;
   1296 
   1297 	KASSERT(mutex_owned(&adapter->core_mtx));
   1298 
   1299 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1300 	ixv_disable_intr(adapter);
   1301 
   1302 	/* Tell the stack that the interface is no longer active */
   1303 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1304 
   1305 	ixgbe_reset_hw(hw);
   1306 	adapter->hw.adapter_stopped = FALSE;
   1307 	ixgbe_stop_adapter(hw);
   1308 	callout_stop(&adapter->timer);
   1309 
   1310 	/* reprogram the RAR[0] in case user changed it. */
   1311 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1312 
   1313 	return;
   1314 }
   1315 
   1316 
   1317 /*********************************************************************
   1318  *
   1319  *  Determine hardware revision.
   1320  *
   1321  **********************************************************************/
   1322 static void
   1323 ixv_identify_hardware(struct adapter *adapter)
   1324 {
   1325 	pcitag_t tag;
   1326 	pci_chipset_tag_t pc;
   1327 	pcireg_t subid, id;
   1328 	struct ixgbe_hw *hw = &adapter->hw;
   1329 
   1330 	pc = adapter->osdep.pc;
   1331 	tag = adapter->osdep.tag;
   1332 
   1333 	/*
    1334 	** Make sure BUSMASTER is set; on a VM under
    1335 	** KVM it may not be, and that will break things.
   1336 	*/
   1337 	ixgbe_pci_enable_busmaster(pc, tag);
   1338 
   1339 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1340 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1341 
   1342 	/* Save off the information about this board */
   1343 	hw->vendor_id = PCI_VENDOR(id);
   1344 	hw->device_id = PCI_PRODUCT(id);
   1345 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1346 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1347 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1348 
   1349 	/* We need this to determine device-specific things */
   1350 	ixgbe_set_mac_type(hw);
   1351 
   1352 	/* Set the right number of segments */
   1353 	adapter->num_segs = IXGBE_82599_SCATTER;
   1354 
   1355 	return;
   1356 }
   1357 
   1358 /*********************************************************************
   1359  *
   1360  *  Setup MSIX Interrupt resources and handlers
   1361  *
   1362  **********************************************************************/
   1363 static int
   1364 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1365 {
   1366 	device_t	dev = adapter->dev;
   1367 	struct ix_queue *que = adapter->queues;
   1368 	struct		tx_ring *txr = adapter->tx_rings;
   1369 	int 		error, rid, vector = 0;
   1370 	pci_chipset_tag_t pc;
   1371 	pcitag_t	tag;
   1372 	char		intrbuf[PCI_INTRSTR_LEN];
   1373 	char		intr_xname[32];
   1374 	const char	*intrstr = NULL;
   1375 	kcpuset_t	*affinity;
   1376 	int		cpu_id = 0;
   1377 
   1378 	pc = adapter->osdep.pc;
   1379 	tag = adapter->osdep.tag;
   1380 
   1381 	adapter->osdep.nintrs = adapter->num_queues + 1;
   1382 	if (pci_msix_alloc_exact(pa,
   1383 	    &adapter->osdep.intrs, adapter->osdep.nintrs) != 0)
   1384 		return (ENXIO);
   1385 
   1386 	kcpuset_create(&affinity, false);
   1387 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1388 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   1389 		    device_xname(dev), i);
   1390 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1391 		    sizeof(intrbuf));
   1392 #ifdef IXV_MPSAFE
   1393 		pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1394 		    true);
   1395 #endif
   1396 		/* Set the handler function */
   1397 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   1398 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   1399 			intr_xname);
   1400 		if (que->res == NULL) {
   1401 			pci_intr_release(pc, adapter->osdep.intrs,
   1402 			    adapter->osdep.nintrs);
   1403 			aprint_error_dev(dev,
   1404 			    "Failed to register QUE handler");
   1405 			kcpuset_destroy(affinity);
   1406 			return (ENXIO);
   1407 		}
   1408 		que->msix = vector;
    1409 		adapter->active_queues |= (u64)(1 << que->msix);
   1410 
   1411 		cpu_id = i;
   1412 		/* Round-robin affinity */
   1413 		kcpuset_zero(affinity);
   1414 		kcpuset_set(affinity, cpu_id % ncpu);
   1415 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1416 		    NULL);
   1417 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1418 		    intrstr);
   1419 		if (error == 0)
   1420 			aprint_normal(", bound queue %d to cpu %d\n",
   1421 			    i, cpu_id);
   1422 		else
   1423 			aprint_normal("\n");
   1424 
   1425 #ifndef IXGBE_LEGACY_TX
   1426 		txr->txr_si = softint_establish(SOFTINT_NET,
   1427 		    ixgbe_deferred_mq_start, txr);
   1428 #endif
   1429 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1430 		    que);
   1431 		if (que->que_si == NULL) {
   1432 			aprint_error_dev(dev,
   1433 			    "could not establish software interrupt\n");
   1434 		}
   1435 	}
   1436 
   1437 	/* and Mailbox */
   1438 	cpu_id++;
   1439 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   1440 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1441 	    sizeof(intrbuf));
   1442 #ifdef IXG_MPSAFE
   1443 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   1444 #endif
   1445 	/* Set the mbx handler function */
   1446 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   1447 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   1448 		intr_xname);
   1449 	if (adapter->osdep.ihs[vector] == NULL) {
   1450 		adapter->res = NULL;
   1451 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1452 		kcpuset_destroy(affinity);
   1453 		return (ENXIO);
   1454 	}
   1455 	/* Round-robin affinity */
   1456 	kcpuset_zero(affinity);
   1457 	kcpuset_set(affinity, cpu_id % ncpu);
   1458 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1459 
   1460 	aprint_normal_dev(dev,
   1461 	    "for link, interrupting at %s, ", intrstr);
    1462 	if (error == 0)
    1463 		aprint_normal("affinity to cpu %d\n", cpu_id);
    1464 	else
    1465 		aprint_normal("\n");
   1465 	adapter->vector = vector;
   1466 	/* Tasklets for Mailbox */
   1467 	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1468 	    adapter);
   1469 	/*
   1470 	** Due to a broken design QEMU will fail to properly
   1471 	** enable the guest for MSIX unless the vectors in
   1472 	** the table are all set up, so we must rewrite the
   1473 	** ENABLE in the MSIX control register again at this
   1474 	** point to cause it to successfully initialize us.
   1475 	*/
   1476 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1477 		int msix_ctrl;
   1478 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1479 		rid += PCI_MSIX_CTL;
   1480 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1481 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1482 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1483 	}
   1484 
   1485 	kcpuset_destroy(affinity);
   1486 	return (0);
   1487 }
   1488 
   1489 /*
    1490  * Set up MSIX resources; note that the VF
    1491  * device MUST use MSIX, there is no fallback.
   1492  */
   1493 static int
   1494 ixv_setup_msix(struct adapter *adapter)
   1495 {
   1496 	device_t dev = adapter->dev;
   1497 	int want, queues, msgs;
   1498 
   1499 	/* Must have at least 2 MSIX vectors */
   1500 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1501 	if (msgs < 2) {
   1502 		aprint_error_dev(dev,"MSIX config error\n");
   1503 		return (ENXIO);
   1504 	}
   1505 	msgs = MIN(msgs, IXG_MAX_NINTR);
   1506 
   1507 	/* Figure out a reasonable auto config value */
   1508 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
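	/*
	 * e.g. with 4 CPUs and 3 MSIX messages available this yields
	 * 2 queues, leaving one vector for the mailbox (ixv_num_queues,
	 * if non-zero, overrides this below).
	 */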
   1509 
   1510 	if (ixv_num_queues != 0)
   1511 		queues = ixv_num_queues;
   1512 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   1513 		queues = IXGBE_VF_MAX_TX_QUEUES;
   1514 
   1515 	/*
    1516 	** We want one vector per queue,
    1517 	** plus an additional one for the mailbox.
   1518 	*/
   1519 	want = queues + 1;
   1520 	if (msgs >= want) {
   1521 		msgs = want;
   1522 	} else {
    1523 		aprint_error_dev(dev,
   1524 		    "MSIX Configuration Problem, "
   1525 		    "%d vectors but %d queues wanted!\n",
   1526 		    msgs, want);
   1527 		return -1;
   1528 	}
   1529 
   1530 	adapter->msix_mem = (void *)1; /* XXX */
   1531 	aprint_normal_dev(dev,
   1532 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1533 	adapter->num_queues = queues;
   1534 	return (msgs);
   1535 }
   1536 
   1537 
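/*
 * Map BAR0 (the device registers) and then set up MSIX; the number
 * of configured vectors is stored in adapter->msix.
 */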
   1538 static int
   1539 ixv_allocate_pci_resources(struct adapter *adapter,
   1540     const struct pci_attach_args *pa)
   1541 {
   1542 	pcireg_t	memtype;
   1543 	device_t        dev = adapter->dev;
   1544 	bus_addr_t addr;
   1545 	int flags;
   1546 
   1547 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1548 
   1549 	switch (memtype) {
   1550 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1551 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1552 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1553 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1554 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1555 			goto map_err;
   1556 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1557 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1558 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1559 		}
   1560 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1561 		     adapter->osdep.mem_size, flags,
   1562 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1563 map_err:
   1564 			adapter->osdep.mem_size = 0;
   1565 			aprint_error_dev(dev, "unable to map BAR0\n");
   1566 			return ENXIO;
   1567 		}
   1568 		break;
   1569 	default:
   1570 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1571 		return ENXIO;
   1572 	}
   1573 
   1574 	/* Pick up the tuneable queues */
   1575 	adapter->num_queues = ixv_num_queues;
   1576 	adapter->hw.back = adapter;
   1577 
   1578 	/*
    1579 	** Now set up MSI/X; this should
    1580 	** return the number of
   1581 	** configured vectors.
   1582 	*/
   1583 	adapter->msix = ixv_setup_msix(adapter);
   1584 	if (adapter->msix == ENXIO)
   1585 		return (ENXIO);
   1586 	else
   1587 		return (0);
   1588 }
   1589 
   1590 static void
   1591 ixv_free_pci_resources(struct adapter * adapter)
   1592 {
   1593 	struct 		ix_queue *que = adapter->queues;
   1594 	int		rid;
   1595 
   1596 	/*
   1597 	**  Release all msix queue resources:
   1598 	*/
   1599 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1600 		if (que->res != NULL)
   1601 			pci_intr_disestablish(adapter->osdep.pc,
   1602 			    adapter->osdep.ihs[i]);
   1603 	}
   1604 
   1605 
   1606 	/* Clean the Legacy or Link interrupt last */
   1607 	if (adapter->vector) /* we are doing MSIX */
   1608 		rid = adapter->vector;
   1609 	else
   1610 		rid = 0;
   1611 
   1612 	if (adapter->osdep.ihs[rid] != NULL) {
   1613 		pci_intr_disestablish(adapter->osdep.pc,
   1614 		    adapter->osdep.ihs[rid]);
   1615 		adapter->osdep.ihs[rid] = NULL;
   1616 	}
   1617 
   1618 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1619 	    adapter->osdep.nintrs);
   1620 
   1621 	if (adapter->osdep.mem_size != 0) {
   1622 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1623 		    adapter->osdep.mem_bus_space_handle,
   1624 		    adapter->osdep.mem_size);
   1625 	}
   1626 
   1627 	return;
   1628 }
   1629 
   1630 /*********************************************************************
   1631  *
   1632  *  Setup networking device structure and register an interface.
   1633  *
   1634  **********************************************************************/
   1635 static void
   1636 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1637 {
   1638 	struct ethercom *ec = &adapter->osdep.ec;
   1639 	struct ifnet   *ifp;
   1640 
   1641 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1642 
   1643 	ifp = adapter->ifp = &ec->ec_if;
   1644 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1645 	ifp->if_baudrate = IF_Gbps(10);
   1646 	ifp->if_init = ixv_init;
   1647 	ifp->if_stop = ixv_ifstop;
   1648 	ifp->if_softc = adapter;
   1649 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1650 	ifp->if_ioctl = ixv_ioctl;
   1651 #ifndef IXGBE_LEGACY_TX
   1652 	ifp->if_transmit = ixgbe_mq_start;
   1653 #endif
   1654 	ifp->if_start = ixgbe_start;
   1655 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1656 
   1657 	if_initialize(ifp);
   1658 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1659 #ifndef IXGBE_LEGACY_TX
   1660 #if 0	/* We use per TX queue softint */
   1661 	if_deferred_start_init(ifp, ixgbe_deferred_mq_start);
   1662 #endif
   1663 #endif
   1664 	if_register(ifp);
   1665 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1666 
   1667 	adapter->max_frame_size =
   1668 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
   1669 
   1670 	/*
   1671 	 * Tell the upper layer(s) we support long frames.
   1672 	 */
   1673 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1674 
   1675 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1676 	ifp->if_capenable = 0;
   1677 
   1678 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1679 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1680 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1681 	    | ETHERCAP_VLAN_MTU;
   1682 	ec->ec_capenable = ec->ec_capabilities;
   1683 
   1684 	/* Don't enable LRO by default */
   1685 	ifp->if_capabilities |= IFCAP_LRO;
   1686 #if 0
   1687 	ifp->if_capenable = ifp->if_capabilities;
   1688 #endif
   1689 
   1690 	/*
   1691 	** Don't turn this on by default: if vlans are
   1692 	** created on another pseudo device (e.g. lagg),
   1693 	** vlan events are not passed through, breaking
   1694 	** operation, but with HW FILTER off it works.  If
   1695 	** vlans are used directly on this driver you can
   1696 	** enable this and get full hardware tag filtering.
   1697 	*/
   1698 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1699 
   1700 	/*
   1701 	 * Specify the media types supported by this adapter and register
   1702 	 * callbacks to update media and link information
   1703 	 */
   1704 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1705 		     ixv_media_status);
   1706 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1707 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1708 
   1709 	return;
   1710 }
   1711 
   1712 static void
   1713 ixv_config_link(struct adapter *adapter)
   1714 {
   1715 	struct ixgbe_hw *hw = &adapter->hw;
   1716 
   1717 	if (hw->mac.ops.check_link)
   1718 		hw->mac.ops.check_link(hw, &adapter->link_speed,
   1719 		    &adapter->link_up, FALSE);
   1720 }
   1721 
   1722 
   1723 /*********************************************************************
   1724  *
   1725  *  Enable transmit unit.
   1726  *
   1727  **********************************************************************/
   1728 static void
   1729 ixv_initialize_transmit_units(struct adapter *adapter)
   1730 {
   1731 	struct tx_ring	*txr = adapter->tx_rings;
   1732 	struct ixgbe_hw	*hw = &adapter->hw;
   1733 
   1734 
   1735 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1736 		u64	tdba = txr->txdma.dma_paddr;
   1737 		u32	txctrl, txdctl;
   1738 
   1739 		/* Set WTHRESH to 8, burst writeback */
   1740 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1741 		txdctl |= (8 << 16);
   1742 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1743 
   1744 		/* Set the HW Tx Head and Tail indices */
   1745 		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
   1746 		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
   1747 
   1748 		/* Set Tx Tail register */
   1749 		txr->tail = IXGBE_VFTDT(i);
   1750 
   1751 		/* Set Ring parameters */
   1752 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1753 		       (tdba & 0x00000000ffffffffULL));
   1754 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1755 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1756 		    adapter->num_tx_desc *
   1757 		    sizeof(struct ixgbe_legacy_tx_desc));
   1758 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1759 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1760 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1761 
   1762 		/* Now enable */
   1763 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1764 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1765 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1766 	}
   1767 
   1768 	return;
   1769 }
   1770 
   1771 
   1772 /*********************************************************************
   1773  *
   1774  *  Setup receive registers and features.
   1775  *
   1776  **********************************************************************/
   1777 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1778 
   1779 static void
   1780 ixv_initialize_receive_units(struct adapter *adapter)
   1781 {
   1782 	struct	rx_ring	*rxr = adapter->rx_rings;
   1783 	struct ixgbe_hw	*hw = &adapter->hw;
   1784 	struct ifnet	*ifp = adapter->ifp;
   1785 	u32		bufsz, rxcsum, psrtype;
   1786 
   1787 	if (ifp->if_mtu > ETHERMTU)
   1788 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1789 	else
   1790 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
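        	/*
        	 * Note: SRRCTL.BSIZEPKT is expressed in 1 KB units
        	 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 in the shared ixgbe
        	 * headers), so the values above select a 2 KB receive
        	 * buffer (2048 >> 10 == 2) for standard MTUs and a 4 KB
        	 * buffer (4096 >> 10 == 4) when jumbo frames are in use.
        	 */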
   1791 
   1792 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1793 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1794 	    IXGBE_PSRTYPE_L2HDR;
   1795 
   1796 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1797 
   1798 	/* Tell PF our max_frame size */
   1799 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1800 
   1801 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1802 		u64 rdba = rxr->rxdma.dma_paddr;
   1803 		u32 reg, rxdctl;
   1804 
   1805 		/* Disable the queue */
   1806 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1807 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1808 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1809 		for (int j = 0; j < 10; j++) {
   1810 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1811 			    IXGBE_RXDCTL_ENABLE)
   1812 				msec_delay(1);
   1813 			else
   1814 				break;
   1815 		}
   1816 		wmb();
   1817 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1818 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1819 		    (rdba & 0x00000000ffffffffULL));
   1820 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1821 		    (rdba >> 32));
   1822 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1823 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1824 
   1825 		/* Reset the ring indices */
   1826 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1827 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1828 
   1829 		/* Set up the SRRCTL register */
   1830 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1831 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1832 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1833 		reg |= bufsz;
   1834 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1835 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1836 
   1837 		/* Capture Rx Tail index */
   1838 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1839 
   1840 		/* Do the queue enabling last */
   1841 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1842 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1843 		for (int k = 0; k < 10; k++) {
   1844 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1845 			    IXGBE_RXDCTL_ENABLE)
   1846 				break;
   1847 			else
   1848 				msec_delay(1);
   1849 		}
   1850 		wmb();
   1851 
   1852 		/* Set the Tail Pointer */
   1853 #ifdef DEV_NETMAP
   1854 		/*
   1855 		 * In netmap mode, we must preserve the buffers made
   1856 		 * available to userspace before the if_init()
   1857 		 * (this is true by default on the TX side, because
   1858 		 * init makes all buffers available to userspace).
   1859 		 *
   1860 		 * netmap_reset() and the device specific routines
   1861 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1862 		 * buffers at the end of the NIC ring, so here we
   1863 		 * must set the RDT (tail) register to make sure
   1864 		 * they are not overwritten.
   1865 		 *
   1866 		 * In this driver the NIC ring starts at RDH = 0,
   1867 		 * RDT points to the last slot available for reception (?),
   1868 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1869 		 */
   1870 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1871 			struct netmap_adapter *na = NA(adapter->ifp);
   1872 			struct netmap_kring *kring = &na->rx_rings[i];
   1873 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1874 
   1875 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1876 		} else
   1877 #endif /* DEV_NETMAP */
   1878 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1879 			    adapter->num_rx_desc - 1);
   1880 	}
   1881 
   1882 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1883 
   1884 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1885 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1886 
   1887 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1888 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1889 
   1890 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1891 
   1892 	return;
   1893 }
   1894 
   1895 static void
   1896 ixv_setup_vlan_support(struct adapter *adapter)
   1897 {
   1898 	struct ixgbe_hw *hw = &adapter->hw;
   1899 	u32		ctrl, vid, vfta, retry;
   1900 	struct rx_ring	*rxr;
   1901 
   1902 	/*
   1903 	** We get here through init_locked, meaning
   1904 	** a soft reset, which has already cleared
   1905 	** the VFTA and other state, so if no vlans
   1906 	** have been registered there is nothing to do.
   1907 	*/
   1908 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1909 		return;
   1910 
   1911 	/* Enable the queues */
   1912 	for (int i = 0; i < adapter->num_queues; i++) {
   1913 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1914 		ctrl |= IXGBE_RXDCTL_VME;
   1915 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1916 		/*
   1917 		 * Let Rx path know that it needs to store VLAN tag
   1918 		 * as part of extra mbuf info.
   1919 		 */
   1920 		rxr = &adapter->rx_rings[i];
   1921 		rxr->vtag_strip = TRUE;
   1922 	}
   1923 
   1924 	/*
   1925 	** A soft reset zeroes out the VFTA, so
   1926 	** we need to repopulate it now.
   1927 	*/
   1928 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1929 		if (ixv_shadow_vfta[i] == 0)
   1930 			continue;
   1931 		vfta = ixv_shadow_vfta[i];
   1932 		/*
   1933 		** Reconstruct the vlan IDs from the
   1934 		** bits set in each 32-bit word of
   1935 		** the shadow array.
   1936 		*/
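        		/*
        		 * Example: VLAN ID 1234 is recorded in shadow word
        		 * 1234 / 32 == 38 at bit 1234 % 32 == 18, so the
        		 * loop below rebuilds vid == (38 * 32) + 18 == 1234.
        		 */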
   1937 		for (int j = 0; j < 32; j++) {
   1938 			retry = 0;
   1939 			if ((vfta & (1 << j)) == 0)
   1940 				continue;
   1941 			vid = (i * 32) + j;
   1942 			/* Call the shared code mailbox routine */
   1943 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1944 				if (++retry > 5)
   1945 					break;
   1946 			}
   1947 		}
   1948 	}
   1949 }
   1950 
   1951 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1952 /*
   1953 ** This routine is run via a vlan config EVENT;
   1954 ** it enables us to use the HW Filter table since
   1955 ** we can get the vlan id.  This just creates the
   1956 ** entry in the soft version of the VFTA; init will
   1957 ** repopulate the real table.
   1958 */
   1959 static void
   1960 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1961 {
   1962 	struct adapter	*adapter = ifp->if_softc;
   1963 	u16		index, bit;
   1964 
   1965 	if (ifp->if_softc != arg) /* Not our event */
   1966 		return;
   1967 
   1968 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1969 		return;
   1970 
   1971 	IXGBE_CORE_LOCK(adapter);
   1972 	index = (vtag >> 5) & 0x7F;
   1973 	bit = vtag & 0x1F;
   1974 	ixv_shadow_vfta[index] |= (1 << bit);
   1975 	/* Re-init to load the changes */
   1976 	ixv_init_locked(adapter);
   1977 	IXGBE_CORE_UNLOCK(adapter);
   1978 }
   1979 
   1980 /*
   1981 ** This routine is run via a vlan
   1982 ** unconfig EVENT; remove our entry
   1983 ** from the soft VFTA.
   1984 */
   1985 static void
   1986 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1987 {
   1988 	struct adapter	*adapter = ifp->if_softc;
   1989 	u16		index, bit;
   1990 
   1991 	if (ifp->if_softc !=  arg)
   1992 		return;
   1993 
   1994 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1995 		return;
   1996 
   1997 	IXGBE_CORE_LOCK(adapter);
   1998 	index = (vtag >> 5) & 0x7F;
   1999 	bit = vtag & 0x1F;
   2000 	ixv_shadow_vfta[index] &= ~(1 << bit);
   2001 	/* Re-init to load the changes */
   2002 	ixv_init_locked(adapter);
   2003 	IXGBE_CORE_UNLOCK(adapter);
   2004 }
   2005 #endif
   2006 
   2007 static void
   2008 ixv_enable_intr(struct adapter *adapter)
   2009 {
   2010 	struct ixgbe_hw *hw = &adapter->hw;
   2011 	struct ix_queue *que = adapter->queues;
   2012 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   2013 
   2014 
   2015 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   2016 
   2017 	mask = IXGBE_EIMS_ENABLE_MASK;
   2018 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   2019 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   2020 
   2021 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2022 		ixv_enable_queue(adapter, que->msix);
   2023 
   2024 	IXGBE_WRITE_FLUSH(hw);
   2025 
   2026 	return;
   2027 }
   2028 
   2029 static void
   2030 ixv_disable_intr(struct adapter *adapter)
   2031 {
   2032 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2033 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   2034 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2035 	return;
   2036 }
   2037 
   2038 /*
   2039 ** Setup the correct IVAR register for a particular MSIX interrupt
   2040 **  - entry is the register array entry
   2041 **  - vector is the MSIX vector for this queue
   2042 **  - type is 0 for RX, 1 for TX, -1 for MISC (mailbox)
   2043 */
   2044 static void
   2045 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2046 {
   2047 	struct ixgbe_hw *hw = &adapter->hw;
   2048 	u32 ivar, index;
   2049 
   2050 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2051 
   2052 	if (type == -1) { /* MISC IVAR */
   2053 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2054 		ivar &= ~0xFF;
   2055 		ivar |= vector;
   2056 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2057 	} else {	/* RX/TX IVARS */
   2058 		index = (16 * (entry & 1)) + (8 * type);
   2059 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2060 		ivar &= ~(0xFF << index);
   2061 		ivar |= (vector << index);
   2062 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2063 	}
   2064 }
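        /*
        ** Layout note (illustrative): each VTIVAR register packs four
        ** 8-bit entries: RX then TX for an even-numbered queue in the
        ** low half, and RX then TX for the following odd queue in the
        ** high half.  For example, entry 3 with type 1 (TX) yields
        ** index = 16 * (3 & 1) + 8 * 1 = 24, i.e. bits 31:24 of
        ** VTIVAR(3 >> 1) = VTIVAR(1).  The mailbox (type -1) uses the
        ** separate VTIVAR_MISC register.
        */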
   2065 
   2066 static void
   2067 ixv_configure_ivars(struct adapter *adapter)
   2068 {
   2069 	struct  ix_queue *que = adapter->queues;
   2070 
   2071 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2072 		/* First the RX queue entry */
   2073 		ixv_set_ivar(adapter, i, que->msix, 0);
   2074 		/* ... and the TX */
   2075 		ixv_set_ivar(adapter, i, que->msix, 1);
   2076 		/* Set an initial value in EITR */
   2077 		IXGBE_WRITE_REG(&adapter->hw,
   2078 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   2079 	}
   2080 
   2081 	/* For the mailbox interrupt */
   2082 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2083 }
   2084 
   2085 
   2086 /*
   2087 ** Softint handler for MSI-X mailbox interrupts
   2088 **  - run outside the interrupt handler since it might sleep
   2089 */
   2090 static void
   2091 ixv_handle_mbx(void *context)
   2092 {
   2093 	struct adapter  *adapter = context;
   2094 
   2095 	ixgbe_check_link(&adapter->hw,
   2096 	    &adapter->link_speed, &adapter->link_up, 0);
   2097 	ixv_update_link_status(adapter);
   2098 }
   2099 
   2100 /*
   2101 ** The VF stats registers never have a truly virgin
   2102 ** starting point, so this routine creates an
   2103 ** artificial one, recording a baseline at
   2104 ** attach time.
   2105 */
   2106 static void
   2107 ixv_save_stats(struct adapter *adapter)
   2108 {
   2109 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2110 
   2111 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2112 		stats->saved_reset_vfgprc +=
   2113 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2114 		stats->saved_reset_vfgptc +=
   2115 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2116 		stats->saved_reset_vfgorc +=
   2117 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2118 		stats->saved_reset_vfgotc +=
   2119 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2120 		stats->saved_reset_vfmprc +=
   2121 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2122 	}
   2123 }
   2124 
   2125 static void
   2126 ixv_init_stats(struct adapter *adapter)
   2127 {
   2128 	struct ixgbe_hw *hw = &adapter->hw;
   2129 
   2130 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2131 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2132 	adapter->stats.vf.last_vfgorc |=
   2133 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2134 
   2135 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2136 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2137 	adapter->stats.vf.last_vfgotc |=
   2138 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2139 
   2140 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2141 
   2142 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2143 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2144 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2145 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2146 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2147 }
   2148 
   2149 #define UPDATE_STAT_32(reg, last, count)		\
   2150 {							\
   2151 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2152 	if (current < last)				\
   2153 		count.ev_count += 0x100000000LL;	\
   2154 	last = current;					\
   2155 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2156 	count.ev_count |= current;			\
   2157 }
   2158 
   2159 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2160 {							\
   2161 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2162 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2163 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2164 	if (current < last)				\
   2165 		count.ev_count += 0x1000000000LL;	\
   2166 	last = current;					\
   2167 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2168 	count.ev_count |= current;			\
   2169 }
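        /*
        ** Worked example for the macros above: the 32-bit VFGPRC register
        ** wraps from 0xfffffff0 to 0x00000010.  Since current < last, the
        ** 64-bit event count is first advanced by 0x100000000 and its low
        ** 32 bits are then replaced with the new hardware value, so the
        ** count stays monotonic across the wrap.  The 36-bit octet
        ** counters are handled the same way with a 0x1000000000 step.
        */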
   2170 
   2171 /*
   2172 ** ixv_update_stats - Update the board statistics counters.
   2173 */
   2174 void
   2175 ixv_update_stats(struct adapter *adapter)
   2176 {
   2177 	struct ixgbe_hw *hw = &adapter->hw;
   2178 
   2179 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
   2180 	    adapter->stats.vf.vfgprc);
   2181 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
   2182 	    adapter->stats.vf.vfgptc);
   2183 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
   2184 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
   2185 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
   2186 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
   2187 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
   2188 	    adapter->stats.vf.vfmprc);
   2189 }
   2190 
   2191 /*
   2192  * Add statistics (event counters) for the VF.
   2193  */
   2194 static void
   2195 ixv_add_stats_sysctls(struct adapter *adapter)
   2196 {
   2197 	device_t dev = adapter->dev;
   2198 	struct ix_queue *que = &adapter->queues[0];
   2199 	struct tx_ring *txr = que->txr;
   2200 	struct rx_ring *rxr = que->rxr;
   2201 
   2202 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2203 
   2204 	const char *xname = device_xname(dev);
   2205 
   2206 	/* Driver Statistics */
   2207 	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
   2208 	    NULL, xname, "Driver dropped packets");
   2209 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2210 	    NULL, xname, "m_defrag() failed");
   2211 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2212 	    NULL, xname, "Watchdog timeouts");
   2213 
   2214 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2215 	    xname, "Good Packets Received");
   2216 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2217 	    xname, "Good Octets Received");
   2218 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2219 	    xname, "Multicast Packets Received");
   2220 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2221 	    xname, "Good Packets Transmitted");
   2222 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2223 	    xname, "Good Octets Transmitted");
   2224 	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
   2225 	    xname, "IRQs on queue");
   2226 	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
   2227 	    xname, "RX irqs on queue");
   2228 	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
   2229 	    xname, "RX packets");
   2230 	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
   2231 	    xname, "RX bytes");
   2232 	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
   2233 	    xname, "Discarded RX packets");
   2234 	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
   2235 	    xname, "TX Packets");
   2236 	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
   2237 	    xname, "# of times not enough descriptors were available during TX");
   2238 	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
   2239 	    xname, "TX TSO");
   2240 }
   2241 
   2242 static void
   2243 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2244 	const char *description, int *limit, int value)
   2245 {
   2246 	device_t dev = adapter->dev;
   2247 	struct sysctllog **log;
   2248 	const struct sysctlnode *rnode, *cnode;
   2249 
   2250 	log = &adapter->sysctllog;
   2251 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2252 		aprint_error_dev(dev, "could not create sysctl root\n");
   2253 		return;
   2254 	}
   2255 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2256 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2257 	    name, SYSCTL_DESCR(description),
   2258 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2259 		aprint_error_dev(dev, "could not create sysctl\n");
   2260 	*limit = value;
   2261 }
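        /*
        ** Usage sketch for the helper above (illustrative only; the field
        ** name and default value are assumptions, not taken from this
        ** file): expose a read/write integer tunable such as a
        ** per-interrupt receive processing limit.
        */
        #if 0
        	ixv_set_sysctl_value(adapter, "rx_process_limit",
        	    "max number of RX packets to process per interrupt",
        	    &adapter->rx_process_limit, 256);
        #endif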
   2262 
   2263 /**********************************************************************
   2264  *
   2265  *  This routine is called only when the debug sysctl is set to 1.
   2266  *  It provides a way to take a look at important statistics
   2267  *  maintained by the driver and hardware.
   2268  *
   2269  **********************************************************************/
   2270 static void
   2271 ixv_print_debug_info(struct adapter *adapter)
   2272 {
   2273 	device_t	dev = adapter->dev;
   2274 	struct ixgbe_hw	*hw = &adapter->hw;
   2275 	struct ix_queue	*que = adapter->queues;
   2276 	struct rx_ring	*rxr;
   2277 	struct tx_ring	*txr;
   2278 #ifdef LRO
   2279 	struct lro_ctrl	*lro;
   2280 #endif /* LRO */
   2281 
   2282 	device_printf(dev, "Error Byte Count = %u\n",
   2283 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2284 
   2285 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2286 		txr = que->txr;
   2287 		rxr = que->rxr;
   2288 #ifdef LRO
   2289 		lro = &rxr->lro;
   2290 #endif /* LRO */
   2291 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2292 		    que->msix, (long)que->irqs.ev_count);
   2293 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2294 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2295 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2296 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2297 #ifdef LRO
   2298 		device_printf(dev, "RX(%d) LRO Queued = %lld\n",
   2299 		    rxr->me, (long long)lro->lro_queued);
   2300 		device_printf(dev, "RX(%d) LRO Flushed = %lld\n",
   2301 		    rxr->me, (long long)lro->lro_flushed);
   2302 #endif /* LRO */
   2303 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2304 		    txr->me, (long)txr->total_packets.ev_count);
   2305 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2306 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2307 	}
   2308 
   2309 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2310 	    (long)adapter->link_irq.ev_count);
   2311 	return;
   2312 }
   2313 
   2314 static int
   2315 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2316 {
   2317 	struct sysctlnode node;
   2318 	int error, result;
   2319 	struct adapter *adapter;
   2320 
   2321 	node = *rnode;
   2322 	adapter = (struct adapter *)node.sysctl_data;
   2323 	node.sysctl_data = &result;
   2324 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2325 
   2326 	if (error)
   2327 		return error;
   2328 
   2329 	if (result == 1)
   2330 		ixv_print_debug_info(adapter);
   2331 
   2332 	return 0;
   2333 }
   2334 
   2335 const struct sysctlnode *
   2336 ixv_sysctl_instance(struct adapter *adapter)
   2337 {
   2338 	const char *dvname;
   2339 	struct sysctllog **log;
   2340 	int rc;
   2341 	const struct sysctlnode *rnode;
   2342 
   2343 	log = &adapter->sysctllog;
   2344 	dvname = device_xname(adapter->dev);
   2345 
   2346 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2347 	    0, CTLTYPE_NODE, dvname,
   2348 	    SYSCTL_DESCR("ixv information and settings"),
   2349 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2350 		goto err;
   2351 
   2352 	return rnode;
   2353 err:
   2354 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2355 	return NULL;
   2356 }
   2357