      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.55 2017/03/03 04:37:05 msaitoh Exp $*/
     35 
     36 #ifdef _KERNEL_OPT
     37 #include "opt_inet.h"
     38 #include "opt_inet6.h"
     39 #include "opt_net_mpsafe.h"
     40 #endif
     41 
     42 #include "ixgbe.h"
     43 #include "vlan.h"
     44 
     45 /*********************************************************************
     46  *  Driver version
     47  *********************************************************************/
     48 char ixv_driver_version[] = "1.4.6-k";
     49 
     50 /*********************************************************************
     51  *  PCI Device ID Table
     52  *
      53  *  Used by probe to select the devices to attach to
     54  *  Last field stores an index into ixv_strings
     55  *  Last entry must be all 0s
     56  *
     57  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     58  *********************************************************************/
     59 
     60 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     61 {
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     66 	/* required last entry */
     67 	{0, 0, 0, 0, 0}
     68 };
     69 
     70 /*********************************************************************
     71  *  Table of branding strings
     72  *********************************************************************/
     73 
     74 static const char    *ixv_strings[] = {
     75 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     76 };
     77 
     78 /*********************************************************************
     79  *  Function prototypes
     80  *********************************************************************/
     81 static int      ixv_probe(device_t, cfdata_t, void *);
     82 static void	ixv_attach(device_t, device_t, void *);
     83 static int      ixv_detach(device_t, int);
     84 #if 0
     85 static int      ixv_shutdown(device_t);
     86 #endif
     87 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     88 static int	ixv_init(struct ifnet *);
     89 static void	ixv_init_locked(struct adapter *);
     90 static void     ixv_stop(void *);
     91 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     92 static int      ixv_media_change(struct ifnet *);
     93 static void     ixv_identify_hardware(struct adapter *);
     94 static int      ixv_allocate_pci_resources(struct adapter *,
     95 		    const struct pci_attach_args *);
     96 static int      ixv_allocate_msix(struct adapter *,
     97 		    const struct pci_attach_args *);
     98 static int	ixv_setup_msix(struct adapter *);
     99 static void	ixv_free_pci_resources(struct adapter *);
    100 static void     ixv_local_timer(void *);
    101 static void     ixv_local_timer_locked(void *);
    102 static void     ixv_setup_interface(device_t, struct adapter *);
    103 static void     ixv_config_link(struct adapter *);
    104 
    105 static void     ixv_initialize_transmit_units(struct adapter *);
    106 static void     ixv_initialize_receive_units(struct adapter *);
    107 
    108 static void     ixv_enable_intr(struct adapter *);
    109 static void     ixv_disable_intr(struct adapter *);
    110 static void     ixv_set_multi(struct adapter *);
    111 static void     ixv_update_link_status(struct adapter *);
    112 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    113 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    114 static void	ixv_configure_ivars(struct adapter *);
    115 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    116 
    117 static void	ixv_setup_vlan_support(struct adapter *);
    118 #if 0
    119 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    120 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    121 #endif
    122 
    123 static void	ixv_add_device_sysctls(struct adapter *);
    124 static void	ixv_save_stats(struct adapter *);
    125 static void	ixv_init_stats(struct adapter *);
    126 static void	ixv_update_stats(struct adapter *);
    127 static void	ixv_add_stats_sysctls(struct adapter *);
    128 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    129 		    const char *, int *, int);
    130 
    131 /* The MSI/X Interrupt handlers */
    132 static int	ixv_msix_que(void *);
    133 static int	ixv_msix_mbx(void *);
    134 
    135 /* Deferred interrupt tasklets */
    136 static void	ixv_handle_que(void *);
    137 static void	ixv_handle_mbx(void *);
    138 
    139 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    140 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    141 
    142 #ifdef DEV_NETMAP
    143 /*
    144  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    145  * if_ix.c.
    146  */
    147 extern void ixgbe_netmap_attach(struct adapter *adapter);
    148 
    149 #include <net/netmap.h>
    150 #include <sys/selinfo.h>
    151 #include <dev/netmap/netmap_kern.h>
    152 #endif /* DEV_NETMAP */
    153 
    154 /*********************************************************************
     155  *  Device Interface Entry Points (NetBSD autoconf)
    156  *********************************************************************/
    157 
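         /*
          * NetBSD autoconf(9) attachment: this wires up ixv_probe/ixv_attach/
          * ixv_detach as the match/attach/detach entry points for "ixv".
          * DVF_DETACH_SHUTDOWN marks the device as detachable at shutdown.
          */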
    158 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    159     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    160     DVF_DETACH_SHUTDOWN);
    161 
    162 # if 0
    163 static device_method_t ixv_methods[] = {
    164 	/* Device interface */
    165 	DEVMETHOD(device_probe, ixv_probe),
    166 	DEVMETHOD(device_attach, ixv_attach),
    167 	DEVMETHOD(device_detach, ixv_detach),
    168 	DEVMETHOD(device_shutdown, ixv_shutdown),
    169 	DEVMETHOD_END
    170 };
    171 #endif
    172 
    173 #if 0
    174 static driver_t ixv_driver = {
    175 	"ixv", ixv_methods, sizeof(struct adapter),
    176 };
    177 
    178 devclass_t ixv_devclass;
    179 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    180 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    181 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    182 #ifdef DEV_NETMAP
    183 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    184 #endif /* DEV_NETMAP */
    185 /* XXX depend on 'ix' ? */
    186 #endif
    187 
    188 /*
    189 ** TUNEABLE PARAMETERS:
    190 */
    191 
    192 /* Number of Queues - do not exceed MSIX vectors - 1 */
    193 static int ixv_num_queues = 0;
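         /* FreeBSD loader tunables have no NetBSD equivalent; stub the macro out. */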
    194 #define	TUNABLE_INT(__x, __y)
    195 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    196 
    197 /*
     198 ** AIM: Adaptive Interrupt Moderation,
     199 ** which varies the interrupt rate over
     200 ** time based on the traffic seen on
     201 ** that interrupt vector.
    202 */
    203 static bool ixv_enable_aim = false;
    204 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    205 
    206 /* How many packets rxeof tries to clean at a time */
    207 static int ixv_rx_process_limit = 256;
    208 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    209 
    210 /* How many packets txeof tries to clean at a time */
    211 static int ixv_tx_process_limit = 256;
    212 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    213 
    214 /*
    215 ** Number of TX descriptors per ring,
     216 ** set higher than RX as this seems to
     217 ** be the better performing choice.
    218 */
    219 static int ixv_txd = DEFAULT_TXD;
    220 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    221 
    222 /* Number of RX descriptors per ring */
    223 static int ixv_rxd = DEFAULT_RXD;
    224 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    225 
    226 /*
     227 ** Shadow VFTA table: needed because the
     228 ** real filter table gets cleared during
     229 ** a soft reset and must be repopulated.
    230 */
    231 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
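         /* ixv_setup_vlan_support() replays this shadow copy into the hardware. */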
    232 
    233 #ifdef NET_MPSAFE
    234 #define IXGBE_MPSAFE		1
    235 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    236 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    237 #else
    238 #define IXGBE_CALLOUT_FLAGS	0
    239 #define IXGBE_SOFTINFT_FLAGS	0
    240 #endif
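         /*
          * With options NET_MPSAFE, the callout and softints below are
          * established MP-safe, i.e. they run without the kernel lock.
          */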
    241 
    242 /*********************************************************************
    243  *  Device identification routine
    244  *
     245  *  ixv_probe determines whether the driver should attach to the
     246  *  adapter, based on the adapter's PCI vendor/device ID.
     247  *
     248  *  return nonzero on a match, 0 otherwise
    249  *********************************************************************/
    250 
    251 static int
    252 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    253 {
    254 #ifdef __HAVE_PCI_MSI_MSIX
    255 	const struct pci_attach_args *pa = aux;
    256 
    257 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    258 #else
    259 	return 0;
    260 #endif
    261 }
    262 
    263 static ixgbe_vendor_info_t *
    264 ixv_lookup(const struct pci_attach_args *pa)
    265 {
    266 	pcireg_t subid;
    267 	ixgbe_vendor_info_t *ent;
    268 
    269 	INIT_DEBUGOUT("ixv_lookup: begin");
    270 
    271 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    272 		return NULL;
    273 
    274 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    275 
    276 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    277 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    278 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    279 
    280 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    281 		     (ent->subvendor_id == 0)) &&
    282 
    283 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    284 		     (ent->subdevice_id == 0))) {
    285 			return ent;
    286 		}
    287 	}
    288 	return NULL;
    289 }
    290 
    291 
    292 /*********************************************************************
    293  *  Device initialization routine
    294  *
    295  *  The attach entry point is called when the driver is being loaded.
    296  *  This routine identifies the type of hardware, allocates all resources
    297  *  and initializes the hardware.
    298  *
     299  *  (returns void on NetBSD; failures are reported via aprint_error)
    300  *********************************************************************/
    301 
    302 static void
    303 ixv_attach(device_t parent, device_t dev, void *aux)
    304 {
    305 	struct adapter *adapter;
    306 	struct ixgbe_hw *hw;
    307 	int             error = 0;
    308 	ixgbe_vendor_info_t *ent;
    309 	const struct pci_attach_args *pa = aux;
    310 
    311 	INIT_DEBUGOUT("ixv_attach: begin");
    312 
    313 	/* Allocate, clear, and link in our adapter structure */
    314 	adapter = device_private(dev);
    315 	adapter->dev = dev;
    316 	hw = &adapter->hw;
    317 
    318 #ifdef DEV_NETMAP
    319 	adapter->init_locked = ixv_init_locked;
    320 	adapter->stop_locked = ixv_stop;
    321 #endif
    322 
    323 	adapter->osdep.pc = pa->pa_pc;
    324 	adapter->osdep.tag = pa->pa_tag;
    325 	if (pci_dma64_available(pa))
    326 		adapter->osdep.dmat = pa->pa_dmat64;
    327 	else
    328 		adapter->osdep.dmat = pa->pa_dmat;
    329 	adapter->osdep.attached = false;
    330 
    331 	ent = ixv_lookup(pa);
    332 
    333 	KASSERT(ent != NULL);
    334 
    335 	aprint_normal(": %s, Version - %s\n",
    336 	    ixv_strings[ent->index], ixv_driver_version);
    337 
    338 	/* Core Lock Init*/
    339 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    340 
    341 	/* Set up the timer callout */
    342 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    343 
    344 	/* Determine hardware revision */
    345 	ixv_identify_hardware(adapter);
    346 
    347 	/* Do base PCI setup - map BAR0 */
    348 	if (ixv_allocate_pci_resources(adapter, pa)) {
    349 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    350 		error = ENXIO;
    351 		goto err_out;
    352 	}
    353 
    354 	/* Sysctls for limiting the amount of work done in the taskqueues */
    355 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    356 	    "max number of rx packets to process",
    357 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    358 
    359 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    360 	    "max number of tx packets to process",
    361 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    362 
    363 	/* Do descriptor calc and sanity checks */
    364 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    365 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    366 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    367 		adapter->num_tx_desc = DEFAULT_TXD;
    368 	} else
    369 		adapter->num_tx_desc = ixv_txd;
    370 
    371 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    372 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    373 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    374 		adapter->num_rx_desc = DEFAULT_RXD;
    375 	} else
    376 		adapter->num_rx_desc = ixv_rxd;
    377 
    378 	/* Allocate our TX/RX Queues */
    379 	if (ixgbe_allocate_queues(adapter)) {
    380 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    381 		error = ENOMEM;
    382 		goto err_out;
    383 	}
    384 
    385 	/*
     386 	** Initialize the shared code; at
     387 	** this point the MAC type is set.
    388 	*/
    389 	error = ixgbe_init_shared_code(hw);
    390 	if (error) {
    391 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    392 		error = EIO;
    393 		goto err_late;
    394 	}
    395 
    396 	/* Setup the mailbox */
    397 	ixgbe_init_mbx_params_vf(hw);
    398 
    399 	/* Reset mbox api to 1.0 */
    400 	error = ixgbe_reset_hw(hw);
    401 	if (error == IXGBE_ERR_RESET_FAILED)
    402 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    403 	else if (error)
    404 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    405 	if (error) {
    406 		error = EIO;
    407 		goto err_late;
    408 	}
    409 
     410 	/* Negotiate mailbox API version (failure is not fatal; we stay on 1.0) */
    411 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    412 	if (error)
    413 		aprint_debug_dev(dev,
    414 		    "MBX API 1.1 negotiation failed! Error %d\n", error);
    415 
    416 	error = ixgbe_init_hw(hw);
    417 	if (error) {
    418 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    419 		error = EIO;
    420 		goto err_late;
    421 	}
    422 
    423 	error = ixv_allocate_msix(adapter, pa);
    424 	if (error) {
    425 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    426 		goto err_late;
    427 	}
    428 
    429 	/* If no mac address was assigned, make a random one */
    430 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    431 		u8 addr[ETHER_ADDR_LEN];
    432 		uint64_t rndval = cprng_fast64();
    433 
    434 		memcpy(addr, &rndval, sizeof(addr));
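         		/*
         		 * Clear the I/G (multicast) bit and set the U/L bit so the
         		 * random address is a valid locally administered unicast MAC.
         		 */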
    435 		addr[0] &= 0xFE;
    436 		addr[0] |= 0x02;
    437 		bcopy(addr, hw->mac.addr, sizeof(addr));
    438 	}
    439 
     440 	/* hw.ixv defaults init */
    441 	adapter->enable_aim = ixv_enable_aim;
    442 
    443 	/* Setup OS specific network interface */
    444 	ixv_setup_interface(dev, adapter);
    445 
    446 	/* Do the stats setup */
    447 	ixv_save_stats(adapter);
    448 	ixv_init_stats(adapter);
    449 
    450 	/* Register for VLAN events */
    451 #if 0 /* XXX delete after write? */
    452 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    453 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    454 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    455 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    456 #endif
    457 
    458 	/* Add sysctls */
    459 	ixv_add_device_sysctls(adapter);
    460 	ixv_add_stats_sysctls(adapter);
    461 
    462 #ifdef DEV_NETMAP
    463 	ixgbe_netmap_attach(adapter);
    464 #endif /* DEV_NETMAP */
    465 	INIT_DEBUGOUT("ixv_attach: end");
    466 	adapter->osdep.attached = true;
    467 	return;
    468 
    469 err_late:
    470 	ixgbe_free_transmit_structures(adapter);
    471 	ixgbe_free_receive_structures(adapter);
    472 err_out:
    473 	ixv_free_pci_resources(adapter);
    474 	return;
    475 
    476 }
    477 
    478 /*********************************************************************
    479  *  Device removal routine
    480  *
    481  *  The detach entry point is called when the driver is being removed.
    482  *  This routine stops the adapter and deallocates all the resources
    483  *  that were allocated for driver operation.
    484  *
    485  *  return 0 on success, positive on failure
    486  *********************************************************************/
    487 
    488 static int
    489 ixv_detach(device_t dev, int flags)
    490 {
    491 	struct adapter *adapter = device_private(dev);
    492 	struct ix_queue *que = adapter->queues;
    493 	struct tx_ring *txr = adapter->tx_rings;
    494 	struct rx_ring *rxr = adapter->rx_rings;
    495 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    496 
    497 	INIT_DEBUGOUT("ixv_detach: begin");
    498 	if (adapter->osdep.attached == false)
    499 		return 0;
    500 
    501 #if NVLAN > 0
    502 	/* Make sure VLANS are not using driver */
    503 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    504 		;	/* nothing to do: no VLANs */
    505 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    506 		vlan_ifdetach(adapter->ifp);
    507 	else {
    508 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    509 		return EBUSY;
    510 	}
    511 #endif
    512 
    513 	IXGBE_CORE_LOCK(adapter);
    514 	ixv_stop(adapter);
    515 	IXGBE_CORE_UNLOCK(adapter);
    516 
    517 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    518 #ifndef IXGBE_LEGACY_TX
    519 		softint_disestablish(txr->txr_si);
    520 #endif
    521 		softint_disestablish(que->que_si);
    522 	}
    523 
    524 	/* Drain the Mailbox(link) queue */
    525 	softint_disestablish(adapter->link_si);
    526 
    527 	/* Unregister VLAN events */
    528 #if 0 /* XXX msaitoh delete after write? */
    529 	if (adapter->vlan_attach != NULL)
    530 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    531 	if (adapter->vlan_detach != NULL)
    532 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    533 #endif
    534 
    535 	ether_ifdetach(adapter->ifp);
    536 	callout_halt(&adapter->timer, NULL);
    537 #ifdef DEV_NETMAP
    538 	netmap_detach(adapter->ifp);
    539 #endif /* DEV_NETMAP */
    540 	ixv_free_pci_resources(adapter);
    541 #if 0 /* XXX the NetBSD port is probably missing something here */
    542 	bus_generic_detach(dev);
    543 #endif
    544 	if_detach(adapter->ifp);
    545 	if_percpuq_destroy(adapter->ipq);
    546 
    547 	sysctl_teardown(&adapter->sysctllog);
    548 	evcnt_detach(&adapter->handleq);
    549 	evcnt_detach(&adapter->req);
    550 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    551 	evcnt_detach(&adapter->mbuf_defrag_failed);
    552 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    553 	evcnt_detach(&adapter->einval_tx_dma_setup);
    554 	evcnt_detach(&adapter->other_tx_dma_setup);
    555 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    556 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    557 	evcnt_detach(&adapter->watchdog_events);
    558 	evcnt_detach(&adapter->tso_err);
    559 	evcnt_detach(&adapter->link_irq);
    560 
    561 	txr = adapter->tx_rings;
    562 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    563 		evcnt_detach(&adapter->queues[i].irqs);
    564 		evcnt_detach(&txr->no_desc_avail);
    565 		evcnt_detach(&txr->total_packets);
    566 		evcnt_detach(&txr->tso_tx);
    567 #ifndef IXGBE_LEGACY_TX
    568 		evcnt_detach(&txr->pcq_drops);
    569 #endif
    570 
    571 		evcnt_detach(&rxr->rx_packets);
    572 		evcnt_detach(&rxr->rx_bytes);
    573 		evcnt_detach(&rxr->rx_copies);
    574 		evcnt_detach(&rxr->no_jmbuf);
    575 		evcnt_detach(&rxr->rx_discarded);
    576 	}
    577 	evcnt_detach(&stats->ipcs);
    578 	evcnt_detach(&stats->l4cs);
    579 	evcnt_detach(&stats->ipcs_bad);
    580 	evcnt_detach(&stats->l4cs_bad);
    581 
    582 	/* Packet Reception Stats */
    583 	evcnt_detach(&stats->vfgorc);
    584 	evcnt_detach(&stats->vfgprc);
    585 	evcnt_detach(&stats->vfmprc);
    586 
    587 	/* Packet Transmission Stats */
    588 	evcnt_detach(&stats->vfgotc);
    589 	evcnt_detach(&stats->vfgptc);
    590 
    591 	ixgbe_free_transmit_structures(adapter);
    592 	ixgbe_free_receive_structures(adapter);
    593 
    594 	IXGBE_CORE_LOCK_DESTROY(adapter);
    595 	return (0);
    596 }
    597 
    598 /*********************************************************************
    599  *
    600  *  Shutdown entry point
    601  *
    602  **********************************************************************/
    603 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    604 static int
    605 ixv_shutdown(device_t dev)
    606 {
    607 	struct adapter *adapter = device_private(dev);
    608 	IXGBE_CORE_LOCK(adapter);
    609 	ixv_stop(adapter);
    610 	IXGBE_CORE_UNLOCK(adapter);
    611 	return (0);
    612 }
    613 #endif
    614 
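         /*
          * Interface flags change callback, registered via ether_set_ifflags_cb().
          * Returning ENETRESET indicates that the interface needs to be
          * reinitialized for the new flags to take effect.
          */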
    615 static int
    616 ixv_ifflags_cb(struct ethercom *ec)
    617 {
    618 	struct ifnet *ifp = &ec->ec_if;
    619 	struct adapter *adapter = ifp->if_softc;
    620 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    621 
    622 	IXGBE_CORE_LOCK(adapter);
    623 
    624 	if (change != 0)
    625 		adapter->if_flags = ifp->if_flags;
    626 
    627 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    628 		rc = ENETRESET;
    629 
    630 	IXGBE_CORE_UNLOCK(adapter);
    631 
    632 	return rc;
    633 }
    634 
    635 /*********************************************************************
    636  *  Ioctl entry point
    637  *
    638  *  ixv_ioctl is called when the user wants to configure the
    639  *  interface.
    640  *
    641  *  return 0 on success, positive on failure
    642  **********************************************************************/
    643 
    644 static int
    645 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    646 {
    647 	struct adapter	*adapter = ifp->if_softc;
    648 	struct ifcapreq *ifcr = data;
    649 	struct ifreq	*ifr = (struct ifreq *) data;
    650 	int             error = 0;
    651 	int l4csum_en;
    652 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    653 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    654 
    655 	switch (command) {
    656 	case SIOCSIFFLAGS:
    657 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    658 		break;
    659 	case SIOCADDMULTI:
    660 	case SIOCDELMULTI:
    661 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    662 		break;
    663 	case SIOCSIFMEDIA:
    664 	case SIOCGIFMEDIA:
    665 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    666 		break;
    667 	case SIOCSIFCAP:
    668 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    669 		break;
    670 	case SIOCSIFMTU:
    671 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    672 		break;
    673 	default:
    674 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    675 		break;
    676 	}
    677 
    678 	switch (command) {
    679 	case SIOCSIFMEDIA:
    680 	case SIOCGIFMEDIA:
    681 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    682 	case SIOCSIFCAP:
    683 		/* Layer-4 Rx checksum offload has to be turned on and
    684 		 * off as a unit.
    685 		 */
    686 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    687 		if (l4csum_en != l4csum && l4csum_en != 0)
    688 			return EINVAL;
    689 		/*FALLTHROUGH*/
    690 	case SIOCADDMULTI:
    691 	case SIOCDELMULTI:
    692 	case SIOCSIFFLAGS:
    693 	case SIOCSIFMTU:
    694 	default:
    695 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    696 			return error;
    697 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    698 			;
    699 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    700 			IXGBE_CORE_LOCK(adapter);
    701 			ixv_init_locked(adapter);
    702 			IXGBE_CORE_UNLOCK(adapter);
    703 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    704 			/*
    705 			 * Multicast list has changed; set the hardware filter
    706 			 * accordingly.
    707 			 */
    708 			IXGBE_CORE_LOCK(adapter);
    709 			ixv_disable_intr(adapter);
    710 			ixv_set_multi(adapter);
    711 			ixv_enable_intr(adapter);
    712 			IXGBE_CORE_UNLOCK(adapter);
    713 		}
    714 		return 0;
    715 	}
    716 }
    717 
    718 /*********************************************************************
    719  *  Init entry point
    720  *
    721  *  This routine is used in two ways. It is used by the stack as
    722  *  init entry point in network interface structure. It is also used
    723  *  by the driver as a hw/sw initialization routine to get to a
    724  *  consistent state.
    725  *
    726  *  return 0 on success, positive on failure
    727  **********************************************************************/
    728 #define IXGBE_MHADD_MFS_SHIFT 16
    729 
    730 static void
    731 ixv_init_locked(struct adapter *adapter)
    732 {
    733 	struct ifnet	*ifp = adapter->ifp;
    734 	device_t 	dev = adapter->dev;
    735 	struct ixgbe_hw *hw = &adapter->hw;
    736 	int error = 0;
    737 
    738 	INIT_DEBUGOUT("ixv_init_locked: begin");
    739 	KASSERT(mutex_owned(&adapter->core_mtx));
    740 	hw->adapter_stopped = FALSE;
    741 	ixgbe_stop_adapter(hw);
    742         callout_stop(&adapter->timer);
    743 
    744         /* reprogram the RAR[0] in case user changed it. */
    745         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    746 
    747 	/* Get the latest mac address, User can use a LAA */
    748 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    749 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    750         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    751 	hw->addr_ctrl.rar_used_count = 1;
    752 
    753 	/* Prepare transmit descriptors and buffers */
    754 	if (ixgbe_setup_transmit_structures(adapter)) {
    755 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    756 		ixv_stop(adapter);
    757 		return;
    758 	}
    759 
    760 	/* Reset VF and renegotiate mailbox API version */
    761 	ixgbe_reset_hw(hw);
    762 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    763 	if (error)
    764 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    765 
    766 	ixv_initialize_transmit_units(adapter);
    767 
    768 	/* Setup Multicast table */
    769 	ixv_set_multi(adapter);
    770 
    771 	/*
    772 	** Determine the correct mbuf pool
    773 	** for doing jumbo/headersplit
    774 	*/
    775 	if (ifp->if_mtu > ETHERMTU)
    776 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    777 	else
    778 		adapter->rx_mbuf_sz = MCLBYTES;
    779 
    780 	/* Prepare receive descriptors and buffers */
    781 	if (ixgbe_setup_receive_structures(adapter)) {
    782 		device_printf(dev, "Could not setup receive structures\n");
    783 		ixv_stop(adapter);
    784 		return;
    785 	}
    786 
    787 	/* Configure RX settings */
    788 	ixv_initialize_receive_units(adapter);
    789 
    790 #if 0 /* XXX isn't it required? -- msaitoh  */
    791 	/* Set the various hardware offload abilities */
    792 	ifp->if_hwassist = 0;
    793 	if (ifp->if_capenable & IFCAP_TSO4)
    794 		ifp->if_hwassist |= CSUM_TSO;
    795 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    796 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    797 #if __FreeBSD_version >= 800000
    798 		ifp->if_hwassist |= CSUM_SCTP;
    799 #endif
    800 	}
    801 #endif
    802 
    803 	/* Set up VLAN offload and filter */
    804 	ixv_setup_vlan_support(adapter);
    805 
    806 	/* Set up MSI/X routing */
    807 	ixv_configure_ivars(adapter);
    808 
    809 	/* Set up auto-mask */
    810 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    811 
    812         /* Set moderation on the Link interrupt */
    813         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    814 
    815 	/* Stats init */
    816 	ixv_init_stats(adapter);
    817 
    818 	/* Config/Enable Link */
    819 	ixv_config_link(adapter);
    820 	hw->mac.get_link_status = TRUE;
    821 
    822 	/* Start watchdog */
    823 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    824 
    825 	/* And now turn on interrupts */
    826 	ixv_enable_intr(adapter);
    827 
    828 	/* Now inform the stack we're ready */
    829 	ifp->if_flags |= IFF_RUNNING;
    830 	ifp->if_flags &= ~IFF_OACTIVE;
    831 
    832 	return;
    833 }
    834 
    835 static int
    836 ixv_init(struct ifnet *ifp)
    837 {
    838 	struct adapter *adapter = ifp->if_softc;
    839 
    840 	IXGBE_CORE_LOCK(adapter);
    841 	ixv_init_locked(adapter);
    842 	IXGBE_CORE_UNLOCK(adapter);
    843 	return 0;
    844 }
    845 
    846 
    847 /*
    848 **
    849 ** MSIX Interrupt Handlers and Tasklets
    850 **
    851 */
    852 
    853 static inline void
    854 ixv_enable_queue(struct adapter *adapter, u32 vector)
    855 {
    856 	struct ixgbe_hw *hw = &adapter->hw;
    857 	u32	queue = 1 << vector;
    858 	u32	mask;
    859 
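         	/*
         	 * Setting the vector's bit in VTEIMS unmasks (re-enables) that
         	 * queue interrupt; ixv_disable_queue() masks it via VTEIMC.
         	 */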
    860 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    861 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    862 }
    863 
    864 static inline void
    865 ixv_disable_queue(struct adapter *adapter, u32 vector)
    866 {
    867 	struct ixgbe_hw *hw = &adapter->hw;
    868 	u64	queue = (u64)(1 << vector);
    869 	u32	mask;
    870 
    871 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    872 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    873 }
    874 
    875 static inline void
    876 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    877 {
    878 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    879 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    880 }
    881 
    882 
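         /*
          * Deferred (softint) queue handler: completes the RX/TX cleanup that
          * ixv_msix_que() deferred, restarts transmission if packets are
          * queued, and reschedules itself while more work remains before
          * re-enabling the queue interrupt.
          */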
    883 static void
    884 ixv_handle_que(void *context)
    885 {
    886 	struct ix_queue *que = context;
    887 	struct adapter  *adapter = que->adapter;
    888 	struct tx_ring	*txr = que->txr;
    889 	struct ifnet    *ifp = adapter->ifp;
    890 	bool		more;
    891 
    892 	adapter->handleq.ev_count++;
    893 
    894 	if (ifp->if_flags & IFF_RUNNING) {
    895 		more = ixgbe_rxeof(que);
    896 		IXGBE_TX_LOCK(txr);
    897 		ixgbe_txeof(txr);
    898 #ifndef IXGBE_LEGACY_TX
    899 		if (pcq_peek(txr->txr_interq) != NULL)
    900 			ixgbe_mq_start_locked(ifp, txr);
    901 #endif
    902 		/* Only for queue 0 */
    903 		if ((&adapter->queues[0] == que)
    904 		    && (!IFQ_IS_EMPTY(&ifp->if_snd)))
    905 			ixgbe_start_locked(txr, ifp);
    906 		IXGBE_TX_UNLOCK(txr);
    907 		if (more) {
    908 			adapter->req.ev_count++;
    909 			softint_schedule(que->que_si);
    910 			return;
    911 		}
    912 	}
    913 
    914 	/* Reenable this interrupt */
    915 	ixv_enable_queue(adapter, que->msix);
    916 	return;
    917 }
    918 
    919 /*********************************************************************
    920  *
     921  *  MSI-X Queue Interrupt Service routine
    922  *
    923  **********************************************************************/
    924 int
    925 ixv_msix_que(void *arg)
    926 {
    927 	struct ix_queue	*que = arg;
    928 	struct adapter  *adapter = que->adapter;
    929 #ifdef IXGBE_LEGACY_TX
    930 	struct ifnet    *ifp = adapter->ifp;
    931 #endif
    932 	struct tx_ring	*txr = que->txr;
    933 	struct rx_ring	*rxr = que->rxr;
    934 	bool		more;
    935 	u32		newitr = 0;
    936 
    937 	ixv_disable_queue(adapter, que->msix);
    938 	++que->irqs.ev_count;
    939 
    940 #ifdef __NetBSD__
    941 	/* Don't run ixgbe_rxeof in interrupt context */
    942 	more = true;
    943 #else
    944 	more = ixgbe_rxeof(que);
    945 #endif
    946 
    947 	IXGBE_TX_LOCK(txr);
    948 	ixgbe_txeof(txr);
    949 	IXGBE_TX_UNLOCK(txr);
    950 
    951 	/* Do AIM now? */
    952 
    953 	if (adapter->enable_aim == false)
    954 		goto no_calc;
    955 	/*
    956 	** Do Adaptive Interrupt Moderation:
    957         **  - Write out last calculated setting
    958 	**  - Calculate based on average size over
    959 	**    the last interval.
    960 	*/
    961         if (que->eitr_setting)
    962                 IXGBE_WRITE_REG(&adapter->hw,
    963                     IXGBE_VTEITR(que->msix),
    964 		    que->eitr_setting);
    965 
    966         que->eitr_setting = 0;
    967 
    968         /* Idle, do nothing */
    969         if ((txr->bytes == 0) && (rxr->bytes == 0))
    970                 goto no_calc;
    971 
    972 	if ((txr->bytes) && (txr->packets))
    973                	newitr = txr->bytes/txr->packets;
    974 	if ((rxr->bytes) && (rxr->packets))
    975 		newitr = max(newitr,
    976 		    (rxr->bytes / rxr->packets));
    977 	newitr += 24; /* account for hardware frame, crc */
    978 
    979 	/* set an upper boundary */
    980 	newitr = min(newitr, 3000);
    981 
    982 	/* Be nice to the mid range */
    983 	if ((newitr > 300) && (newitr < 1200))
    984 		newitr = (newitr / 3);
    985 	else
    986 		newitr = (newitr / 2);
    987 
    988 	newitr |= newitr << 16;
    989 
    990         /* save for next interrupt */
    991         que->eitr_setting = newitr;
    992 
    993         /* Reset state */
    994         txr->bytes = 0;
    995         txr->packets = 0;
    996         rxr->bytes = 0;
    997         rxr->packets = 0;
    998 
    999 no_calc:
   1000 	if (more)
   1001 		softint_schedule(que->que_si);
   1002 	else /* Reenable this interrupt */
   1003 		ixv_enable_queue(adapter, que->msix);
   1004 	return 1;
   1005 }
   1006 
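         /*
          * MSI-X mailbox/link interrupt: read and clear the interrupt cause,
          * schedule the mailbox softint on a link state change, then re-enable
          * the "other" interrupt.
          */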
   1007 static int
   1008 ixv_msix_mbx(void *arg)
   1009 {
   1010 	struct adapter	*adapter = arg;
   1011 	struct ixgbe_hw *hw = &adapter->hw;
   1012 	u32		reg;
   1013 
   1014 	++adapter->link_irq.ev_count;
   1015 
   1016 	/* First get the cause */
   1017 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
   1018 	/* Clear interrupt with write */
   1019 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
   1020 
   1021 	/* Link status change */
   1022 	if (reg & IXGBE_EICR_LSC)
   1023 		softint_schedule(adapter->link_si);
   1024 
   1025 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1026 	return 1;
   1027 }
   1028 
   1029 /*********************************************************************
   1030  *
   1031  *  Media Ioctl callback
   1032  *
   1033  *  This routine is called whenever the user queries the status of
   1034  *  the interface using ifconfig.
   1035  *
   1036  **********************************************************************/
   1037 static void
   1038 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1039 {
   1040 	struct adapter *adapter = ifp->if_softc;
   1041 
   1042 	INIT_DEBUGOUT("ixv_media_status: begin");
   1043 	IXGBE_CORE_LOCK(adapter);
   1044 	ixv_update_link_status(adapter);
   1045 
   1046 	ifmr->ifm_status = IFM_AVALID;
   1047 	ifmr->ifm_active = IFM_ETHER;
   1048 
   1049 	if (!adapter->link_active) {
   1050 		ifmr->ifm_active |= IFM_NONE;
   1051 		IXGBE_CORE_UNLOCK(adapter);
   1052 		return;
   1053 	}
   1054 
   1055 	ifmr->ifm_status |= IFM_ACTIVE;
   1056 
   1057 	switch (adapter->link_speed) {
   1058 		case IXGBE_LINK_SPEED_10GB_FULL:
   1059 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1060 			break;
   1061 		case IXGBE_LINK_SPEED_1GB_FULL:
   1062 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1063 			break;
   1064 		case IXGBE_LINK_SPEED_100_FULL:
   1065 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1066 			break;
   1067 	}
   1068 
   1069 	IXGBE_CORE_UNLOCK(adapter);
   1070 
   1071 	return;
   1072 }
   1073 
   1074 /*********************************************************************
   1075  *
   1076  *  Media Ioctl callback
   1077  *
   1078  *  This routine is called when the user changes speed/duplex using
    1079  *  media/mediaopt option with ifconfig.
   1080  *
   1081  **********************************************************************/
   1082 static int
   1083 ixv_media_change(struct ifnet * ifp)
   1084 {
   1085 	struct adapter *adapter = ifp->if_softc;
   1086 	struct ifmedia *ifm = &adapter->media;
   1087 
   1088 	INIT_DEBUGOUT("ixv_media_change: begin");
   1089 
   1090 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1091 		return (EINVAL);
   1092 
   1093         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1094         case IFM_AUTO:
   1095                 break;
   1096         default:
   1097                 device_printf(adapter->dev, "Only auto media type\n");
   1098 		return (EINVAL);
   1099         }
   1100 
   1101 	return (0);
   1102 }
   1103 
   1104 
   1105 /*********************************************************************
   1106  *  Multicast Update
   1107  *
   1108  *  This routine is called whenever multicast address list is updated.
   1109  *
   1110  **********************************************************************/
   1111 #define IXGBE_RAR_ENTRIES 16
   1112 
   1113 static void
   1114 ixv_set_multi(struct adapter *adapter)
   1115 {
   1116 	struct ether_multi *enm;
   1117 	struct ether_multistep step;
   1118 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1119 	u8	*update_ptr;
   1120 	int	mcnt = 0;
   1121 	struct ethercom *ec = &adapter->osdep.ec;
   1122 
   1123 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1124 
   1125 	ETHER_FIRST_MULTI(step, ec, enm);
   1126 	while (enm != NULL) {
   1127 		bcopy(enm->enm_addrlo,
   1128 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1129 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1130 		mcnt++;
   1131 		/* XXX This might be required --msaitoh */
   1132 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1133 			break;
   1134 		ETHER_NEXT_MULTI(step, enm);
   1135 	}
   1136 
   1137 	update_ptr = mta;
   1138 
   1139 	ixgbe_update_mc_addr_list(&adapter->hw,
   1140 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1141 
   1142 	return;
   1143 }
   1144 
   1145 /*
    1146  * This is an iterator function required by the multicast
    1147  * shared code. It simply feeds the shared code the addresses
    1148  * collected in the mta array by ixv_set_multi(), one by one.
   1149  */
   1150 static u8 *
   1151 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1152 {
   1153 	u8 *addr = *update_ptr;
   1154 	u8 *newptr;
   1155 	*vmdq = 0;
   1156 
   1157 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1158 	*update_ptr = newptr;
   1159 	return addr;
   1160 }
   1161 
   1162 /*********************************************************************
   1163  *  Timer routine
   1164  *
    1165  *  This routine checks the link status, updates statistics,
   1166  *  and runs the watchdog check.
   1167  *
   1168  **********************************************************************/
   1169 
   1170 static void
   1171 ixv_local_timer(void *arg)
   1172 {
   1173 	struct adapter *adapter = arg;
   1174 
   1175 	IXGBE_CORE_LOCK(adapter);
   1176 	ixv_local_timer_locked(adapter);
   1177 	IXGBE_CORE_UNLOCK(adapter);
   1178 }
   1179 
   1180 static void
   1181 ixv_local_timer_locked(void *arg)
   1182 {
   1183 	struct adapter	*adapter = arg;
   1184 	device_t	dev = adapter->dev;
   1185 	struct ix_queue	*que = adapter->queues;
   1186 	u64		queues = 0;
   1187 	int		hung = 0;
   1188 
   1189 	KASSERT(mutex_owned(&adapter->core_mtx));
   1190 
   1191 	ixv_update_link_status(adapter);
   1192 
   1193 	/* Stats Update */
   1194 	ixv_update_stats(adapter);
   1195 
   1196 	/*
   1197 	** Check the TX queues status
   1198 	**      - mark hung queues so we don't schedule on them
   1199 	**      - watchdog only if all queues show hung
   1200 	*/
   1201 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1202 		/* Keep track of queues with work for soft irq */
   1203 		if (que->txr->busy)
   1204 			queues |= ((u64)1 << que->me);
   1205 		/*
    1206 		** Each time txeof runs without cleaning while there
    1207 		** are uncleaned descriptors, it increments busy. If
    1208 		** that reaches the MAX we declare the queue hung.
   1209 		*/
   1210 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1211 			++hung;
   1212 			/* Mark the queue as inactive */
   1213 			adapter->active_queues &= ~((u64)1 << que->me);
   1214 			continue;
   1215 		} else {
   1216 			/* Check if we've come back from hung */
   1217 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1218                                 adapter->active_queues |= ((u64)1 << que->me);
   1219 		}
   1220 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1221 			device_printf(dev,"Warning queue %d "
   1222 			    "appears to be hung!\n", i);
   1223 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1224 			++hung;
   1225 		}
   1226 
   1227 	}
   1228 
   1229 	/* Only truly watchdog if all queues show hung */
   1230 	if (hung == adapter->num_queues)
   1231 		goto watchdog;
   1232 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1233 		ixv_rearm_queues(adapter, queues);
   1234 	}
   1235 
   1236 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1237 	return;
   1238 
   1239 watchdog:
   1240 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1241 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1242 	adapter->watchdog_events.ev_count++;
   1243 	ixv_init_locked(adapter);
   1244 }
   1245 
   1246 /*
    1247 ** Note: this routine updates the OS on the link state;
   1248 **	the real check of the hardware only happens with
   1249 **	a link interrupt.
   1250 */
   1251 static void
   1252 ixv_update_link_status(struct adapter *adapter)
   1253 {
   1254 	struct ifnet	*ifp = adapter->ifp;
   1255 	device_t dev = adapter->dev;
   1256 
   1257 	if (adapter->link_up){
   1258 		if (adapter->link_active == FALSE) {
   1259 			if (bootverbose) {
   1260 				const char *bpsmsg;
   1261 
   1262 				switch (adapter->link_speed) {
   1263 				case IXGBE_LINK_SPEED_10GB_FULL:
   1264 					bpsmsg = "10 Gbps";
   1265 					break;
   1266 				case IXGBE_LINK_SPEED_1GB_FULL:
   1267 					bpsmsg = "1 Gbps";
   1268 					break;
   1269 				case IXGBE_LINK_SPEED_100_FULL:
   1270 					bpsmsg = "100 Mbps";
   1271 					break;
   1272 				default:
   1273 					bpsmsg = "unknown speed";
   1274 					break;
   1275 				}
   1276 				device_printf(dev,"Link is up %s %s \n",
   1277 				    bpsmsg, "Full Duplex");
   1278 			}
   1279 			adapter->link_active = TRUE;
   1280 			if_link_state_change(ifp, LINK_STATE_UP);
   1281 		}
   1282 	} else { /* Link down */
   1283 		if (adapter->link_active == TRUE) {
   1284 			if (bootverbose)
   1285 				device_printf(dev,"Link is Down\n");
   1286 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1287 			adapter->link_active = FALSE;
   1288 		}
   1289 	}
   1290 
   1291 	return;
   1292 }
   1293 
   1294 
   1295 static void
   1296 ixv_ifstop(struct ifnet *ifp, int disable)
   1297 {
   1298 	struct adapter *adapter = ifp->if_softc;
   1299 
   1300 	IXGBE_CORE_LOCK(adapter);
   1301 	ixv_stop(adapter);
   1302 	IXGBE_CORE_UNLOCK(adapter);
   1303 }
   1304 
   1305 /*********************************************************************
   1306  *
   1307  *  This routine disables all traffic on the adapter by issuing a
   1308  *  global reset on the MAC and deallocates TX/RX buffers.
   1309  *
   1310  **********************************************************************/
   1311 
   1312 static void
   1313 ixv_stop(void *arg)
   1314 {
   1315 	struct ifnet   *ifp;
   1316 	struct adapter *adapter = arg;
   1317 	struct ixgbe_hw *hw = &adapter->hw;
   1318 	ifp = adapter->ifp;
   1319 
   1320 	KASSERT(mutex_owned(&adapter->core_mtx));
   1321 
   1322 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1323 	ixv_disable_intr(adapter);
   1324 
   1325 	/* Tell the stack that the interface is no longer active */
   1326 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1327 
   1328 	ixgbe_reset_hw(hw);
   1329 	adapter->hw.adapter_stopped = FALSE;
   1330 	ixgbe_stop_adapter(hw);
   1331 	callout_stop(&adapter->timer);
   1332 
   1333 	/* reprogram the RAR[0] in case user changed it. */
   1334 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1335 
   1336 	return;
   1337 }
   1338 
   1339 
   1340 /*********************************************************************
   1341  *
   1342  *  Determine hardware revision.
   1343  *
   1344  **********************************************************************/
   1345 static void
   1346 ixv_identify_hardware(struct adapter *adapter)
   1347 {
   1348 	pcitag_t tag;
   1349 	pci_chipset_tag_t pc;
   1350 	pcireg_t subid, id;
   1351 	struct ixgbe_hw *hw = &adapter->hw;
   1352 
   1353 	pc = adapter->osdep.pc;
   1354 	tag = adapter->osdep.tag;
   1355 
   1356 	/*
    1357 	** Make sure BUSMASTER is set; on a VM under
    1358 	** KVM it may not be, which breaks things.
   1359 	*/
   1360 	ixgbe_pci_enable_busmaster(pc, tag);
   1361 
   1362 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1363 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1364 
   1365 	/* Save off the information about this board */
   1366 	hw->vendor_id = PCI_VENDOR(id);
   1367 	hw->device_id = PCI_PRODUCT(id);
   1368 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1369 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1370 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1371 
   1372 	/* We need this to determine device-specific things */
   1373 	ixgbe_set_mac_type(hw);
   1374 
   1375 	/* Set the right number of segments */
   1376 	adapter->num_segs = IXGBE_82599_SCATTER;
   1377 
   1378 	return;
   1379 }
   1380 
   1381 /*********************************************************************
   1382  *
   1383  *  Setup MSIX Interrupt resources and handlers
   1384  *
   1385  **********************************************************************/
   1386 static int
   1387 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1388 {
   1389 	device_t	dev = adapter->dev;
   1390 	struct ix_queue *que = adapter->queues;
   1391 	struct		tx_ring *txr = adapter->tx_rings;
   1392 	int 		error, rid, vector = 0;
   1393 	pci_chipset_tag_t pc;
   1394 	pcitag_t	tag;
   1395 	char		intrbuf[PCI_INTRSTR_LEN];
   1396 	char		intr_xname[32];
   1397 	const char	*intrstr = NULL;
   1398 	kcpuset_t	*affinity;
   1399 	int		cpu_id = 0;
   1400 
   1401 	pc = adapter->osdep.pc;
   1402 	tag = adapter->osdep.tag;
   1403 
   1404 	adapter->osdep.nintrs = adapter->num_queues + 1;
   1405 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   1406 	    adapter->osdep.nintrs) != 0) {
   1407 		aprint_error_dev(dev,
   1408 		    "failed to allocate MSI-X interrupt\n");
   1409 		return (ENXIO);
   1410 	}
   1411 
   1412 	kcpuset_create(&affinity, false);
   1413 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1414 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   1415 		    device_xname(dev), i);
   1416 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1417 		    sizeof(intrbuf));
   1418 #ifdef IXGBE_MPSAFE
   1419 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1420 		    true);
   1421 #endif
   1422 		/* Set the handler function */
   1423 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   1424 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   1425 		    intr_xname);
   1426 		if (que->res == NULL) {
   1427 			pci_intr_release(pc, adapter->osdep.intrs,
   1428 			    adapter->osdep.nintrs);
   1429 			aprint_error_dev(dev,
   1430 			    "Failed to register QUE handler\n");
   1431 			kcpuset_destroy(affinity);
   1432 			return (ENXIO);
   1433 		}
   1434 		que->msix = vector;
   1435         	adapter->active_queues |= (u64)(1 << que->msix);
   1436 
   1437 		cpu_id = i;
   1438 		/* Round-robin affinity */
   1439 		kcpuset_zero(affinity);
   1440 		kcpuset_set(affinity, cpu_id % ncpu);
   1441 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1442 		    NULL);
   1443 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1444 		    intrstr);
   1445 		if (error == 0)
   1446 			aprint_normal(", bound queue %d to cpu %d\n",
   1447 			    i, cpu_id % ncpu);
   1448 		else
   1449 			aprint_normal("\n");
   1450 
   1451 #ifndef IXGBE_LEGACY_TX
   1452 		txr->txr_si
   1453 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1454 			ixgbe_deferred_mq_start, txr);
   1455 #endif
   1456 		que->que_si
   1457 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1458 			ixv_handle_que, que);
   1459 		if (que->que_si == NULL) {
   1460 			aprint_error_dev(dev,
   1461 			    "could not establish software interrupt\n");
   1462 		}
   1463 	}
   1464 
   1465 	/* and Mailbox */
   1466 	cpu_id++;
   1467 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   1468 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1469 	    sizeof(intrbuf));
   1470 #ifdef IXGBE_MPSAFE
   1471 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   1472 	    true);
   1473 #endif
   1474 	/* Set the mbx handler function */
   1475 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   1476 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   1477 	    intr_xname);
   1478 	if (adapter->osdep.ihs[vector] == NULL) {
   1479 		adapter->res = NULL;
   1480 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1481 		kcpuset_destroy(affinity);
   1482 		return (ENXIO);
   1483 	}
   1484 	/* Round-robin affinity */
   1485 	kcpuset_zero(affinity);
   1486 	kcpuset_set(affinity, cpu_id % ncpu);
   1487 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1488 
   1489 	aprint_normal_dev(dev,
   1490 	    "for link, interrupting at %s", intrstr);
   1491 	if (error == 0)
   1492 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   1493 	else
   1494 		aprint_normal("\n");
   1495 
   1496 	adapter->vector = vector;
   1497 	/* Tasklets for Mailbox */
   1498 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1499 	    ixv_handle_mbx, adapter);
   1500 	/*
   1501 	** Due to a broken design QEMU will fail to properly
   1502 	** enable the guest for MSIX unless the vectors in
   1503 	** the table are all set up, so we must rewrite the
   1504 	** ENABLE in the MSIX control register again at this
   1505 	** point to cause it to successfully initialize us.
   1506 	*/
   1507 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1508 		int msix_ctrl;
   1509 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1510 		rid += PCI_MSIX_CTL;
   1511 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1512 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1513 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1514 	}
   1515 
   1516 	kcpuset_destroy(affinity);
   1517 	return (0);
   1518 }
   1519 
   1520 /*
    1521  * Set up MSI-X resources; note that the VF
    1522  * device MUST use MSI-X: there is no fallback.
   1523  */
   1524 static int
   1525 ixv_setup_msix(struct adapter *adapter)
   1526 {
   1527 	device_t dev = adapter->dev;
   1528 	int want, queues, msgs;
   1529 
   1530 	/* Must have at least 2 MSIX vectors */
   1531 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1532 	if (msgs < 2) {
   1533 		aprint_error_dev(dev,"MSIX config error\n");
   1534 		return (ENXIO);
   1535 	}
   1536 	msgs = MIN(msgs, IXG_MAX_NINTR);
   1537 
   1538 	/* Figure out a reasonable auto config value */
   1539 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   1540 
   1541 	if (ixv_num_queues != 0)
   1542 		queues = ixv_num_queues;
   1543 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   1544 		queues = IXGBE_VF_MAX_TX_QUEUES;
   1545 
   1546 	/*
   1547 	** Want vectors for the queues,
    1548 	** plus an additional one for the mailbox.
   1549 	*/
   1550 	want = queues + 1;
   1551 	if (msgs >= want)
   1552 		msgs = want;
   1553 	else {
   1554                	aprint_error_dev(dev,
   1555 		    "MSIX Configuration Problem, "
   1556 		    "%d vectors but %d queues wanted!\n",
   1557 		    msgs, want);
   1558 		return -1;
   1559 	}
   1560 
   1561 	adapter->msix_mem = (void *)1; /* XXX */
   1562 	aprint_normal_dev(dev,
   1563 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1564 	adapter->num_queues = queues;
   1565 	return (msgs);
   1566 }
   1567 
   1568 
   1569 static int
   1570 ixv_allocate_pci_resources(struct adapter *adapter,
   1571     const struct pci_attach_args *pa)
   1572 {
   1573 	pcireg_t	memtype;
   1574 	device_t        dev = adapter->dev;
   1575 	bus_addr_t addr;
   1576 	int flags;
   1577 
   1578 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1579 	switch (memtype) {
   1580 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1581 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1582 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1583 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1584 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1585 			goto map_err;
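         		/*
         		 * Register space must not be mapped prefetchable, even if
         		 * the BAR advertises it: reads may have side effects and
         		 * writes must not be merged or reordered.
         		 */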
   1586 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1587 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1588 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1589 		}
   1590 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1591 		     adapter->osdep.mem_size, flags,
   1592 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1593 map_err:
   1594 			adapter->osdep.mem_size = 0;
   1595 			aprint_error_dev(dev, "unable to map BAR0\n");
   1596 			return ENXIO;
   1597 		}
   1598 		break;
   1599 	default:
   1600 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1601 		return ENXIO;
   1602 	}
   1603 	adapter->hw.back = adapter;
   1604 
   1605 	/* Pick up the tuneable queues */
   1606 	adapter->num_queues = ixv_num_queues;
   1607 
    1608 	/*
    1609 	** Now set up MSI-X, which should
    1610 	** return the number of
    1611 	** configured vectors.
    1612 	*/
   1613 	adapter->msix = ixv_setup_msix(adapter);
   1614 	if (adapter->msix == ENXIO)
   1615 		return (ENXIO);
   1616 	else
   1617 		return (0);
   1618 }
   1619 
   1620 static void
   1621 ixv_free_pci_resources(struct adapter * adapter)
   1622 {
   1623 	struct 		ix_queue *que = adapter->queues;
   1624 	int		rid;
   1625 
    1626 	/*
    1627 	**  Release all MSI-X queue resources.
    1628 	*/
   1629 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1630 		if (que->res != NULL)
   1631 			pci_intr_disestablish(adapter->osdep.pc,
   1632 			    adapter->osdep.ihs[i]);
   1633 	}
   1634 
   1635 
    1636 	/* Clean up the mailbox/link interrupt last */
   1637 	rid = adapter->vector;
   1638 
   1639 	if (adapter->osdep.ihs[rid] != NULL) {
   1640 		pci_intr_disestablish(adapter->osdep.pc,
   1641 		    adapter->osdep.ihs[rid]);
   1642 		adapter->osdep.ihs[rid] = NULL;
   1643 	}
   1644 
   1645 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1646 	    adapter->osdep.nintrs);
   1647 
   1648 	if (adapter->osdep.mem_size != 0) {
   1649 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1650 		    adapter->osdep.mem_bus_space_handle,
   1651 		    adapter->osdep.mem_size);
   1652 	}
   1653 
   1654 	return;
   1655 }
   1656 
   1657 /*********************************************************************
   1658  *
   1659  *  Setup networking device structure and register an interface.
   1660  *
   1661  **********************************************************************/
   1662 static void
   1663 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1664 {
   1665 	struct ethercom *ec = &adapter->osdep.ec;
   1666 	struct ifnet   *ifp;
   1667 
   1668 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1669 
   1670 	ifp = adapter->ifp = &ec->ec_if;
   1671 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1672 	ifp->if_baudrate = IF_Gbps(10);
   1673 	ifp->if_init = ixv_init;
   1674 	ifp->if_stop = ixv_ifstop;
   1675 	ifp->if_softc = adapter;
   1676 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1677 #ifdef IXGBE_MPSAFE
   1678 	ifp->if_extflags = IFEF_START_MPSAFE;
   1679 #endif
   1680 	ifp->if_ioctl = ixv_ioctl;
   1681 #ifndef IXGBE_LEGACY_TX
   1682 	ifp->if_transmit = ixgbe_mq_start;
   1683 #endif
   1684 	ifp->if_start = ixgbe_start;
   1685 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1686 	IFQ_SET_READY(&ifp->if_snd);
   1687 
   1688 	if_initialize(ifp);
   1689 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1690 	ether_ifattach(ifp, adapter->hw.mac.addr);
    1691 	/*
    1692 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
    1693 	 * used.
    1694 	 */
   1695 	if_register(ifp);
   1696 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1697 
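         	/*
         	 * IXGBE_MTU_HDR_VLAN adds the Ethernet header, CRC and a
         	 * VLAN tag on top of the MTU.
         	 */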
   1698 	adapter->max_frame_size =
   1699 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
   1700 
   1701 	/*
   1702 	 * Tell the upper layer(s) we support long frames.
   1703 	 */
   1704 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1705 
   1706 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1707 	ifp->if_capenable = 0;
   1708 
   1709 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1710 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1711 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1712 	    		| ETHERCAP_VLAN_MTU;
   1713 	ec->ec_capenable = ec->ec_capabilities;
   1714 
    1715 	/* Advertise LRO capability, but don't enable it by default */
   1716 	ifp->if_capabilities |= IFCAP_LRO;
   1717 #if 0
   1718 	ifp->if_capenable = ifp->if_capabilities;
   1719 #endif
   1720 
    1721 	/*
    1722 	** Don't turn this on by default: if vlans are created on another
    1723 	** pseudo device (e.g. lagg), vlan events are not passed through,
    1724 	** breaking operation, but with HW FILTER off it works. If you use
    1725 	** vlans directly on this driver you can enable this and get full
    1726 	** hardware tag filtering. It is only advertised here (ec_capenable
    1727 	** was snapshotted above), so it stays disabled by default.
    1728 	*/
   1729 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1730 
   1731 	/*
   1732 	 * Specify the media types supported by this adapter and register
   1733 	 * callbacks to update media and link information
   1734 	 */
   1735 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1736 		     ixv_media_status);
   1737 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1738 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1739 
   1740 	return;
   1741 }
   1742 
   1743 static void
   1744 ixv_config_link(struct adapter *adapter)
   1745 {
   1746 	struct ixgbe_hw *hw = &adapter->hw;
   1747 
   1748 	if (hw->mac.ops.check_link)
   1749 		hw->mac.ops.check_link(hw, &adapter->link_speed,
   1750 		    &adapter->link_up, FALSE);
   1751 }
   1752 
   1753 
   1754 /*********************************************************************
   1755  *
   1756  *  Enable transmit unit.
   1757  *
   1758  **********************************************************************/
   1759 static void
   1760 ixv_initialize_transmit_units(struct adapter *adapter)
   1761 {
   1762 	struct tx_ring	*txr = adapter->tx_rings;
   1763 	struct ixgbe_hw	*hw = &adapter->hw;
   1764 
   1765 
   1766 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1767 		u64	tdba = txr->txdma.dma_paddr;
   1768 		u32	txctrl, txdctl;
   1769 
    1770 		/* Set WTHRESH (TXDCTL bits 22:16) to 8 for burst writeback */
   1771 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1772 		txdctl |= (8 << 16);
   1773 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1774 
   1775 		/* Set the HW Tx Head and Tail indices */
    1776 		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
    1777 		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
   1778 
   1779 		/* Set Tx Tail register */
   1780 		txr->tail = IXGBE_VFTDT(i);
   1781 
   1782 		/* Set Ring parameters */
   1783 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1784 		       (tdba & 0x00000000ffffffffULL));
   1785 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1786 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1787 		    adapter->num_tx_desc *
   1788 		    sizeof(struct ixgbe_legacy_tx_desc));
   1789 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1790 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1791 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1792 
   1793 		/* Now enable */
   1794 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1795 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1796 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1797 	}
   1798 
   1799 	return;
   1800 }
   1801 
   1802 
   1803 /*********************************************************************
   1804  *
   1805  *  Setup receive registers and features.
   1806  *
   1807  **********************************************************************/
   1808 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1809 
   1810 static void
   1811 ixv_initialize_receive_units(struct adapter *adapter)
   1812 {
   1813 	struct	rx_ring	*rxr = adapter->rx_rings;
   1814 	struct ixgbe_hw	*hw = &adapter->hw;
   1815 	struct ifnet	*ifp = adapter->ifp;
   1816 	u32		bufsz, rxcsum, psrtype;
   1817 
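         	/*
         	 * IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, so SRRCTL takes the
         	 * receive buffer size in 1 KB units: 4 KB buffers for jumbo
         	 * MTUs, 2 KB otherwise.
         	 */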
   1818 	if (ifp->if_mtu > ETHERMTU)
   1819 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1820 	else
   1821 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1822 
   1823 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1824 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1825 	    IXGBE_PSRTYPE_L2HDR;
   1826 
   1827 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1828 
   1829 	/* Tell PF our max_frame size */
   1830 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1831 
   1832 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1833 		u64 rdba = rxr->rxdma.dma_paddr;
   1834 		u32 reg, rxdctl;
   1835 
   1836 		/* Disable the queue */
   1837 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1838 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1839 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1840 		for (int j = 0; j < 10; j++) {
   1841 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1842 			    IXGBE_RXDCTL_ENABLE)
   1843 				msec_delay(1);
   1844 			else
   1845 				break;
   1846 		}
   1847 		wmb();
   1848 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1849 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1850 		    (rdba & 0x00000000ffffffffULL));
   1851 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1852 		    (rdba >> 32));
   1853 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1854 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1855 
   1856 		/* Reset the ring indices */
   1857 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1858 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1859 
   1860 		/* Set up the SRRCTL register */
   1861 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1862 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1863 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1864 		reg |= bufsz;
   1865 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1866 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1867 
   1868 		/* Capture Rx Tail index */
   1869 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1870 
   1871 		/* Do the queue enabling last */
   1872 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1873 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1874 		for (int k = 0; k < 10; k++) {
   1875 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1876 			    IXGBE_RXDCTL_ENABLE)
   1877 				break;
   1878 			else
   1879 				msec_delay(1);
   1880 		}
   1881 		wmb();
   1882 
   1883 		/* Set the Tail Pointer */
   1884 #ifdef DEV_NETMAP
   1885 		/*
   1886 		 * In netmap mode, we must preserve the buffers made
   1887 		 * available to userspace before the if_init()
   1888 		 * (this is true by default on the TX side, because
   1889 		 * init makes all buffers available to userspace).
   1890 		 *
   1891 		 * netmap_reset() and the device specific routines
   1892 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1893 		 * buffers at the end of the NIC ring, so here we
   1894 		 * must set the RDT (tail) register to make sure
   1895 		 * they are not overwritten.
   1896 		 *
   1897 		 * In this driver the NIC ring starts at RDH = 0,
   1898 		 * RDT points to the last slot available for reception (?),
   1899 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1900 		 */
   1901 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1902 			struct netmap_adapter *na = NA(adapter->ifp);
   1903 			struct netmap_kring *kring = &na->rx_rings[i];
   1904 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1905 
   1906 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1907 		} else
   1908 #endif /* DEV_NETMAP */
   1909 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1910 			    adapter->num_rx_desc - 1);
   1911 	}
   1912 
   1913 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1914 
   1915 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1916 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1917 
   1918 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1919 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1920 
   1921 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1922 
   1923 	return;
   1924 }
   1925 
   1926 static void
   1927 ixv_setup_vlan_support(struct adapter *adapter)
   1928 {
   1929 	struct ixgbe_hw *hw = &adapter->hw;
   1930 	u32		ctrl, vid, vfta, retry;
   1931 	struct rx_ring	*rxr;
   1932 
    1933 	/*
    1934 	** We get here through init_locked, meaning
    1935 	** a soft reset: that has already cleared
    1936 	** the VFTA and other state, so if no
    1937 	** vlans have been registered, do nothing.
    1938 	*/
   1939 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1940 		return;
   1941 
   1942 	/* Enable the queues */
   1943 	for (int i = 0; i < adapter->num_queues; i++) {
   1944 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1945 		ctrl |= IXGBE_RXDCTL_VME;
   1946 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1947 		/*
   1948 		 * Let Rx path know that it needs to store VLAN tag
   1949 		 * as part of extra mbuf info.
   1950 		 */
   1951 		rxr = &adapter->rx_rings[i];
   1952 		rxr->vtag_strip = TRUE;
   1953 	}
   1954 
    1955 	/*
    1956 	** A soft reset zeroes out the VFTA, so
    1957 	** we need to repopulate it now.
    1958 	*/
   1959 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1960 		if (ixv_shadow_vfta[i] == 0)
   1961 			continue;
   1962 		vfta = ixv_shadow_vfta[i];
    1963 		/*
    1964 		** Reconstruct the vlan IDs
    1965 		** based on the bits set in each
    1966 		** of the 32-bit array words.
    1967 		*/
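         		/* e.g. bit 4 set in word 3 corresponds to vlan id 3 * 32 + 4 = 100 */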
   1968 		for (int j = 0; j < 32; j++) {
   1969 			retry = 0;
   1970 			if ((vfta & (1 << j)) == 0)
   1971 				continue;
   1972 			vid = (i * 32) + j;
   1973 			/* Call the shared code mailbox routine */
   1974 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1975 				if (++retry > 5)
   1976 					break;
   1977 			}
   1978 		}
   1979 	}
   1980 }
   1981 
   1982 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
    1983 /*
    1984 ** This routine is run via a vlan config EVENT;
    1985 ** it enables us to use the HW Filter table since
    1986 ** we can get the vlan id. This just creates the
    1987 ** entry in the soft version of the VFTA; init will
    1988 ** repopulate the real table.
    1989 */
   1990 static void
   1991 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1992 {
   1993 	struct adapter	*adapter = ifp->if_softc;
   1994 	u16		index, bit;
   1995 
   1996 	if (ifp->if_softc != arg) /* Not our event */
   1997 		return;
   1998 
   1999 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   2000 		return;
   2001 
   2002 	IXGBE_CORE_LOCK(adapter);
   2003 	index = (vtag >> 5) & 0x7F;
   2004 	bit = vtag & 0x1F;
   2005 	ixv_shadow_vfta[index] |= (1 << bit);
   2006 	/* Re-init to load the changes */
   2007 	ixv_init_locked(adapter);
   2008 	IXGBE_CORE_UNLOCK(adapter);
   2009 }
   2010 
    2011 /*
    2012 ** This routine is run via a vlan
    2013 ** unconfig EVENT; remove our entry
    2014 ** from the soft VFTA.
    2015 */
   2016 static void
   2017 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2018 {
   2019 	struct adapter	*adapter = ifp->if_softc;
   2020 	u16		index, bit;
   2021 
   2022 	if (ifp->if_softc !=  arg)
   2023 		return;
   2024 
   2025 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2026 		return;
   2027 
   2028 	IXGBE_CORE_LOCK(adapter);
   2029 	index = (vtag >> 5) & 0x7F;
   2030 	bit = vtag & 0x1F;
   2031 	ixv_shadow_vfta[index] &= ~(1 << bit);
   2032 	/* Re-init to load the changes */
   2033 	ixv_init_locked(adapter);
   2034 	IXGBE_CORE_UNLOCK(adapter);
   2035 }
   2036 #endif
   2037 
   2038 static void
   2039 ixv_enable_intr(struct adapter *adapter)
   2040 {
   2041 	struct ixgbe_hw *hw = &adapter->hw;
   2042 	struct ix_queue *que = adapter->queues;
   2043 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   2044 
   2045 
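         	/*
         	** Enable the non-queue causes in VTEIMS here (the queue
         	** interrupts are enabled individually below), and program
         	** the auto-clear mask (VTEIAC) so that queue interrupts
         	** auto-clear but the mailbox/link cause does not.
         	*/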
   2046 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   2047 
   2048 	mask = IXGBE_EIMS_ENABLE_MASK;
   2049 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   2050 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   2051 
   2052         for (int i = 0; i < adapter->num_queues; i++, que++)
   2053 		ixv_enable_queue(adapter, que->msix);
   2054 
   2055 	IXGBE_WRITE_FLUSH(hw);
   2056 
   2057 	return;
   2058 }
   2059 
   2060 static void
   2061 ixv_disable_intr(struct adapter *adapter)
   2062 {
   2063 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2064 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   2065 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2066 	return;
   2067 }
   2068 
    2069 /*
    2070 ** Set up the correct IVAR register for a particular MSI-X interrupt
    2071 **  - entry is the register array entry
    2072 **  - vector is the MSI-X vector for this queue
    2073 **  - type is RX/TX/MISC
    2074 */
   2075 static void
   2076 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2077 {
   2078 	struct ixgbe_hw *hw = &adapter->hw;
   2079 	u32 ivar, index;
   2080 
   2081 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2082 
   2083 	if (type == -1) { /* MISC IVAR */
   2084 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2085 		ivar &= ~0xFF;
   2086 		ivar |= vector;
   2087 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2088 	} else {	/* RX/TX IVARS */
   2089 		index = (16 * (entry & 1)) + (8 * type);
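         		/*
         		** Each VTIVAR register carries one byte per RX/TX cause for
         		** two queues; e.g. entry 1, type 0 (RX) gives index 16, so
         		** the vector lands in bits 23:16 of VTIVAR(0).
         		*/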
   2090 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2091 		ivar &= ~(0xFF << index);
   2092 		ivar |= (vector << index);
   2093 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2094 	}
   2095 }
   2096 
   2097 static void
   2098 ixv_configure_ivars(struct adapter *adapter)
   2099 {
   2100 	struct  ix_queue *que = adapter->queues;
   2101 
   2102         for (int i = 0; i < adapter->num_queues; i++, que++) {
   2103 		/* First the RX queue entry */
   2104                 ixv_set_ivar(adapter, i, que->msix, 0);
   2105 		/* ... and the TX */
   2106 		ixv_set_ivar(adapter, i, que->msix, 1);
   2107 		/* Set an initial value in EITR */
   2108                 IXGBE_WRITE_REG(&adapter->hw,
   2109                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   2110 	}
   2111 
    2112 	/* For the mailbox interrupt (type -1; the entry argument is ignored) */
    2113 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2114 }
   2115 
   2116 
    2117 /*
    2118 ** Deferred handler for MSI-X mailbox interrupts
    2119 **  - run outside hard interrupt context since it might sleep
    2120 */
   2121 static void
   2122 ixv_handle_mbx(void *context)
   2123 {
   2124 	struct adapter  *adapter = context;
   2125 
   2126 	ixgbe_check_link(&adapter->hw,
   2127 	    &adapter->link_speed, &adapter->link_up, 0);
   2128 	ixv_update_link_status(adapter);
   2129 }
   2130 
    2131 /*
    2132 ** The VF stats registers never have a truly virgin
    2133 ** starting point, so this routine creates an artificial
    2134 ** one, marking ground zero at attach time: anything counted
    2135 ** since the last baseline is folded into saved_reset_*.
    2136 */
   2137 static void
   2138 ixv_save_stats(struct adapter *adapter)
   2139 {
   2140 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2141 
   2142 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2143 		stats->saved_reset_vfgprc +=
   2144 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2145 		stats->saved_reset_vfgptc +=
   2146 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2147 		stats->saved_reset_vfgorc +=
   2148 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2149 		stats->saved_reset_vfgotc +=
   2150 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2151 		stats->saved_reset_vfmprc +=
   2152 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2153 	}
   2154 }
   2155 
   2156 static void
   2157 ixv_init_stats(struct adapter *adapter)
   2158 {
   2159 	struct ixgbe_hw *hw = &adapter->hw;
   2160 
   2161 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2162 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2163 	adapter->stats.vf.last_vfgorc |=
   2164 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2165 
   2166 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2167 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2168 	adapter->stats.vf.last_vfgotc |=
   2169 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2170 
   2171 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2172 
   2173 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2174 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2175 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2176 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2177 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2178 }
   2179 
   2180 #define UPDATE_STAT_32(reg, last, count)		\
   2181 {							\
   2182 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2183 	if (current < last)				\
   2184 		count.ev_count += 0x100000000LL;	\
   2185 	last = current;					\
   2186 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2187 	count.ev_count |= current;			\
   2188 }
   2189 
   2190 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2191 {							\
   2192 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2193 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2194 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2195 	if (current < last)				\
   2196 		count.ev_count += 0x1000000000LL;	\
   2197 	last = current;					\
   2198 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2199 	count.ev_count |= current;			\
   2200 }
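         /*
         ** The VF counters are narrow (32 bits, or 36 bits for the octet
         ** counters read via _LSB/_MSB) and wrap around.  The macros above
         ** keep 64-bit event counters current by adding 2^32 (or 2^36)
         ** whenever the raw value goes backwards, then splicing the new
         ** low-order bits back in.
         */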
   2201 
   2202 /*
   2203 ** ixv_update_stats - Update the board statistics counters.
   2204 */
   2205 void
   2206 ixv_update_stats(struct adapter *adapter)
   2207 {
   2208         struct ixgbe_hw *hw = &adapter->hw;
   2209 
   2210         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
   2211 	    adapter->stats.vf.vfgprc);
   2212         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
   2213 	    adapter->stats.vf.vfgptc);
   2214         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
   2215 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
   2216         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
   2217 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
   2218         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
   2219 	    adapter->stats.vf.vfmprc);
   2220 }
   2221 
    2222 /**********************************************************************
    2223  *
    2224  *  This routine is called via the "debug" sysctl (ixv_sysctl_debug).
    2225  *  It provides a way to take a look at important statistics
    2226  *  maintained by the driver and hardware.
    2227  *
    2228  **********************************************************************/
   2229 static void
   2230 ixv_print_debug_info(struct adapter *adapter)
   2231 {
   2232         device_t dev = adapter->dev;
   2233         struct ixgbe_hw         *hw = &adapter->hw;
   2234         struct ix_queue         *que = adapter->queues;
   2235         struct rx_ring          *rxr;
   2236         struct tx_ring          *txr;
   2237 #ifdef LRO
   2238         struct lro_ctrl         *lro;
   2239 #endif /* LRO */
   2240 
   2241         device_printf(dev,"Error Byte Count = %u \n",
   2242             IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2243 
   2244         for (int i = 0; i < adapter->num_queues; i++, que++) {
   2245                 txr = que->txr;
   2246                 rxr = que->rxr;
   2247 #ifdef LRO
   2248                 lro = &rxr->lro;
   2249 #endif /* LRO */
   2250                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
   2251                     que->msix, (long)que->irqs.ev_count);
   2252                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
   2253                     rxr->me, (long long)rxr->rx_packets.ev_count);
   2254                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
   2255                     rxr->me, (long)rxr->rx_bytes.ev_count);
   2256 #ifdef LRO
   2257                 device_printf(dev,"RX(%d) LRO Queued= %lld\n",
   2258                     rxr->me, (long long)lro->lro_queued);
   2259                 device_printf(dev,"RX(%d) LRO Flushed= %lld\n",
   2260                     rxr->me, (long long)lro->lro_flushed);
   2261 #endif /* LRO */
   2262                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
   2263                     txr->me, (long)txr->total_packets.ev_count);
   2264                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
   2265                     txr->me, (long)txr->no_desc_avail.ev_count);
   2266         }
   2267 
   2268         device_printf(dev,"MBX IRQ Handled: %lu\n",
   2269             (long)adapter->link_irq.ev_count);
   2270         return;
   2271 }
   2272 
   2273 static int
   2274 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2275 {
   2276 	struct sysctlnode node;
   2277 	int error, result;
   2278 	struct adapter *adapter;
   2279 
   2280 	node = *rnode;
   2281 	adapter = (struct adapter *)node.sysctl_data;
   2282 	node.sysctl_data = &result;
   2283 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2284 
   2285 	if (error)
   2286 		return error;
   2287 
   2288 	if (result == 1)
   2289 		ixv_print_debug_info(adapter);
   2290 
   2291 	return 0;
   2292 }
   2293 
   2294 const struct sysctlnode *
   2295 ixv_sysctl_instance(struct adapter *adapter)
   2296 {
   2297 	const char *dvname;
   2298 	struct sysctllog **log;
   2299 	int rc;
   2300 	const struct sysctlnode *rnode;
   2301 
   2302 	log = &adapter->sysctllog;
   2303 	dvname = device_xname(adapter->dev);
   2304 
   2305 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2306 	    0, CTLTYPE_NODE, dvname,
   2307 	    SYSCTL_DESCR("ixv information and settings"),
   2308 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2309 		goto err;
   2310 
   2311 	return rnode;
   2312 err:
   2313 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2314 	return NULL;
   2315 }
   2316 
   2317 static void
   2318 ixv_add_device_sysctls(struct adapter *adapter)
   2319 {
   2320 	struct sysctllog **log;
   2321 	const struct sysctlnode *rnode, *cnode;
   2322 	device_t dev;
   2323 
   2324 	dev = adapter->dev;
   2325 	log = &adapter->sysctllog;
   2326 
   2327 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2328 		aprint_error_dev(dev, "could not create sysctl root\n");
   2329 		return;
   2330 	}
   2331 
   2332 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2333 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2334 	    "debug", SYSCTL_DESCR("Debug Info"),
   2335 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2336 		aprint_error_dev(dev, "could not create sysctl\n");
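         	/*
         	** e.g. (assuming the first instance attaches as ixv0)
         	**	sysctl -w hw.ixv0.debug=1
         	** invokes ixv_sysctl_debug() above and dumps the per-queue
         	** counters.
         	*/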
   2337 
   2338 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2339 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2340 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2341 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2342 		aprint_error_dev(dev, "could not create sysctl\n");
   2343 }
   2344 
   2345 /*
   2346  * Add statistic sysctls for the VF.
   2347  */
   2348 static void
   2349 ixv_add_stats_sysctls(struct adapter *adapter)
   2350 {
   2351 	device_t dev = adapter->dev;
   2352 	const struct sysctlnode *rnode;
   2353 	struct sysctllog **log = &adapter->sysctllog;
   2354 	struct ix_queue *que = &adapter->queues[0];
   2355 	struct tx_ring *txr = que->txr;
   2356 	struct rx_ring *rxr = que->rxr;
   2357 
   2358 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2359 	const char *xname = device_xname(dev);
   2360 
   2361 	/* Driver Statistics */
   2362 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2363 	    NULL, xname, "Handled queue in softint");
   2364 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2365 	    NULL, xname, "Requeued in softint");
   2366 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2367 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2368 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2369 	    NULL, xname, "m_defrag() failed");
   2370 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2371 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2372 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2373 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2374 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2375 	    NULL, xname, "Driver tx dma hard fail other");
   2376 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2377 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2378 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2379 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2380 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2381 	    NULL, xname, "Watchdog timeouts");
   2382 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2383 	    NULL, xname, "TSO errors");
   2384 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2385 	    NULL, xname, "Link MSIX IRQ Handled");
   2386 
   2387 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2388 		snprintf(adapter->queues[i].evnamebuf,
   2389 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2390 		    xname, i);
   2391 		snprintf(adapter->queues[i].namebuf,
   2392 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2393 
   2394 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2395 			aprint_error_dev(dev, "could not create sysctl root\n");
   2396 			break;
   2397 		}
   2398 
   2399 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2400 		    0, CTLTYPE_NODE,
   2401 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2402 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2403 			break;
   2404 
   2405 #if 0 /* not yet */
   2406 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2407 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2408 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2409 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2410 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2411 			break;
   2412 
   2413 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2414 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2415 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2416 			NULL, 0, &(adapter->queues[i].irqs),
   2417 		    0, CTL_CREATE, CTL_EOL) != 0)
   2418 			break;
   2419 
   2420 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2421 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2422 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2423 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2424 		    0, CTL_CREATE, CTL_EOL) != 0)
   2425 			break;
   2426 
   2427 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2428 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2429 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2430 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2431 		    0, CTL_CREATE, CTL_EOL) != 0)
   2432 			break;
   2433 #endif
   2434 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2435 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2436 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2437 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2438 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2439 		    NULL, adapter->queues[i].evnamebuf,
   2440 		    "Queue No Descriptor Available");
   2441 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2442 		    NULL, adapter->queues[i].evnamebuf,
   2443 		    "Queue Packets Transmitted");
   2444 #ifndef IXGBE_LEGACY_TX
   2445 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2446 		    NULL, adapter->queues[i].evnamebuf,
   2447 		    "Packets dropped in pcq");
   2448 #endif
   2449 
   2450 #ifdef LRO
   2451 		struct lro_ctrl *lro = &rxr->lro;
   2452 #endif /* LRO */
   2453 
   2454 #if 0 /* not yet */
   2455 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2456 		    CTLFLAG_READONLY,
   2457 		    CTLTYPE_INT,
   2458 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2459 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2460 		    CTL_CREATE, CTL_EOL) != 0)
   2461 			break;
   2462 
   2463 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2464 		    CTLFLAG_READONLY,
   2465 		    CTLTYPE_INT,
   2466 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2467 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2468 		    CTL_CREATE, CTL_EOL) != 0)
   2469 			break;
   2470 #endif
   2471 
   2472 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2473 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2474 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2475 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2476 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2477 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2478 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2479 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2480 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2481 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2482 #ifdef LRO
   2483 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2484 				CTLFLAG_RD, &lro->lro_queued, 0,
   2485 				"LRO Queued");
   2486 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2487 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2488 				"LRO Flushed");
   2489 #endif /* LRO */
   2490 	}
   2491 
    2492 	/* MAC stats get their own sub node */
   2493 
   2494 	snprintf(stats->namebuf,
   2495 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2496 
   2497 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2498 	    stats->namebuf, "rx csum offload - IP");
   2499 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2500 	    stats->namebuf, "rx csum offload - L4");
   2501 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2502 	    stats->namebuf, "rx csum offload - IP bad");
   2503 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2504 	    stats->namebuf, "rx csum offload - L4 bad");
   2505 
   2506 	/* Packet Reception Stats */
   2507 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2508 	    xname, "Good Packets Received");
   2509 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2510 	    xname, "Good Octets Received");
   2511 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2512 	    xname, "Multicast Packets Received");
   2513 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2514 	    xname, "Good Packets Transmitted");
   2515 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2516 	    xname, "Good Octets Transmitted");
   2517 }
   2518 
   2519 static void
   2520 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2521 	const char *description, int *limit, int value)
   2522 {
   2523 	device_t dev =  adapter->dev;
   2524 	struct sysctllog **log;
   2525 	const struct sysctlnode *rnode, *cnode;
   2526 
   2527 	log = &adapter->sysctllog;
   2528 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2529 		aprint_error_dev(dev, "could not create sysctl root\n");
   2530 		return;
   2531 	}
   2532 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2533 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2534 	    name, SYSCTL_DESCR(description),
   2535 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2536 		aprint_error_dev(dev, "could not create sysctl\n");
   2537 	*limit = value;
   2538 }
   2539