ixv.c revision 1.48
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.48 2017/02/10 04:34:11 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixgbe.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.4.6-k";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
     50  *  Used by probe to select devices to load on
     51  *  Last field stores an index into ixv_strings
     52  *  Last entry must be all 0s
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     63 	/* required last entry */
     64 	{0, 0, 0, 0, 0}
     65 };
     66 
     67 /*********************************************************************
     68  *  Table of branding strings
     69  *********************************************************************/
     70 
     71 static const char    *ixv_strings[] = {
     72 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     73 };
     74 
     75 /*********************************************************************
     76  *  Function prototypes
     77  *********************************************************************/
     78 static int      ixv_probe(device_t, cfdata_t, void *);
     79 static void	ixv_attach(device_t, device_t, void *);
     80 static int      ixv_detach(device_t, int);
     81 #if 0
     82 static int      ixv_shutdown(device_t);
     83 #endif
     84 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     85 static int	ixv_init(struct ifnet *);
     86 static void	ixv_init_locked(struct adapter *);
     87 static void     ixv_stop(void *);
     88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     89 static int      ixv_media_change(struct ifnet *);
     90 static void     ixv_identify_hardware(struct adapter *);
     91 static int      ixv_allocate_pci_resources(struct adapter *,
     92 		    const struct pci_attach_args *);
     93 static int      ixv_allocate_msix(struct adapter *,
     94 		    const struct pci_attach_args *);
     95 static int	ixv_setup_msix(struct adapter *);
     96 static void	ixv_free_pci_resources(struct adapter *);
     97 static void     ixv_local_timer(void *);
     98 static void     ixv_local_timer_locked(void *);
     99 static void     ixv_setup_interface(device_t, struct adapter *);
    100 static void     ixv_config_link(struct adapter *);
    101 
    102 static void     ixv_initialize_transmit_units(struct adapter *);
    103 static void     ixv_initialize_receive_units(struct adapter *);
    104 
    105 static void     ixv_enable_intr(struct adapter *);
    106 static void     ixv_disable_intr(struct adapter *);
    107 static void     ixv_set_multi(struct adapter *);
    108 static void     ixv_update_link_status(struct adapter *);
    109 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    110 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    111 static void	ixv_configure_ivars(struct adapter *);
    112 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    113 
    114 static void	ixv_setup_vlan_support(struct adapter *);
    115 #if 0
    116 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    117 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    118 #endif
    119 
    120 static void	ixv_add_device_sysctls(struct adapter *);
    121 static void	ixv_save_stats(struct adapter *);
    122 static void	ixv_init_stats(struct adapter *);
    123 static void	ixv_update_stats(struct adapter *);
    124 static void	ixv_add_stats_sysctls(struct adapter *);
    125 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    126 		    const char *, int *, int);
    127 
    128 /* The MSI/X Interrupt handlers */
    129 static int	ixv_msix_que(void *);
    130 static int	ixv_msix_mbx(void *);
    131 
    132 /* Deferred interrupt tasklets */
    133 static void	ixv_handle_que(void *);
    134 static void	ixv_handle_mbx(void *);
    135 
    136 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    137 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    138 
    139 #ifdef DEV_NETMAP
    140 /*
    141  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    142  * if_ix.c.
    143  */
    144 extern void ixgbe_netmap_attach(struct adapter *adapter);
    145 
    146 #include <net/netmap.h>
    147 #include <sys/selinfo.h>
    148 #include <dev/netmap/netmap_kern.h>
    149 #endif /* DEV_NETMAP */
    150 
    151 /*********************************************************************
    152  *  FreeBSD Device Interface Entry Points
    153  *********************************************************************/
    154 
    155 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    156     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    157     DVF_DETACH_SHUTDOWN);
    158 
    159 # if 0
    160 static device_method_t ixv_methods[] = {
    161 	/* Device interface */
    162 	DEVMETHOD(device_probe, ixv_probe),
    163 	DEVMETHOD(device_attach, ixv_attach),
    164 	DEVMETHOD(device_detach, ixv_detach),
    165 	DEVMETHOD(device_shutdown, ixv_shutdown),
    166 	DEVMETHOD_END
    167 };
    168 #endif
    169 
    170 #if 0
    171 static driver_t ixv_driver = {
    172 	"ixv", ixv_methods, sizeof(struct adapter),
    173 };
    174 
    175 devclass_t ixv_devclass;
    176 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    177 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    178 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    179 #ifdef DEV_NETMAP
    180 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    181 #endif /* DEV_NETMAP */
    182 /* XXX depend on 'ix' ? */
    183 #endif
    184 
    185 /*
    186 ** TUNEABLE PARAMETERS:
    187 */
    188 
    189 /* Number of Queues - do not exceed MSIX vectors - 1 */
    190 static int ixv_num_queues = 0;
    191 #define	TUNABLE_INT(__x, __y)
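        /*
        ** Note: NetBSD has no FreeBSD-style loader tunables, so TUNABLE_INT
        ** is stubbed out to a no-op above and the hw.ixv.* names are kept
        ** only for reference.  The defaults below are compile-time settings;
        ** the rx/tx processing limits are re-exported as sysctls in
        ** ixv_attach().
        */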
    192 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    193 
    194 /*
    195 ** AIM: Adaptive Interrupt Moderation
    196 ** which means that the interrupt rate
    197 ** is varied over time based on the
    198 ** traffic for that interrupt vector
    199 */
    200 static int ixv_enable_aim = FALSE;
    201 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
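        /*
        ** When enabled, ixv_msix_que() recomputes the moderation value per
        ** vector, roughly (see the "Do Adaptive Interrupt Moderation" block
        ** there):
        **
        **	newitr = average bytes/packet last interval + 24 (frame/CRC)
        **	newitr = min(newitr, 3000), then divided by 2 (or 3 mid-range)
        **
        ** and writes the result to IXGBE_VTEITR(vector).  For example,
        ** full-sized 1500-byte frames give newitr ~= 1524, written as ~762.
        */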
    202 
    203 /* How many packets rxeof tries to clean at a time */
    204 static int ixv_rx_process_limit = 256;
    205 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    206 
    207 /* How many packets txeof tries to clean at a time */
    208 static int ixv_tx_process_limit = 256;
    209 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    210 
    211 /*
    212 ** Number of TX descriptors per ring,
    213 ** set higher than RX as this seems
    214 ** to be the better-performing choice.
    215 */
    216 static int ixv_txd = DEFAULT_TXD;
    217 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    218 
    219 /* Number of RX descriptors per ring */
    220 static int ixv_rxd = DEFAULT_RXD;
    221 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
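        /*
        ** Both descriptor counts are sanity-checked in ixv_attach(): the
        ** ring size in bytes must be a multiple of DBA_ALIGN and the count
        ** must fall within MIN_TXD/MAX_TXD (resp. MIN_RXD/MAX_RXD),
        ** otherwise the defaults are used.
        */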
    222 
    223 /*
    224 ** Shadow VFTA table; this is needed because
    225 ** the real filter table gets cleared during
    226 ** a soft reset and we need to repopulate it.
    227 */
    228 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
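        /*
        ** Layout sketch: each 32-bit word covers 32 VLAN IDs, so VLAN id
        ** "vid" lives at ixv_shadow_vfta[vid >> 5], bit (vid & 0x1f);
        ** ixv_setup_vlan_support() replays every set bit into the hardware
        ** VFTA after a reset.
        */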
    229 
    230 /*********************************************************************
    231  *  Device identification routine
    232  *
    233  *  ixv_probe determines if the driver should be loaded on
    234  *  ixv_probe determines whether the driver should be loaded on
    235  *  this adapter, based on the PCI vendor/device ID of the adapter.
    236  *
    237  *  return 1 on a match, 0 otherwise
    238 
    239 static int
    240 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    241 {
    242 #ifdef __HAVE_PCI_MSI_MSIX
    243 	const struct pci_attach_args *pa = aux;
    244 
    245 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    246 #else
    247 	return 0;
    248 #endif
    249 }
    250 
    251 static ixgbe_vendor_info_t *
    252 ixv_lookup(const struct pci_attach_args *pa)
    253 {
    254 	pcireg_t subid;
    255 	ixgbe_vendor_info_t *ent;
    256 
    257 	INIT_DEBUGOUT("ixv_lookup: begin");
    258 
    259 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    260 		return NULL;
    261 
    262 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    263 
    264 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    265 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    266 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    267 
    268 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    269 		     (ent->subvendor_id == 0)) &&
    270 
    271 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    272 		     (ent->subdevice_id == 0))) {
    273 			return ent;
    274 		}
    275 	}
    276 	return NULL;
    277 }
    278 
    279 
    280 /*********************************************************************
    281  *  Device initialization routine
    282  *
    283  *  The attach entry point is called when the driver is being loaded.
    284  *  This routine identifies the type of hardware, allocates all resources
    285  *  and initializes the hardware.
    286  *
    287  *  no return value; on failure the device is simply left unattached
    288  *********************************************************************/
    289 
    290 static void
    291 ixv_attach(device_t parent, device_t dev, void *aux)
    292 {
    293 	struct adapter *adapter;
    294 	struct ixgbe_hw *hw;
    295 	int             error = 0;
    296 	ixgbe_vendor_info_t *ent;
    297 	const struct pci_attach_args *pa = aux;
    298 
    299 	INIT_DEBUGOUT("ixv_attach: begin");
    300 
    301 	/* Allocate, clear, and link in our adapter structure */
    302 	adapter = device_private(dev);
    303 	adapter->dev = dev;
    304 	hw = &adapter->hw;
    305 
    306 #ifdef DEV_NETMAP
    307 	adapter->init_locked = ixv_init_locked;
    308 	adapter->stop_locked = ixv_stop;
    309 #endif
    310 
    311 	adapter->osdep.pc = pa->pa_pc;
    312 	adapter->osdep.tag = pa->pa_tag;
    313 	if (pci_dma64_available(pa))
    314 		adapter->osdep.dmat = pa->pa_dmat64;
    315 	else
    316 		adapter->osdep.dmat = pa->pa_dmat;
    317 	adapter->osdep.attached = false;
    318 
    319 	ent = ixv_lookup(pa);
    320 
    321 	KASSERT(ent != NULL);
    322 
    323 	aprint_normal(": %s, Version - %s\n",
    324 	    ixv_strings[ent->index], ixv_driver_version);
    325 
    326 	/* Core Lock Init*/
    327 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    328 
    329 	/* Set up the timer callout */
    330 	callout_init(&adapter->timer, 0);
    331 
    332 	/* Determine hardware revision */
    333 	ixv_identify_hardware(adapter);
    334 
    335 	/* Do base PCI setup - map BAR0 */
    336 	if (ixv_allocate_pci_resources(adapter, pa)) {
    337 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    338 		error = ENXIO;
    339 		goto err_out;
    340 	}
    341 
    342 	/* Sysctls for limiting the amount of work done in the taskqueues */
    343 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    344 	    "max number of rx packets to process",
    345 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    346 
    347 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    348 	    "max number of tx packets to process",
    349 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    350 
    351 	/* Do descriptor calc and sanity checks */
    352 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    353 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    354 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    355 		adapter->num_tx_desc = DEFAULT_TXD;
    356 	} else
    357 		adapter->num_tx_desc = ixv_txd;
    358 
    359 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    360 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    361 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    362 		adapter->num_rx_desc = DEFAULT_RXD;
    363 	} else
    364 		adapter->num_rx_desc = ixv_rxd;
    365 
    366 	/* Allocate our TX/RX Queues */
    367 	if (ixgbe_allocate_queues(adapter)) {
    368 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    369 		error = ENOMEM;
    370 		goto err_out;
    371 	}
    372 
    373 	/*
    374 	** Initialize the shared code; at
    375 	** this point the MAC type is set.
    376 	*/
    377 	error = ixgbe_init_shared_code(hw);
    378 	if (error) {
    379 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    380 		error = EIO;
    381 		goto err_late;
    382 	}
    383 
    384 	/* Setup the mailbox */
    385 	ixgbe_init_mbx_params_vf(hw);
    386 
    387 	/* Reset mbox api to 1.0 */
    388 	error = ixgbe_reset_hw(hw);
    389 	if (error == IXGBE_ERR_RESET_FAILED)
    390 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    391 	else if (error)
    392 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    393 	if (error) {
    394 		error = EIO;
    395 		goto err_late;
    396 	}
    397 
    398 	/* Negotiate mailbox API version */
    399 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    400 	if (error)
    401 		aprint_debug_dev(dev,
    402 		    "MBX API 1.1 negotiation failed! Error %d\n", error);
    403 
    404 	error = ixgbe_init_hw(hw);
    405 	if (error) {
    406 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    407 		error = EIO;
    408 		goto err_late;
    409 	}
    410 
    411 	error = ixv_allocate_msix(adapter, pa);
    412 	if (error) {
    413 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    414 		goto err_late;
    415 	}
    416 
    417 	/* If no mac address was assigned, make a random one */
    418 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    419 		u8 addr[ETHER_ADDR_LEN];
    420 		uint64_t rndval = cprng_fast64();
    421 
    422 		memcpy(addr, &rndval, sizeof(addr));
    423 		addr[0] &= 0xFE;	/* clear the multicast (group) bit */
    424 		addr[0] |= 0x02;	/* set the locally-administered bit */
    425 		bcopy(addr, hw->mac.addr, sizeof(addr));
    426 	}
    427 
    428 	/* Setup OS specific network interface */
    429 	ixv_setup_interface(dev, adapter);
    430 
    431 	/* Do the stats setup */
    432 	ixv_save_stats(adapter);
    433 	ixv_init_stats(adapter);
    434 
    435 	/* Register for VLAN events */
    436 #if 0 /* XXX delete after write? */
    437 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    438 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    439 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    440 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    441 #endif
    442 
    443 	/* Add sysctls */
    444 	ixv_add_device_sysctls(adapter);
    445 	ixv_add_stats_sysctls(adapter);
    446 
    447 #ifdef DEV_NETMAP
    448 	ixgbe_netmap_attach(adapter);
    449 #endif /* DEV_NETMAP */
    450 	INIT_DEBUGOUT("ixv_attach: end");
    451 	adapter->osdep.attached = true;
    452 	return;
    453 
    454 err_late:
    455 	ixgbe_free_transmit_structures(adapter);
    456 	ixgbe_free_receive_structures(adapter);
    457 err_out:
    458 	ixv_free_pci_resources(adapter);
    459 	return;
    460 
    461 }
    462 
    463 /*********************************************************************
    464  *  Device removal routine
    465  *
    466  *  The detach entry point is called when the driver is being removed.
    467  *  This routine stops the adapter and deallocates all the resources
    468  *  that were allocated for driver operation.
    469  *
    470  *  return 0 on success, positive on failure
    471  *********************************************************************/
    472 
    473 static int
    474 ixv_detach(device_t dev, int flags)
    475 {
    476 	struct adapter *adapter = device_private(dev);
    477 	struct ix_queue *que = adapter->queues;
    478 	struct tx_ring *txr = adapter->tx_rings;
    479 
    480 	INIT_DEBUGOUT("ixv_detach: begin");
    481 	if (adapter->osdep.attached == false)
    482 		return 0;
    483 
    484 #if NVLAN > 0
    485 	/* Make sure VLANS are not using driver */
    486 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    487 		;	/* nothing to do: no VLANs */
    488 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    489 		vlan_ifdetach(adapter->ifp);
    490 	else {
    491 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    492 		return EBUSY;
    493 	}
    494 #endif
    495 
    496 	IXGBE_CORE_LOCK(adapter);
    497 	ixv_stop(adapter);
    498 	IXGBE_CORE_UNLOCK(adapter);
    499 
    500 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    501 #ifndef IXGBE_LEGACY_TX
    502 		softint_disestablish(txr->txr_si);
    503 #endif
    504 		softint_disestablish(que->que_si);
    505 	}
    506 
    507 	/* Drain the Mailbox(link) queue */
    508 	softint_disestablish(adapter->link_si);
    509 
    510 	/* Unregister VLAN events */
    511 #if 0 /* XXX msaitoh delete after write? */
    512 	if (adapter->vlan_attach != NULL)
    513 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    514 	if (adapter->vlan_detach != NULL)
    515 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    516 #endif
    517 
    518 	ether_ifdetach(adapter->ifp);
    519 	callout_halt(&adapter->timer, NULL);
    520 #ifdef DEV_NETMAP
    521 	netmap_detach(adapter->ifp);
    522 #endif /* DEV_NETMAP */
    523 	ixv_free_pci_resources(adapter);
    524 #if 0 /* XXX the NetBSD port is probably missing something here */
    525 	bus_generic_detach(dev);
    526 #endif
    527 	if_detach(adapter->ifp);
    528 
    529 	sysctl_teardown(&adapter->sysctllog);
    530 
    531 	ixgbe_free_transmit_structures(adapter);
    532 	ixgbe_free_receive_structures(adapter);
    533 
    534 	IXGBE_CORE_LOCK_DESTROY(adapter);
    535 	return (0);
    536 }
    537 
    538 /*********************************************************************
    539  *
    540  *  Shutdown entry point
    541  *
    542  **********************************************************************/
    543 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    544 static int
    545 ixv_shutdown(device_t dev)
    546 {
    547 	struct adapter *adapter = device_private(dev);
    548 	IXGBE_CORE_LOCK(adapter);
    549 	ixv_stop(adapter);
    550 	IXGBE_CORE_UNLOCK(adapter);
    551 	return (0);
    552 }
    553 #endif
    554 
    555 static int
    556 ixv_ifflags_cb(struct ethercom *ec)
    557 {
    558 	struct ifnet *ifp = &ec->ec_if;
    559 	struct adapter *adapter = ifp->if_softc;
    560 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    561 
    562 	IXGBE_CORE_LOCK(adapter);
    563 
    564 	if (change != 0)
    565 		adapter->if_flags = ifp->if_flags;
    566 
    567 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    568 		rc = ENETRESET;
    569 
    570 	IXGBE_CORE_UNLOCK(adapter);
    571 
    572 	return rc;
    573 }
    574 
    575 /*********************************************************************
    576  *  Ioctl entry point
    577  *
    578  *  ixv_ioctl is called when the user wants to configure the
    579  *  interface.
    580  *
    581  *  return 0 on success, positive on failure
    582  **********************************************************************/
    583 
    584 static int
    585 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    586 {
    587 	struct adapter	*adapter = ifp->if_softc;
    588 	struct ifcapreq *ifcr = data;
    589 	struct ifreq	*ifr = (struct ifreq *) data;
    590 	int             error = 0;
    591 	int l4csum_en;
    592 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    593 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    594 
    595 	switch (command) {
    596 	case SIOCSIFFLAGS:
    597 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    598 		break;
    599 	case SIOCADDMULTI:
    600 	case SIOCDELMULTI:
    601 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    602 		break;
    603 	case SIOCSIFMEDIA:
    604 	case SIOCGIFMEDIA:
    605 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    606 		break;
    607 	case SIOCSIFCAP:
    608 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    609 		break;
    610 	case SIOCSIFMTU:
    611 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    612 		break;
    613 	default:
    614 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    615 		break;
    616 	}
    617 
    618 	switch (command) {
    619 	case SIOCSIFMEDIA:
    620 	case SIOCGIFMEDIA:
    621 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    622 	case SIOCSIFCAP:
    623 		/* Layer-4 Rx checksum offload has to be turned on and
    624 		 * off as a unit.
    625 		 */
    626 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    627 		if (l4csum_en != l4csum && l4csum_en != 0)
    628 			return EINVAL;
    629 		/*FALLTHROUGH*/
    630 	case SIOCADDMULTI:
    631 	case SIOCDELMULTI:
    632 	case SIOCSIFFLAGS:
    633 	case SIOCSIFMTU:
    634 	default:
    635 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    636 			return error;
    637 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    638 			;
    639 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    640 			IXGBE_CORE_LOCK(adapter);
    641 			ixv_init_locked(adapter);
    642 			IXGBE_CORE_UNLOCK(adapter);
    643 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    644 			/*
    645 			 * Multicast list has changed; set the hardware filter
    646 			 * accordingly.
    647 			 */
    648 			IXGBE_CORE_LOCK(adapter);
    649 			ixv_disable_intr(adapter);
    650 			ixv_set_multi(adapter);
    651 			ixv_enable_intr(adapter);
    652 			IXGBE_CORE_UNLOCK(adapter);
    653 		}
    654 		return 0;
    655 	}
    656 }
    657 
    658 /*********************************************************************
    659  *  Init entry point
    660  *
    661  *  This routine is used in two ways. It is used by the stack as
    662  *  the init entry point in the network interface structure. It is also used
    663  *  by the driver as a hw/sw initialization routine to get to a
    664  *  consistent state.
    665  *
    666  *  return 0 on success, positive on failure
    667  **********************************************************************/
    668 #define IXGBE_MHADD_MFS_SHIFT 16
    669 
    670 static void
    671 ixv_init_locked(struct adapter *adapter)
    672 {
    673 	struct ifnet	*ifp = adapter->ifp;
    674 	device_t 	dev = adapter->dev;
    675 	struct ixgbe_hw *hw = &adapter->hw;
    676 	int error = 0;
    677 
    678 	INIT_DEBUGOUT("ixv_init_locked: begin");
    679 	KASSERT(mutex_owned(&adapter->core_mtx));
    680 	hw->adapter_stopped = FALSE;
    681 	ixgbe_stop_adapter(hw);
    682 	callout_stop(&adapter->timer);
    683 
    684 	/* reprogram the RAR[0] in case user changed it. */
    685 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    686 
    687 	/* Get the latest MAC address; the user may have set a LAA */
    688 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    689 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    690 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    691 	hw->addr_ctrl.rar_used_count = 1;
    692 
    693 	/* Prepare transmit descriptors and buffers */
    694 	if (ixgbe_setup_transmit_structures(adapter)) {
    695 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    696 		ixv_stop(adapter);
    697 		return;
    698 	}
    699 
    700 	/* Reset VF and renegotiate mailbox API version */
    701 	ixgbe_reset_hw(hw);
    702 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    703 	if (error)
    704 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    705 
    706 	ixv_initialize_transmit_units(adapter);
    707 
    708 	/* Setup Multicast table */
    709 	ixv_set_multi(adapter);
    710 
    711 	/*
    712 	** Determine the correct mbuf pool
    713 	** for doing jumbo/headersplit
    714 	*/
    715 	if (ifp->if_mtu > ETHERMTU)
    716 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    717 	else
    718 		adapter->rx_mbuf_sz = MCLBYTES;
    719 
    720 	/* Prepare receive descriptors and buffers */
    721 	if (ixgbe_setup_receive_structures(adapter)) {
    722 		device_printf(dev, "Could not setup receive structures\n");
    723 		ixv_stop(adapter);
    724 		return;
    725 	}
    726 
    727 	/* Configure RX settings */
    728 	ixv_initialize_receive_units(adapter);
    729 
    730 #if 0 /* XXX isn't it required? -- msaitoh  */
    731 	/* Set the various hardware offload abilities */
    732 	ifp->if_hwassist = 0;
    733 	if (ifp->if_capenable & IFCAP_TSO4)
    734 		ifp->if_hwassist |= CSUM_TSO;
    735 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    736 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    737 #if __FreeBSD_version >= 800000
    738 		ifp->if_hwassist |= CSUM_SCTP;
    739 #endif
    740 	}
    741 #endif
    742 
    743 	/* Set up VLAN offload and filter */
    744 	ixv_setup_vlan_support(adapter);
    745 
    746 	/* Set up MSI/X routing */
    747 	ixv_configure_ivars(adapter);
    748 
    749 	/* Set up auto-mask */
    750 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    751 
    752 	/* Set moderation on the Link interrupt */
    753 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    754 
    755 	/* Stats init */
    756 	ixv_init_stats(adapter);
    757 
    758 	/* Config/Enable Link */
    759 	ixv_config_link(adapter);
    760 	hw->mac.get_link_status = TRUE;
    761 
    762 	/* Start watchdog */
    763 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    764 
    765 	/* And now turn on interrupts */
    766 	ixv_enable_intr(adapter);
    767 
    768 	/* Now inform the stack we're ready */
    769 	ifp->if_flags |= IFF_RUNNING;
    770 	ifp->if_flags &= ~IFF_OACTIVE;
    771 
    772 	return;
    773 }
    774 
    775 static int
    776 ixv_init(struct ifnet *ifp)
    777 {
    778 	struct adapter *adapter = ifp->if_softc;
    779 
    780 	IXGBE_CORE_LOCK(adapter);
    781 	ixv_init_locked(adapter);
    782 	IXGBE_CORE_UNLOCK(adapter);
    783 	return 0;
    784 }
    785 
    786 
    787 /*
    788 **
    789 ** MSIX Interrupt Handlers and Tasklets
    790 **
    791 */
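        /*
        ** The helpers below drive the VF interrupt mask registers: writing a
        ** vector's bit to VTEIMS unmasks (enables) it, writing it to VTEIMC
        ** masks (disables) it, and writing it to VTEICS raises the interrupt
        ** so the queue is serviced again.
        */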
    792 
    793 static inline void
    794 ixv_enable_queue(struct adapter *adapter, u32 vector)
    795 {
    796 	struct ixgbe_hw *hw = &adapter->hw;
    797 	u32	queue = 1 << vector;
    798 	u32	mask;
    799 
    800 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    801 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    802 }
    803 
    804 static inline void
    805 ixv_disable_queue(struct adapter *adapter, u32 vector)
    806 {
    807 	struct ixgbe_hw *hw = &adapter->hw;
    808 	u64	queue = (u64)1 << vector;	/* shift as 64-bit, not int */
    809 	u32	mask;
    810 
    811 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    812 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    813 }
    814 
    815 static inline void
    816 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    817 {
    818 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    819 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    820 }
    821 
    822 
    823 static void
    824 ixv_handle_que(void *context)
    825 {
    826 	struct ix_queue *que = context;
    827 	struct adapter  *adapter = que->adapter;
    828 	struct tx_ring	*txr = que->txr;
    829 	struct ifnet    *ifp = adapter->ifp;
    830 	bool		more;
    831 
    832 	if (ifp->if_flags & IFF_RUNNING) {
    833 		more = ixgbe_rxeof(que);
    834 		IXGBE_TX_LOCK(txr);
    835 		ixgbe_txeof(txr);
    836 #ifndef IXGBE_LEGACY_TX
    837 		if (pcq_peek(txr->txr_interq) != NULL)
    838 			ixgbe_mq_start_locked(ifp, txr);
    839 #else
    840 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
    841 			ixgbe_start_locked(txr, ifp);
    842 #endif
    843 		IXGBE_TX_UNLOCK(txr);
    844 		if (more) {
    845 			adapter->req.ev_count++;
    846 			softint_schedule(que->que_si);
    847 			return;
    848 		}
    849 	}
    850 
    851 	/* Reenable this interrupt */
    852 	ixv_enable_queue(adapter, que->msix);
    853 	return;
    854 }
    855 
    856 /*********************************************************************
    857  *
    858  *  MSI-X Queue Interrupt Service routine
    859  *
    860  **********************************************************************/
    861 int
    862 ixv_msix_que(void *arg)
    863 {
    864 	struct ix_queue	*que = arg;
    865 	struct adapter  *adapter = que->adapter;
    866 	struct ifnet    *ifp = adapter->ifp;
    867 	struct tx_ring	*txr = que->txr;
    868 	struct rx_ring	*rxr = que->rxr;
    869 	bool		more;
    870 	u32		newitr = 0;
    871 
    872 	ixv_disable_queue(adapter, que->msix);
    873 	++que->irqs.ev_count;
    874 
    875 #ifdef __NetBSD__
    876 	/* Don't run ixgbe_rxeof in interrupt context */
    877 	more = true;
    878 #else
    879 	more = ixgbe_rxeof(que);
    880 #endif
    881 
    882 	IXGBE_TX_LOCK(txr);
    883 	ixgbe_txeof(txr);
    884 	/*
    885 	** Make certain that if the stack
    886 	** has anything queued the task gets
    887 	** scheduled to handle it.
    888 	*/
    889 #ifdef IXGBE_LEGACY_TX
    890 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
    891 		ixgbe_start_locked(txr, ifp);
    892 #else
    893 	if (pcq_peek(txr->txr_interq) != NULL)
    894 		ixgbe_mq_start_locked(ifp, txr);
    895 #endif
    896 	IXGBE_TX_UNLOCK(txr);
    897 
    898 	/* Do AIM now? */
    899 
    900 	if (ixv_enable_aim == FALSE)
    901 		goto no_calc;
    902 	/*
    903 	** Do Adaptive Interrupt Moderation:
    904 	**  - Write out last calculated setting
    905 	**  - Calculate based on average size over
    906 	**    the last interval.
    907 	*/
    908 	if (que->eitr_setting)
    909 		IXGBE_WRITE_REG(&adapter->hw,
    910 		    IXGBE_VTEITR(que->msix),
    911 		    que->eitr_setting);
    912 
    913 	que->eitr_setting = 0;
    914 
    915 	/* Idle, do nothing */
    916 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    917 		goto no_calc;
    918 
    919 	if ((txr->bytes) && (txr->packets))
    920 		newitr = txr->bytes / txr->packets;
    921 	if ((rxr->bytes) && (rxr->packets))
    922 		newitr = max(newitr,
    923 		    (rxr->bytes / rxr->packets));
    924 	newitr += 24; /* account for hardware frame, crc */
    925 
    926 	/* set an upper boundary */
    927 	newitr = min(newitr, 3000);
    928 
    929 	/* Be nice to the mid range */
    930 	if ((newitr > 300) && (newitr < 1200))
    931 		newitr = (newitr / 3);
    932 	else
    933 		newitr = (newitr / 2);
    934 
    935 	newitr |= newitr << 16;
    936 
    937 	/* save for next interrupt */
    938 	que->eitr_setting = newitr;
    939 
    940 	/* Reset state */
    941 	txr->bytes = 0;
    942 	txr->packets = 0;
    943 	rxr->bytes = 0;
    944 	rxr->packets = 0;
    945 
    946 no_calc:
    947 	if (more)
    948 		softint_schedule(que->que_si);
    949 	else /* Reenable this interrupt */
    950 		ixv_enable_queue(adapter, que->msix);
    951 	return 1;
    952 }
    953 
    954 static int
    955 ixv_msix_mbx(void *arg)
    956 {
    957 	struct adapter	*adapter = arg;
    958 	struct ixgbe_hw *hw = &adapter->hw;
    959 	u32		reg;
    960 
    961 	++adapter->link_irq.ev_count;
    962 
    963 	/* First get the cause */
    964 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    965 	/* Clear interrupt with write */
    966 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    967 
    968 	/* Link status change */
    969 	if (reg & IXGBE_EICR_LSC)
    970 		softint_schedule(adapter->link_si);
    971 
    972 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    973 	return 1;
    974 }
    975 
    976 /*********************************************************************
    977  *
    978  *  Media Ioctl callback
    979  *
    980  *  This routine is called whenever the user queries the status of
    981  *  the interface using ifconfig.
    982  *
    983  **********************************************************************/
    984 static void
    985 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
    986 {
    987 	struct adapter *adapter = ifp->if_softc;
    988 
    989 	INIT_DEBUGOUT("ixv_media_status: begin");
    990 	IXGBE_CORE_LOCK(adapter);
    991 	ixv_update_link_status(adapter);
    992 
    993 	ifmr->ifm_status = IFM_AVALID;
    994 	ifmr->ifm_active = IFM_ETHER;
    995 
    996 	if (!adapter->link_active) {
    997 		ifmr->ifm_active |= IFM_NONE;
    998 		IXGBE_CORE_UNLOCK(adapter);
    999 		return;
   1000 	}
   1001 
   1002 	ifmr->ifm_status |= IFM_ACTIVE;
   1003 
   1004 	switch (adapter->link_speed) {
   1005 		case IXGBE_LINK_SPEED_10GB_FULL:
   1006 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1007 			break;
   1008 		case IXGBE_LINK_SPEED_1GB_FULL:
   1009 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1010 			break;
   1011 		case IXGBE_LINK_SPEED_100_FULL:
   1012 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1013 			break;
   1014 	}
   1015 
   1016 	IXGBE_CORE_UNLOCK(adapter);
   1017 
   1018 	return;
   1019 }
   1020 
   1021 /*********************************************************************
   1022  *
   1023  *  Media Ioctl callback
   1024  *
   1025  *  This routine is called when the user changes speed/duplex using
   1026  *  the media/mediaopt options with ifconfig.
   1027  *
   1028  **********************************************************************/
   1029 static int
   1030 ixv_media_change(struct ifnet * ifp)
   1031 {
   1032 	struct adapter *adapter = ifp->if_softc;
   1033 	struct ifmedia *ifm = &adapter->media;
   1034 
   1035 	INIT_DEBUGOUT("ixv_media_change: begin");
   1036 
   1037 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1038 		return (EINVAL);
   1039 
   1040 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1041 	case IFM_AUTO:
   1042 		break;
   1043 	default:
   1044 		device_printf(adapter->dev, "Only auto media type\n");
   1045 		return (EINVAL);
   1046 	}
   1047 
   1048 	return (0);
   1049 }
   1050 
   1051 
   1052 /*********************************************************************
   1053  *  Multicast Update
   1054  *
   1055  *  This routine is called whenever multicast address list is updated.
   1056  *
   1057  **********************************************************************/
   1058 #define IXGBE_RAR_ENTRIES 16
   1059 
   1060 static void
   1061 ixv_set_multi(struct adapter *adapter)
   1062 {
   1063 	struct ether_multi *enm;
   1064 	struct ether_multistep step;
   1065 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1066 	u8	*update_ptr;
   1067 	int	mcnt = 0;
   1068 	struct ethercom *ec = &adapter->osdep.ec;
   1069 
   1070 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1071 
   1072 	ETHER_FIRST_MULTI(step, ec, enm);
   1073 	while (enm != NULL) {
   1074 		bcopy(enm->enm_addrlo,
   1075 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1076 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1077 		mcnt++;
   1078 		/* XXX This might be required --msaitoh */
   1079 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1080 			break;
   1081 		ETHER_NEXT_MULTI(step, enm);
   1082 	}
   1083 
   1084 	update_ptr = mta;
   1085 
   1086 	ixgbe_update_mc_addr_list(&adapter->hw,
   1087 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1088 
   1089 	return;
   1090 }
   1091 
   1092 /*
   1093  * This is an iterator function needed by the multicast
   1094  * shared code. It simply feeds the shared-code routine the
   1095  * addresses in the array built by ixv_set_multi() one by one.
   1096  */
   1097 static u8 *
   1098 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1099 {
   1100 	u8 *addr = *update_ptr;
   1101 	u8 *newptr;
   1102 	*vmdq = 0;
   1103 
   1104 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1105 	*update_ptr = newptr;
   1106 	return addr;
   1107 }
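        /*
        ** For orientation only (paraphrasing the shared code): the update
        ** routine loops mcnt times, calling
        **
        **	addr = ixv_mc_array_itr(hw, &update_ptr, &vmdq);
        **
        ** so each call must return the current address and advance the
        ** cursor, which is all this function does.
        */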
   1108 
   1109 /*********************************************************************
   1110  *  Timer routine
   1111  *
   1112  *  This routine checks for link status, updates statistics,
   1113  *  and runs the watchdog check.
   1114  *
   1115  **********************************************************************/
   1116 
   1117 static void
   1118 ixv_local_timer(void *arg)
   1119 {
   1120 	struct adapter *adapter = arg;
   1121 
   1122 	IXGBE_CORE_LOCK(adapter);
   1123 	ixv_local_timer_locked(adapter);
   1124 	IXGBE_CORE_UNLOCK(adapter);
   1125 }
   1126 
   1127 static void
   1128 ixv_local_timer_locked(void *arg)
   1129 {
   1130 	struct adapter	*adapter = arg;
   1131 	device_t	dev = adapter->dev;
   1132 	struct ix_queue	*que = adapter->queues;
   1133 	u64		queues = 0;
   1134 	int		hung = 0;
   1135 
   1136 	KASSERT(mutex_owned(&adapter->core_mtx));
   1137 
   1138 	ixv_update_link_status(adapter);
   1139 
   1140 	/* Stats Update */
   1141 	ixv_update_stats(adapter);
   1142 
   1143 	/*
   1144 	** Check the TX queues status
   1145 	**      - mark hung queues so we don't schedule on them
   1146 	**      - watchdog only if all queues show hung
   1147 	*/
   1148 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1149 		/* Keep track of queues with work for soft irq */
   1150 		if (que->txr->busy)
   1151 			queues |= ((u64)1 << que->me);
   1152 		/*
   1153 		** Each time txeof runs without cleaning while there
   1154 		** are uncleaned descriptors, it increments busy. If
   1155 		** we reach the MAX we declare the queue hung.
   1156 		*/
   1157 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1158 			++hung;
   1159 			/* Mark the queue as inactive */
   1160 			adapter->active_queues &= ~((u64)1 << que->me);
   1161 			continue;
   1162 		} else {
   1163 			/* Check if we've come back from hung */
   1164 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1165 				adapter->active_queues |= ((u64)1 << que->me);
   1166 		}
   1167 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1168 			device_printf(dev,"Warning queue %d "
   1169 			    "appears to be hung!\n", i);
   1170 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1171 			++hung;
   1172 		}
   1173 
   1174 	}
   1175 
   1176 	/* Only truly watchdog if all queues show hung */
   1177 	if (hung == adapter->num_queues)
   1178 		goto watchdog;
   1179 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1180 		ixv_rearm_queues(adapter, queues);
   1181 	}
   1182 
   1183 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1184 	return;
   1185 
   1186 watchdog:
   1187 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1188 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1189 	adapter->watchdog_events.ev_count++;
   1190 	ixv_init_locked(adapter);
   1191 }
   1192 
   1193 /*
   1194 ** Note: this routine updates the OS on the link state;
   1195 **	the real check of the hardware only happens with
   1196 **	a link interrupt.
   1197 */
   1198 static void
   1199 ixv_update_link_status(struct adapter *adapter)
   1200 {
   1201 	struct ifnet	*ifp = adapter->ifp;
   1202 	device_t dev = adapter->dev;
   1203 
   1204 	if (adapter->link_up){
   1205 		if (adapter->link_active == FALSE) {
   1206 			if (bootverbose) {
   1207 				const char *bpsmsg;
   1208 
   1209 				switch (adapter->link_speed) {
   1210 				case IXGBE_LINK_SPEED_10GB_FULL:
   1211 					bpsmsg = "10 Gbps";
   1212 					break;
   1213 				case IXGBE_LINK_SPEED_1GB_FULL:
   1214 					bpsmsg = "1 Gbps";
   1215 					break;
   1216 				case IXGBE_LINK_SPEED_100_FULL:
   1217 					bpsmsg = "100 Mbps";
   1218 					break;
   1219 				default:
   1220 					bpsmsg = "unknown speed";
   1221 					break;
   1222 				}
   1223 				device_printf(dev,"Link is up %s %s \n",
   1224 				    bpsmsg, "Full Duplex");
   1225 			}
   1226 			adapter->link_active = TRUE;
   1227 			if_link_state_change(ifp, LINK_STATE_UP);
   1228 		}
   1229 	} else { /* Link down */
   1230 		if (adapter->link_active == TRUE) {
   1231 			if (bootverbose)
   1232 				device_printf(dev,"Link is Down\n");
   1233 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1234 			adapter->link_active = FALSE;
   1235 		}
   1236 	}
   1237 
   1238 	return;
   1239 }
   1240 
   1241 
   1242 static void
   1243 ixv_ifstop(struct ifnet *ifp, int disable)
   1244 {
   1245 	struct adapter *adapter = ifp->if_softc;
   1246 
   1247 	IXGBE_CORE_LOCK(adapter);
   1248 	ixv_stop(adapter);
   1249 	IXGBE_CORE_UNLOCK(adapter);
   1250 }
   1251 
   1252 /*********************************************************************
   1253  *
   1254  *  This routine disables all traffic on the adapter by issuing a
   1255  *  global reset on the MAC and deallocates TX/RX buffers.
   1256  *
   1257  **********************************************************************/
   1258 
   1259 static void
   1260 ixv_stop(void *arg)
   1261 {
   1262 	struct ifnet   *ifp;
   1263 	struct adapter *adapter = arg;
   1264 	struct ixgbe_hw *hw = &adapter->hw;
   1265 	ifp = adapter->ifp;
   1266 
   1267 	KASSERT(mutex_owned(&adapter->core_mtx));
   1268 
   1269 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1270 	ixv_disable_intr(adapter);
   1271 
   1272 	/* Tell the stack that the interface is no longer active */
   1273 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1274 
   1275 	ixgbe_reset_hw(hw);
   1276 	adapter->hw.adapter_stopped = FALSE;
   1277 	ixgbe_stop_adapter(hw);
   1278 	callout_stop(&adapter->timer);
   1279 
   1280 	/* reprogram the RAR[0] in case user changed it. */
   1281 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1282 
   1283 	return;
   1284 }
   1285 
   1286 
   1287 /*********************************************************************
   1288  *
   1289  *  Determine hardware revision.
   1290  *
   1291  **********************************************************************/
   1292 static void
   1293 ixv_identify_hardware(struct adapter *adapter)
   1294 {
   1295 	pcitag_t tag;
   1296 	pci_chipset_tag_t pc;
   1297 	pcireg_t subid, id;
   1298 	struct ixgbe_hw *hw = &adapter->hw;
   1299 
   1300 	pc = adapter->osdep.pc;
   1301 	tag = adapter->osdep.tag;
   1302 
   1303 	/*
   1304 	** Make sure BUSMASTER is set; on a VM under
   1305 	** KVM it may not be, and that will break things.
   1306 	*/
   1307 	ixgbe_pci_enable_busmaster(pc, tag);
   1308 
   1309 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1310 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1311 
   1312 	/* Save off the information about this board */
   1313 	hw->vendor_id = PCI_VENDOR(id);
   1314 	hw->device_id = PCI_PRODUCT(id);
   1315 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1316 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1317 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1318 
   1319 	/* We need this to determine device-specific things */
   1320 	ixgbe_set_mac_type(hw);
   1321 
   1322 	/* Set the right number of segments */
   1323 	adapter->num_segs = IXGBE_82599_SCATTER;
   1324 
   1325 	return;
   1326 }
   1327 
   1328 /*********************************************************************
   1329  *
   1330  *  Setup MSIX Interrupt resources and handlers
   1331  *
   1332  **********************************************************************/
   1333 static int
   1334 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1335 {
   1336 	device_t	dev = adapter->dev;
   1337 	struct ix_queue *que = adapter->queues;
   1338 	struct		tx_ring *txr = adapter->tx_rings;
   1339 	int 		error, rid, vector = 0;
   1340 	pci_chipset_tag_t pc;
   1341 	pcitag_t	tag;
   1342 	char		intrbuf[PCI_INTRSTR_LEN];
   1343 	char		intr_xname[32];
   1344 	const char	*intrstr = NULL;
   1345 	kcpuset_t	*affinity;
   1346 	int		cpu_id = 0;
   1347 
   1348 	pc = adapter->osdep.pc;
   1349 	tag = adapter->osdep.tag;
   1350 
   1351 	adapter->osdep.nintrs = adapter->num_queues + 1;
   1352 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   1353 	    adapter->osdep.nintrs) != 0) {
   1354 		aprint_error_dev(dev,
   1355 		    "failed to allocate MSI-X interrupt\n");
   1356 		return (ENXIO);
   1357 	}
   1358 
   1359 	kcpuset_create(&affinity, false);
   1360 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1361 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   1362 		    device_xname(dev), i);
   1363 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1364 		    sizeof(intrbuf));
   1365 #ifdef IXV_MPSAFE
   1366 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1367 		    true);
   1368 #endif
   1369 		/* Set the handler function */
   1370 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   1371 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   1372 		    intr_xname);
   1373 		if (que->res == NULL) {
   1374 			pci_intr_release(pc, adapter->osdep.intrs,
   1375 			    adapter->osdep.nintrs);
   1376 			aprint_error_dev(dev,
   1377 			    "Failed to register QUE handler\n");
   1378 			kcpuset_destroy(affinity);
   1379 			return (ENXIO);
   1380 		}
   1381 		que->msix = vector;
   1382 		adapter->active_queues |= (u64)1 << que->msix;
   1383 
   1384 		cpu_id = i;
   1385 		/* Round-robin affinity */
   1386 		kcpuset_zero(affinity);
   1387 		kcpuset_set(affinity, cpu_id % ncpu);
   1388 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1389 		    NULL);
   1390 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1391 		    intrstr);
   1392 		if (error == 0)
   1393 			aprint_normal(", bound queue %d to cpu %d\n",
   1394 			    i, cpu_id % ncpu);
   1395 		else
   1396 			aprint_normal("\n");
   1397 
   1398 #ifndef IXGBE_LEGACY_TX
   1399 		txr->txr_si = softint_establish(SOFTINT_NET,
   1400 		    ixgbe_deferred_mq_start, txr);
   1401 #endif
   1402 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1403 		    que);
   1404 		if (que->que_si == NULL) {
   1405 			aprint_error_dev(dev,
   1406 			    "could not establish software interrupt\n");
   1407 		}
   1408 	}
   1409 
   1410 	/* and Mailbox */
   1411 	cpu_id++;
   1412 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   1413 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1414 	    sizeof(intrbuf));
   1415 #ifdef IXG_MPSAFE
   1416 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   1417 	    true);
   1418 #endif
   1419 	/* Set the mbx handler function */
   1420 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   1421 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   1422 	    intr_xname);
   1423 	if (adapter->osdep.ihs[vector] == NULL) {
   1424 		adapter->res = NULL;
   1425 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1426 		kcpuset_destroy(affinity);
   1427 		return (ENXIO);
   1428 	}
   1429 	/* Round-robin affinity */
   1430 	kcpuset_zero(affinity);
   1431 	kcpuset_set(affinity, cpu_id % ncpu);
   1432 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1433 
   1434 	aprint_normal_dev(dev,
   1435 	    "for link, interrupting at %s", intrstr);
   1436 	if (error == 0)
   1437 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   1438 	else
   1439 		aprint_normal("\n");
   1440 
   1441 	adapter->vector = vector;
   1442 	/* Tasklets for Mailbox */
   1443 	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1444 	    adapter);
   1445 	/*
   1446 	** Due to a broken design, QEMU will fail to properly
   1447 	** enable the guest for MSIX unless the vectors in
   1448 	** the table are all set up, so we must rewrite the
   1449 	** ENABLE in the MSIX control register again at this
   1450 	** point to cause it to successfully initialize us.
   1451 	*/
   1452 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1453 		int msix_ctrl;
   1454 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1455 		rid += PCI_MSIX_CTL;
   1456 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1457 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1458 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1459 	}
   1460 
   1461 	kcpuset_destroy(affinity);
   1462 	return (0);
   1463 }
   1464 
   1465 /*
   1466  * Set up MSIX resources; note that the VF
   1467  * device MUST use MSIX; there is no fallback.
   1468  */
   1469 static int
   1470 ixv_setup_msix(struct adapter *adapter)
   1471 {
   1472 	device_t dev = adapter->dev;
   1473 	int want, queues, msgs;
   1474 
   1475 	/* Must have at least 2 MSIX vectors */
   1476 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1477 	if (msgs < 2) {
   1478 		aprint_error_dev(dev,"MSIX config error\n");
   1479 		return (ENXIO);
   1480 	}
   1481 	msgs = MIN(msgs, IXG_MAX_NINTR);
   1482 
   1483 	/* Figure out a reasonable auto config value */
   1484 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   1485 
   1486 	if (ixv_num_queues != 0)
   1487 		queues = ixv_num_queues;
   1488 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   1489 		queues = IXGBE_VF_MAX_TX_QUEUES;
   1490 
   1491 	/*
   1492 	** Want vectors for the queues,
   1493 	** plus an additional one for the mailbox.
   1494 	*/
   1495 	want = queues + 1;
   1496 	if (msgs >= want)
   1497 		msgs = want;
   1498 	else {
   1499 		aprint_error_dev(dev,
   1500 		    "MSIX Configuration Problem, "
   1501 		    "%d vectors but %d queues wanted!\n",
   1502 		    msgs, want);
   1503 		return -1;
   1504 	}
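        	/*
        	** Worked example (hypothetical numbers): with 4 CPUs and 3 MSI-X
        	** messages available, queues becomes 2 and want = 3 (two queue
        	** vectors plus the mailbox), so all three messages are used.
        	*/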
   1505 
   1506 	adapter->msix_mem = (void *)1; /* XXX */
   1507 	aprint_normal_dev(dev,
   1508 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1509 	adapter->num_queues = queues;
   1510 	return (msgs);
   1511 }
   1512 
   1513 
   1514 static int
   1515 ixv_allocate_pci_resources(struct adapter *adapter,
   1516     const struct pci_attach_args *pa)
   1517 {
   1518 	pcireg_t	memtype;
   1519 	device_t        dev = adapter->dev;
   1520 	bus_addr_t addr;
   1521 	int flags;
   1522 
   1523 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1524 	switch (memtype) {
   1525 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1526 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1527 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1528 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1529 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1530 			goto map_err;
   1531 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1532 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1533 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1534 		}
   1535 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1536 		     adapter->osdep.mem_size, flags,
   1537 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1538 map_err:
   1539 			adapter->osdep.mem_size = 0;
   1540 			aprint_error_dev(dev, "unable to map BAR0\n");
   1541 			return ENXIO;
   1542 		}
   1543 		break;
   1544 	default:
   1545 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1546 		return ENXIO;
   1547 	}
   1548 	adapter->hw.back = adapter;
   1549 
   1550 	/* Pick up the tuneable queues */
   1551 	adapter->num_queues = ixv_num_queues;
   1552 
   1553 	/*
   1554 	** Now setup MSI/X, should
   1555 	** return us the number of
   1556 	** configured vectors.
   1557 	*/
   1558 	adapter->msix = ixv_setup_msix(adapter);
   1559 	if (adapter->msix == ENXIO)
   1560 		return (ENXIO);
   1561 	else
   1562 		return (0);
   1563 }
   1564 
   1565 static void
   1566 ixv_free_pci_resources(struct adapter * adapter)
   1567 {
   1568 	struct 		ix_queue *que = adapter->queues;
   1569 	int		rid;
   1570 
   1571 	/*
   1572 	**  Release all msix queue resources:
   1573 	*/
   1574 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1575 		if (que->res != NULL)
   1576 			pci_intr_disestablish(adapter->osdep.pc,
   1577 			    adapter->osdep.ihs[i]);
   1578 	}
   1579 
   1580 
   1581 	/* Clean the Legacy or Link interrupt last */
   1582 	if (adapter->vector) /* we are doing MSIX */
   1583 		rid = adapter->vector;
   1584 	else
   1585 		rid = 0;
   1586 
   1587 	if (adapter->osdep.ihs[rid] != NULL) {
   1588 		pci_intr_disestablish(adapter->osdep.pc,
   1589 		    adapter->osdep.ihs[rid]);
   1590 		adapter->osdep.ihs[rid] = NULL;
   1591 	}
   1592 
   1593 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1594 	    adapter->osdep.nintrs);
   1595 
   1596 	if (adapter->osdep.mem_size != 0) {
   1597 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1598 		    adapter->osdep.mem_bus_space_handle,
   1599 		    adapter->osdep.mem_size);
   1600 	}
   1601 
   1602 	return;
   1603 }
   1604 
   1605 /*********************************************************************
   1606  *
   1607  *  Setup networking device structure and register an interface.
   1608  *
   1609  **********************************************************************/
   1610 static void
   1611 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1612 {
   1613 	struct ethercom *ec = &adapter->osdep.ec;
   1614 	struct ifnet   *ifp;
   1615 
   1616 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1617 
   1618 	ifp = adapter->ifp = &ec->ec_if;
   1619 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1620 	ifp->if_baudrate = IF_Gbps(10);
   1621 	ifp->if_init = ixv_init;
   1622 	ifp->if_stop = ixv_ifstop;
   1623 	ifp->if_softc = adapter;
   1624 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1625 	ifp->if_ioctl = ixv_ioctl;
   1626 #ifndef IXGBE_LEGACY_TX
   1627 	ifp->if_transmit = ixgbe_mq_start;
   1628 #endif
   1629 	ifp->if_start = ixgbe_start;
   1630 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1631 	IFQ_SET_READY(&ifp->if_snd);
   1632 
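        	/*
        	 * Attach the interface in stages: if_initialize() sets up the
        	 * ifnet, ether_ifattach() attaches the Ethernet layer, and
        	 * if_register() makes the interface visible to the system.
        	 */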
   1633 	if_initialize(ifp);
   1634 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1635 #ifndef IXGBE_LEGACY_TX
   1636 #if 0	/* We use per TX queue softint */
   1637 	if_deferred_start_init(ifp, ixgbe_deferred_mq_start);
   1638 #endif
   1639 #endif
   1640 	if_register(ifp);
   1641 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1642 
   1643 	adapter->max_frame_size =
   1644 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
   1645 
   1646 	/*
   1647 	 * Tell the upper layer(s) we support long frames.
   1648 	 */
   1649 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1650 
   1651 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1652 	ifp->if_capenable = 0;
   1653 
   1654 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1655 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
    1656 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
    1657 	    | ETHERCAP_VLAN_MTU;
   1658 	ec->ec_capenable = ec->ec_capabilities;
   1659 
   1660 	/* Don't enable LRO by default */
   1661 	ifp->if_capabilities |= IFCAP_LRO;
   1662 #if 0
   1663 	ifp->if_capenable = ifp->if_capabilities;
   1664 #endif
   1665 
    1666 	/*
    1667 	** Don't turn this on by default: if vlans are
    1668 	** created on another pseudo device (e.g. lagg),
    1669 	** vlan events are not passed through, breaking
    1670 	** operation, but with HW FILTER off it works. If
    1671 	** you use vlans directly on this driver you can
    1672 	** enable this and get full hardware tag filtering.
    1673 	*/
   1674 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1675 
   1676 	/*
   1677 	 * Specify the media types supported by this adapter and register
   1678 	 * callbacks to update media and link information
   1679 	 */
   1680 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1681 		     ixv_media_status);
   1682 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1683 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1684 
   1685 	return;
   1686 }
   1687 
   1688 static void
   1689 ixv_config_link(struct adapter *adapter)
   1690 {
   1691 	struct ixgbe_hw *hw = &adapter->hw;
   1692 
   1693 	if (hw->mac.ops.check_link)
   1694 		hw->mac.ops.check_link(hw, &adapter->link_speed,
   1695 		    &adapter->link_up, FALSE);
   1696 }
   1697 
   1698 
   1699 /*********************************************************************
   1700  *
   1701  *  Enable transmit unit.
   1702  *
   1703  **********************************************************************/
   1704 static void
   1705 ixv_initialize_transmit_units(struct adapter *adapter)
   1706 {
   1707 	struct tx_ring	*txr = adapter->tx_rings;
   1708 	struct ixgbe_hw	*hw = &adapter->hw;
   1709 
   1710 
   1711 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1712 		u64	tdba = txr->txdma.dma_paddr;
   1713 		u32	txctrl, txdctl;
   1714 
   1715 		/* Set WTHRESH to 8, burst writeback */
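        		/* (WTHRESH is bits 22:16 of TXDCTL, hence the shift by 16) */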
   1716 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1717 		txdctl |= (8 << 16);
   1718 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1719 
   1720 		/* Set the HW Tx Head and Tail indices */
    1721 		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
    1722 		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
   1723 
   1724 		/* Set Tx Tail register */
   1725 		txr->tail = IXGBE_VFTDT(i);
   1726 
   1727 		/* Set Ring parameters */
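        		/* The 64-bit ring base address is split across VFTDBAL/VFTDBAH */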
   1728 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1729 		       (tdba & 0x00000000ffffffffULL));
   1730 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1731 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1732 		    adapter->num_tx_desc *
   1733 		    sizeof(struct ixgbe_legacy_tx_desc));
   1734 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1735 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1736 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1737 
   1738 		/* Now enable */
   1739 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1740 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1741 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1742 	}
   1743 
   1744 	return;
   1745 }
   1746 
   1747 
   1748 /*********************************************************************
   1749  *
    1750  *  Set up receive registers and features.
   1751  *
   1752  **********************************************************************/
   1753 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1754 
   1755 static void
   1756 ixv_initialize_receive_units(struct adapter *adapter)
   1757 {
   1758 	struct	rx_ring	*rxr = adapter->rx_rings;
   1759 	struct ixgbe_hw	*hw = &adapter->hw;
   1760 	struct ifnet	*ifp = adapter->ifp;
   1761 	u32		bufsz, rxcsum, psrtype;
   1762 
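        	/*
        	 * SRRCTL.BSIZEPKT is expressed in 1KB units (hence the
        	 * IXGBE_SRRCTL_BSIZEPKT_SHIFT), so this selects 4KB receive
        	 * buffers for jumbo MTUs and 2KB buffers otherwise.
        	 */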
   1763 	if (ifp->if_mtu > ETHERMTU)
   1764 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1765 	else
   1766 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1767 
   1768 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1769 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1770 	    IXGBE_PSRTYPE_L2HDR;
   1771 
   1772 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1773 
   1774 	/* Tell PF our max_frame size */
   1775 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1776 
   1777 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1778 		u64 rdba = rxr->rxdma.dma_paddr;
   1779 		u32 reg, rxdctl;
   1780 
   1781 		/* Disable the queue */
   1782 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1783 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1784 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1785 		for (int j = 0; j < 10; j++) {
   1786 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1787 			    IXGBE_RXDCTL_ENABLE)
   1788 				msec_delay(1);
   1789 			else
   1790 				break;
   1791 		}
   1792 		wmb();
   1793 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1794 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1795 		    (rdba & 0x00000000ffffffffULL));
   1796 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1797 		    (rdba >> 32));
   1798 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1799 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1800 
   1801 		/* Reset the ring indices */
   1802 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1803 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1804 
   1805 		/* Set up the SRRCTL register */
   1806 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1807 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1808 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1809 		reg |= bufsz;
   1810 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1811 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1812 
   1813 		/* Capture Rx Tail index */
   1814 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1815 
   1816 		/* Do the queue enabling last */
   1817 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1818 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1819 		for (int k = 0; k < 10; k++) {
   1820 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1821 			    IXGBE_RXDCTL_ENABLE)
   1822 				break;
   1823 			else
   1824 				msec_delay(1);
   1825 		}
   1826 		wmb();
   1827 
   1828 		/* Set the Tail Pointer */
   1829 #ifdef DEV_NETMAP
   1830 		/*
   1831 		 * In netmap mode, we must preserve the buffers made
   1832 		 * available to userspace before the if_init()
   1833 		 * (this is true by default on the TX side, because
   1834 		 * init makes all buffers available to userspace).
   1835 		 *
   1836 		 * netmap_reset() and the device specific routines
   1837 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1838 		 * buffers at the end of the NIC ring, so here we
   1839 		 * must set the RDT (tail) register to make sure
   1840 		 * they are not overwritten.
   1841 		 *
   1842 		 * In this driver the NIC ring starts at RDH = 0,
   1843 		 * RDT points to the last slot available for reception (?),
   1844 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1845 		 */
   1846 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1847 			struct netmap_adapter *na = NA(adapter->ifp);
   1848 			struct netmap_kring *kring = &na->rx_rings[i];
   1849 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1850 
   1851 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1852 		} else
   1853 #endif /* DEV_NETMAP */
   1854 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1855 			    adapter->num_rx_desc - 1);
   1856 	}
   1857 
   1858 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1859 
   1860 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1861 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1862 
   1863 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1864 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1865 
   1866 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1867 
   1868 	return;
   1869 }
   1870 
   1871 static void
   1872 ixv_setup_vlan_support(struct adapter *adapter)
   1873 {
   1874 	struct ixgbe_hw *hw = &adapter->hw;
   1875 	u32		ctrl, vid, vfta, retry;
   1876 	struct rx_ring	*rxr;
   1877 
    1878 	/*
    1879 	** We get here through init_locked, meaning
    1880 	** a soft reset; this has already cleared
    1881 	** the VFTA and other state, so if no
    1882 	** vlans have been registered do nothing.
    1883 	*/
   1884 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1885 		return;
   1886 
   1887 	/* Enable the queues */
   1888 	for (int i = 0; i < adapter->num_queues; i++) {
   1889 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1890 		ctrl |= IXGBE_RXDCTL_VME;
   1891 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1892 		/*
   1893 		 * Let Rx path know that it needs to store VLAN tag
   1894 		 * as part of extra mbuf info.
   1895 		 */
   1896 		rxr = &adapter->rx_rings[i];
   1897 		rxr->vtag_strip = TRUE;
   1898 	}
   1899 
   1900 	/*
    1901 	** A soft reset zeroes out the VFTA, so
   1902 	** we need to repopulate it now.
   1903 	*/
   1904 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1905 		if (ixv_shadow_vfta[i] == 0)
   1906 			continue;
   1907 		vfta = ixv_shadow_vfta[i];
    1908 		/*
    1909 		** Reconstruct the vlan IDs
    1910 		** from the bits set in each
    1911 		** 32-bit word of the shadow VFTA.
    1912 		*/
   1913 		for (int j = 0; j < 32; j++) {
   1914 			retry = 0;
   1915 			if ((vfta & (1 << j)) == 0)
   1916 				continue;
   1917 			vid = (i * 32) + j;
   1918 			/* Call the shared code mailbox routine */
   1919 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1920 				if (++retry > 5)
   1921 					break;
   1922 			}
   1923 		}
   1924 	}
   1925 }
   1926 
   1927 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
    1928 /*
    1929 ** This routine is run via a vlan config EVENT;
    1930 ** it enables us to use the HW filter table since
    1931 ** we can get the vlan id. This just creates the
    1932 ** entry in the soft version of the VFTA; init will
    1933 ** repopulate the real table.
    1934 */
   1935 static void
   1936 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1937 {
   1938 	struct adapter	*adapter = ifp->if_softc;
   1939 	u16		index, bit;
   1940 
   1941 	if (ifp->if_softc != arg) /* Not our event */
   1942 		return;
   1943 
   1944 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1945 		return;
   1946 
   1947 	IXGBE_CORE_LOCK(adapter);
   1948 	index = (vtag >> 5) & 0x7F;
   1949 	bit = vtag & 0x1F;
   1950 	ixv_shadow_vfta[index] |= (1 << bit);
   1951 	/* Re-init to load the changes */
   1952 	ixv_init_locked(adapter);
   1953 	IXGBE_CORE_UNLOCK(adapter);
   1954 }
   1955 
    1956 /*
    1957 ** This routine is run via a vlan
    1958 ** unconfig EVENT; remove our entry
    1959 ** from the soft VFTA.
    1960 */
   1961 static void
   1962 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1963 {
   1964 	struct adapter	*adapter = ifp->if_softc;
   1965 	u16		index, bit;
   1966 
   1967 	if (ifp->if_softc !=  arg)
   1968 		return;
   1969 
   1970 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1971 		return;
   1972 
   1973 	IXGBE_CORE_LOCK(adapter);
   1974 	index = (vtag >> 5) & 0x7F;
   1975 	bit = vtag & 0x1F;
   1976 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1977 	/* Re-init to load the changes */
   1978 	ixv_init_locked(adapter);
   1979 	IXGBE_CORE_UNLOCK(adapter);
   1980 }
   1981 #endif
   1982 
   1983 static void
   1984 ixv_enable_intr(struct adapter *adapter)
   1985 {
   1986 	struct ixgbe_hw *hw = &adapter->hw;
   1987 	struct ix_queue *que = adapter->queues;
   1988 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1989 
   1990 
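        	/*
        	 * VTEIMS unmasks the non-queue (mailbox) cause here; the
        	 * per-queue causes are unmasked below via ixv_enable_queue().
        	 * VTEIAC selects which causes auto-clear on interrupt.
        	 */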
   1991 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1992 
   1993 	mask = IXGBE_EIMS_ENABLE_MASK;
   1994 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1995 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1996 
    1997 	for (int i = 0; i < adapter->num_queues; i++, que++)
   1998 		ixv_enable_queue(adapter, que->msix);
   1999 
   2000 	IXGBE_WRITE_FLUSH(hw);
   2001 
   2002 	return;
   2003 }
   2004 
   2005 static void
   2006 ixv_disable_intr(struct adapter *adapter)
   2007 {
   2008 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2009 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   2010 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2011 	return;
   2012 }
   2013 
    2014 /*
    2015 ** Set up the correct IVAR register for a particular MSI-X interrupt
    2016 **  - entry is the register array entry
    2017 **  - vector is the MSI-X vector for this queue
    2018 **  - type is RX/TX/MISC
    2019 */
   2020 static void
   2021 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2022 {
   2023 	struct ixgbe_hw *hw = &adapter->hw;
   2024 	u32 ivar, index;
   2025 
   2026 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2027 
   2028 	if (type == -1) { /* MISC IVAR */
   2029 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2030 		ivar &= ~0xFF;
   2031 		ivar |= vector;
   2032 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2033 	} else {	/* RX/TX IVARS */
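        		/*
        		 * Each 32-bit VTIVAR register holds the vectors for two
        		 * queues, one byte per RX/TX entry; compute the bit
        		 * offset within the register for this entry and type.
        		 */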
   2034 		index = (16 * (entry & 1)) + (8 * type);
   2035 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2036 		ivar &= ~(0xFF << index);
   2037 		ivar |= (vector << index);
   2038 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2039 	}
   2040 }
   2041 
   2042 static void
   2043 ixv_configure_ivars(struct adapter *adapter)
   2044 {
   2045 	struct  ix_queue *que = adapter->queues;
   2046 
    2047 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    2048 		/* First the RX queue entry */
    2049 		ixv_set_ivar(adapter, i, que->msix, 0);
    2050 		/* ... and the TX */
    2051 		ixv_set_ivar(adapter, i, que->msix, 1);
    2052 		/* Set an initial value in EITR */
    2053 		IXGBE_WRITE_REG(&adapter->hw,
    2054 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   2055 	}
   2056 
   2057 	/* For the mailbox interrupt */
    2058 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2059 }
   2060 
   2061 
    2062 /*
    2063 ** Tasklet handler for MSI-X MBX (mailbox) interrupts
    2064 **  - run outside of interrupt context since it might sleep
    2065 */
   2066 static void
   2067 ixv_handle_mbx(void *context)
   2068 {
   2069 	struct adapter  *adapter = context;
   2070 
   2071 	ixgbe_check_link(&adapter->hw,
   2072 	    &adapter->link_speed, &adapter->link_up, 0);
   2073 	ixv_update_link_status(adapter);
   2074 }
   2075 
   2076 /*
   2077 ** The VF stats registers never have a truly virgin
   2078 ** starting point, so this routine tries to make an
   2079 ** artificial one, marking ground zero on attach as
   2080 ** it were.
   2081 */
   2082 static void
   2083 ixv_save_stats(struct adapter *adapter)
   2084 {
   2085 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2086 
   2087 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2088 		stats->saved_reset_vfgprc +=
   2089 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2090 		stats->saved_reset_vfgptc +=
   2091 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2092 		stats->saved_reset_vfgorc +=
   2093 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2094 		stats->saved_reset_vfgotc +=
   2095 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2096 		stats->saved_reset_vfmprc +=
   2097 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2098 	}
   2099 }
   2100 
   2101 static void
   2102 ixv_init_stats(struct adapter *adapter)
   2103 {
   2104 	struct ixgbe_hw *hw = &adapter->hw;
   2105 
   2106 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2107 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2108 	adapter->stats.vf.last_vfgorc |=
   2109 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2110 
   2111 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2112 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2113 	adapter->stats.vf.last_vfgotc |=
   2114 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2115 
   2116 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2117 
   2118 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2119 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2120 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2121 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2122 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2123 }
   2124 
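        /*
        ** The VF counters are only 32 bits wide (36 bits for the octet
        ** counters, split across LSB/MSB registers) and wrap silently, so
        ** these macros fold each new reading into a wider event counter,
        ** adding the full register range whenever a wrap is detected.
        */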
   2125 #define UPDATE_STAT_32(reg, last, count)		\
   2126 {							\
   2127 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2128 	if (current < last)				\
   2129 		count.ev_count += 0x100000000LL;	\
   2130 	last = current;					\
   2131 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2132 	count.ev_count |= current;			\
   2133 }
   2134 
   2135 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2136 {							\
   2137 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2138 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2139 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2140 	if (current < last)				\
   2141 		count.ev_count += 0x1000000000LL;	\
   2142 	last = current;					\
   2143 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2144 	count.ev_count |= current;			\
   2145 }
   2146 
   2147 /*
   2148 ** ixv_update_stats - Update the board statistics counters.
   2149 */
   2150 void
   2151 ixv_update_stats(struct adapter *adapter)
   2152 {
    2153 	struct ixgbe_hw *hw = &adapter->hw;
    2154 
    2155 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
    2156 	    adapter->stats.vf.vfgprc);
    2157 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
    2158 	    adapter->stats.vf.vfgptc);
    2159 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
    2160 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
    2161 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
    2162 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
    2163 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
    2164 	    adapter->stats.vf.vfmprc);
   2165 }
   2166 
   2167 /**********************************************************************
   2168  *
    2169  *  This routine is called from the "debug" sysctl handler
    2170  *  (ixv_sysctl_debug). It provides a way to take a look at important
    2171  *  statistics maintained by the driver and hardware.
   2172  *
   2173  **********************************************************************/
   2174 static void
   2175 ixv_print_debug_info(struct adapter *adapter)
   2176 {
    2177 	device_t dev = adapter->dev;
    2178 	struct ixgbe_hw         *hw = &adapter->hw;
    2179 	struct ix_queue         *que = adapter->queues;
    2180 	struct rx_ring          *rxr;
    2181 	struct tx_ring          *txr;
    2182 #ifdef LRO
    2183 	struct lro_ctrl         *lro;
    2184 #endif /* LRO */
    2185 
    2186 	device_printf(dev, "Error Byte Count = %u\n",
    2187 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
    2188 
    2189 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    2190 		txr = que->txr;
    2191 		rxr = que->rxr;
    2192 #ifdef LRO
    2193 		lro = &rxr->lro;
    2194 #endif /* LRO */
    2195 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
    2196 		    que->msix, (unsigned long)que->irqs.ev_count);
    2197 		device_printf(dev, "RX(%d) Packets Received: %llu\n",
    2198 		    rxr->me, (unsigned long long)rxr->rx_packets.ev_count);
    2199 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
    2200 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
    2201 #ifdef LRO
    2202 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
    2203 		    rxr->me, (long long)lro->lro_queued);
    2204 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
    2205 		    rxr->me, (long long)lro->lro_flushed);
    2206 #endif /* LRO */
    2207 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
    2208 		    txr->me, (unsigned long)txr->total_packets.ev_count);
    2209 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
    2210 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
    2211 	}
    2212 
    2213 	device_printf(dev, "MBX IRQ Handled: %lu\n",
    2214 	    (unsigned long)adapter->link_irq.ev_count);
    2215 	return;
   2216 }
   2217 
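        /*
        ** Sysctl handler for the per-device "debug" node created in
        ** ixv_add_device_sysctls(); writing 1 to it (for example
        ** "sysctl -w hw.ixv0.debug=1", assuming the device attached as
        ** ixv0) dumps the counters printed by ixv_print_debug_info().
        */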
   2218 static int
   2219 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2220 {
   2221 	struct sysctlnode node;
   2222 	int error, result;
   2223 	struct adapter *adapter;
   2224 
   2225 	node = *rnode;
   2226 	adapter = (struct adapter *)node.sysctl_data;
   2227 	node.sysctl_data = &result;
   2228 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2229 
   2230 	if (error)
   2231 		return error;
   2232 
   2233 	if (result == 1)
   2234 		ixv_print_debug_info(adapter);
   2235 
   2236 	return 0;
   2237 }
   2238 
   2239 const struct sysctlnode *
   2240 ixv_sysctl_instance(struct adapter *adapter)
   2241 {
   2242 	const char *dvname;
   2243 	struct sysctllog **log;
   2244 	int rc;
   2245 	const struct sysctlnode *rnode;
   2246 
   2247 	log = &adapter->sysctllog;
   2248 	dvname = device_xname(adapter->dev);
   2249 
   2250 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2251 	    0, CTLTYPE_NODE, dvname,
   2252 	    SYSCTL_DESCR("ixv information and settings"),
   2253 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2254 		goto err;
   2255 
   2256 	return rnode;
   2257 err:
   2258 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2259 	return NULL;
   2260 }
   2261 
   2262 static void
   2263 ixv_add_device_sysctls(struct adapter *adapter)
   2264 {
   2265 	struct sysctllog **log;
   2266 	const struct sysctlnode *rnode, *cnode;
   2267 	device_t dev;
   2268 
   2269 	dev = adapter->dev;
   2270 	log = &adapter->sysctllog;
   2271 
   2272 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2273 		aprint_error_dev(dev, "could not create sysctl root\n");
   2274 		return;
   2275 	}
   2276 
   2277 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2278 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2279 	    "debug", SYSCTL_DESCR("Debug Info"),
   2280 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2281 		aprint_error_dev(dev, "could not create sysctl\n");
   2282 
   2283 	/* XXX This is an *instance* sysctl controlling a *global* variable.
   2284 	 * XXX It's that way in the FreeBSD driver that this derives from.
   2285 	 */
   2286 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2287 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2288 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2289 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2290 		aprint_error_dev(dev, "could not create sysctl\n");
   2291 }
   2292 
   2293 /*
   2294  * Add statistic sysctls for the VF.
   2295  */
   2296 static void
   2297 ixv_add_stats_sysctls(struct adapter *adapter)
   2298 {
   2299 	device_t dev = adapter->dev;
   2300 	struct ix_queue *que = &adapter->queues[0];
   2301 	struct tx_ring *txr = que->txr;
   2302 	struct rx_ring *rxr = que->rxr;
   2303 
   2304 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2305 
   2306 	const char *xname = device_xname(dev);
   2307 
   2308 	/* Driver Statistics */
   2309 	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
   2310 	    NULL, xname, "Driver dropped packets");
   2311 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2312 	    NULL, xname, "m_defrag() failed");
   2313 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2314 	    NULL, xname, "Watchdog timeouts");
   2315 
   2316 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2317 	    xname, "Good Packets Received");
   2318 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2319 	    xname, "Good Octets Received");
   2320 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2321 	    xname, "Multicast Packets Received");
   2322 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2323 	    xname, "Good Packets Transmitted");
   2324 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2325 	    xname, "Good Octets Transmitted");
   2326 	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
   2327 	    xname, "IRQs on queue");
   2328 	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
   2329 	    xname, "RX irqs on queue");
   2330 	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
   2331 	    xname, "RX packets");
   2332 	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
   2333 	    xname, "RX bytes");
   2334 	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
   2335 	    xname, "Discarded RX packets");
   2336 	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
   2337 	    xname, "TX Packets");
   2338 	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
   2339 	    xname, "# of times not enough descriptors were available during TX");
   2340 	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
   2341 	    xname, "TX TSO");
   2342 }
   2343 
   2344 static void
   2345 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2346 	const char *description, int *limit, int value)
   2347 {
   2348 	device_t dev =  adapter->dev;
   2349 	struct sysctllog **log;
   2350 	const struct sysctlnode *rnode, *cnode;
   2351 
   2352 	log = &adapter->sysctllog;
   2353 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2354 		aprint_error_dev(dev, "could not create sysctl root\n");
   2355 		return;
   2356 	}
   2357 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2358 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2359 	    name, SYSCTL_DESCR(description),
   2360 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2361 		aprint_error_dev(dev, "could not create sysctl\n");
   2362 	*limit = value;
   2363 }
   2364