      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
     34 /*$NetBSD: ixv.c,v 1.57 2017/08/30 08:24:57 msaitoh Exp $*/
     35 
     36 #ifdef _KERNEL_OPT
     37 #include "opt_inet.h"
     38 #include "opt_inet6.h"
     39 #include "opt_net_mpsafe.h"
     40 #endif
     41 
     42 #include "ixgbe.h"
     43 #include "vlan.h"
     44 
     45 /*********************************************************************
     46  *  Driver version
     47  *********************************************************************/
     48 char ixv_driver_version[] = "1.4.6-k";
     49 
     50 /*********************************************************************
     51  *  PCI Device ID Table
     52  *
     53  *  Used by probe to select devices to load on
     54  *  Last field stores an index into ixv_strings
     55  *  Last entry must be all 0s
     56  *
     57  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     58  *********************************************************************/
     59 
     60 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     61 {
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     66 	/* required last entry */
     67 	{0, 0, 0, 0, 0}
     68 };
     69 
     70 /*********************************************************************
     71  *  Table of branding strings
     72  *********************************************************************/
     73 
     74 static const char    *ixv_strings[] = {
     75 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     76 };
     77 
     78 /*********************************************************************
     79  *  Function prototypes
     80  *********************************************************************/
     81 static int      ixv_probe(device_t, cfdata_t, void *);
     82 static void	ixv_attach(device_t, device_t, void *);
     83 static int      ixv_detach(device_t, int);
     84 #if 0
     85 static int      ixv_shutdown(device_t);
     86 #endif
     87 static int	ixv_ifflags_cb(struct ethercom *);
     88 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     89 static int	ixv_init(struct ifnet *);
     90 static void	ixv_init_locked(struct adapter *);
     91 static void	ixv_ifstop(struct ifnet *, int);
     92 static void     ixv_stop(void *);
     93 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     94 static int      ixv_media_change(struct ifnet *);
     95 static void     ixv_identify_hardware(struct adapter *);
     96 static int      ixv_allocate_pci_resources(struct adapter *,
     97 		    const struct pci_attach_args *);
     98 static int      ixv_allocate_msix(struct adapter *,
     99 		    const struct pci_attach_args *);
    100 static int	ixv_setup_msix(struct adapter *);
    101 static void	ixv_free_pci_resources(struct adapter *);
    102 static void     ixv_local_timer(void *);
    103 static void     ixv_local_timer_locked(void *);
    104 static void     ixv_setup_interface(device_t, struct adapter *);
    105 static void     ixv_config_link(struct adapter *);
    106 
    107 static void     ixv_initialize_transmit_units(struct adapter *);
    108 static void     ixv_initialize_receive_units(struct adapter *);
    109 
    110 static void     ixv_enable_intr(struct adapter *);
    111 static void     ixv_disable_intr(struct adapter *);
    112 static void     ixv_set_multi(struct adapter *);
    113 static void     ixv_update_link_status(struct adapter *);
    114 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    115 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    116 static void	ixv_configure_ivars(struct adapter *);
    117 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    118 
    119 static void	ixv_setup_vlan_support(struct adapter *);
    120 #if 0
    121 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    122 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    123 #endif
    124 
    125 static void	ixv_add_device_sysctls(struct adapter *);
    126 static void	ixv_save_stats(struct adapter *);
    127 static void	ixv_init_stats(struct adapter *);
    128 static void	ixv_update_stats(struct adapter *);
    129 static void	ixv_add_stats_sysctls(struct adapter *);
    130 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    131 		    const char *, int *, int);
    132 
    133 /* The MSI/X Interrupt handlers */
    134 static int	ixv_msix_que(void *);
    135 static int	ixv_msix_mbx(void *);
    136 
    137 /* Deferred interrupt tasklets */
    138 static void	ixv_handle_que(void *);
    139 static void	ixv_handle_mbx(void *);
    140 
    141 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    142 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    143 
    144 #ifdef DEV_NETMAP
    145 /*
    146  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
    147  * if_ix.c.
    148  */
    149 extern void ixgbe_netmap_attach(struct adapter *adapter);
    150 
    151 #include <net/netmap.h>
    152 #include <sys/selinfo.h>
    153 #include <dev/netmap/netmap_kern.h>
    154 #endif /* DEV_NETMAP */
    155 
    156 /*********************************************************************
     157  *  Device Interface Entry Points
    158  *********************************************************************/
    159 
    160 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    161     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    162     DVF_DETACH_SHUTDOWN);
    163 
     164 #if 0
    165 static device_method_t ixv_methods[] = {
    166 	/* Device interface */
    167 	DEVMETHOD(device_probe, ixv_probe),
    168 	DEVMETHOD(device_attach, ixv_attach),
    169 	DEVMETHOD(device_detach, ixv_detach),
    170 	DEVMETHOD(device_shutdown, ixv_shutdown),
    171 	DEVMETHOD_END
    172 };
    173 #endif
    174 
    175 #if 0
    176 static driver_t ixv_driver = {
    177 	"ixv", ixv_methods, sizeof(struct adapter),
    178 };
    179 
    180 devclass_t ixv_devclass;
    181 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    182 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    183 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    184 #ifdef DEV_NETMAP
    185 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    186 #endif /* DEV_NETMAP */
    187 /* XXX depend on 'ix' ? */
    188 #endif
    189 
    190 /*
    191 ** TUNEABLE PARAMETERS:
    192 */
    193 
    194 /* Number of Queues - do not exceed MSIX vectors - 1 */
    195 static int ixv_num_queues = 0;
    196 #define	TUNABLE_INT(__x, __y)
    197 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    198 
    199 /*
    200 ** AIM: Adaptive Interrupt Moderation
    201 ** which means that the interrupt rate
    202 ** is varied over time based on the
    203 ** traffic for that interrupt vector
    204 */
    205 static bool ixv_enable_aim = false;
    206 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    207 
    208 /* How many packets rxeof tries to clean at a time */
    209 static int ixv_rx_process_limit = 256;
    210 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    211 
    212 /* How many packets txeof tries to clean at a time */
    213 static int ixv_tx_process_limit = 256;
    214 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    215 
    216 /*
     217 ** Number of TX descriptors per ring;
     218 ** set higher than RX as this seems to be
     219 ** the better performing choice.
    220 */
    221 static int ixv_txd = DEFAULT_TXD;
    222 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    223 
    224 /* Number of RX descriptors per ring */
    225 static int ixv_rxd = DEFAULT_RXD;
    226 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    227 
    228 /*
     229 ** Shadow VFTA table; this is needed because
    230 ** the real filter table gets cleared during
    231 ** a soft reset and we need to repopulate it.
    232 */
    233 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    234 
    235 #ifdef NET_MPSAFE
    236 #define IXGBE_MPSAFE		1
    237 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    238 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    239 #else
    240 #define IXGBE_CALLOUT_FLAGS	0
    241 #define IXGBE_SOFTINFT_FLAGS	0
    242 #endif
    243 
    244 /*********************************************************************
    245  *  Device identification routine
    246  *
     247  *  ixv_probe determines whether the driver should be loaded for
     248  *  an adapter, based on the PCI vendor/device ID of the adapter.
    249  *
    250  *  return 1 on success, 0 on failure
    251  *********************************************************************/
    252 
    253 static int
    254 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    255 {
    256 #ifdef __HAVE_PCI_MSI_MSIX
    257 	const struct pci_attach_args *pa = aux;
    258 
    259 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    260 #else
    261 	return 0;
    262 #endif
    263 }
    264 
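         /*
          * ixv_lookup: scan the vendor/device ID table for an entry matching
          * this PCI device; returns the matching entry or NULL.
          */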
    265 static ixgbe_vendor_info_t *
    266 ixv_lookup(const struct pci_attach_args *pa)
    267 {
    268 	pcireg_t subid;
    269 	ixgbe_vendor_info_t *ent;
    270 
    271 	INIT_DEBUGOUT("ixv_lookup: begin");
    272 
    273 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    274 		return NULL;
    275 
    276 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    277 
    278 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    279 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    280 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    281 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    282 		     (ent->subvendor_id == 0)) &&
    283 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    284 		     (ent->subdevice_id == 0))) {
    285 			return ent;
    286 		}
    287 	}
    288 	return NULL;
    289 }
    290 
    291 
    292 /*********************************************************************
    293  *
    294  *  Determine hardware revision.
    295  *
    296  **********************************************************************/
    297 static void
    298 ixv_identify_hardware(struct adapter *adapter)
    299 {
    300 	pcitag_t tag;
    301 	pci_chipset_tag_t pc;
    302 	pcireg_t subid, id;
    303 	struct ixgbe_hw *hw = &adapter->hw;
    304 
    305 	pc = adapter->osdep.pc;
    306 	tag = adapter->osdep.tag;
    307 
    308 	/*
     309 	** Make sure BUSMASTER is set; on a VM under
     310 	** KVM it may not be, and that will break things.
    311 	*/
    312 	ixgbe_pci_enable_busmaster(pc, tag);
    313 
    314 	id = pci_conf_read(pc, tag, PCI_ID_REG);
    315 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
    316 
    317 	/* Save off the information about this board */
    318 	hw->vendor_id = PCI_VENDOR(id);
    319 	hw->device_id = PCI_PRODUCT(id);
    320 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
    321 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    322 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    323 
    324 	/* We need this to determine device-specific things */
    325 	ixgbe_set_mac_type(hw);
    326 
    327 	/* Set the right number of segments */
    328 	adapter->num_segs = IXGBE_82599_SCATTER;
    329 
    330 	return;
    331 }
    332 
    333 /*********************************************************************
    334  *  Device initialization routine
    335  *
    336  *  The attach entry point is called when the driver is being loaded.
    337  *  This routine identifies the type of hardware, allocates all resources
    338  *  and initializes the hardware.
    339  *
     340  *  On NetBSD this attach routine returns void; on failure the device is left unattached.
    341  *********************************************************************/
    342 
    343 static void
    344 ixv_attach(device_t parent, device_t dev, void *aux)
    345 {
    346 	struct adapter *adapter;
    347 	struct ixgbe_hw *hw;
    348 	int             error = 0;
    349 	ixgbe_vendor_info_t *ent;
    350 	const struct pci_attach_args *pa = aux;
    351 
    352 	INIT_DEBUGOUT("ixv_attach: begin");
    353 
    354 	/* Allocate, clear, and link in our adapter structure */
    355 	adapter = device_private(dev);
    356 	adapter->dev = dev;
    357 	hw = &adapter->hw;
    358 
    359 #ifdef DEV_NETMAP
    360 	adapter->init_locked = ixv_init_locked;
    361 	adapter->stop_locked = ixv_stop;
    362 #endif
    363 
    364 	adapter->osdep.pc = pa->pa_pc;
    365 	adapter->osdep.tag = pa->pa_tag;
    366 	if (pci_dma64_available(pa))
    367 		adapter->osdep.dmat = pa->pa_dmat64;
    368 	else
    369 		adapter->osdep.dmat = pa->pa_dmat;
    370 	adapter->osdep.attached = false;
    371 
    372 	ent = ixv_lookup(pa);
    373 
    374 	KASSERT(ent != NULL);
    375 
    376 	aprint_normal(": %s, Version - %s\n",
    377 	    ixv_strings[ent->index], ixv_driver_version);
    378 
    379 	/* Core Lock Init*/
    380 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    381 
    382 	/* Set up the timer callout */
    383 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    384 
    385 	/* Determine hardware revision */
    386 	ixv_identify_hardware(adapter);
    387 
    388 	/* Do base PCI setup - map BAR0 */
    389 	if (ixv_allocate_pci_resources(adapter, pa)) {
    390 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    391 		error = ENXIO;
    392 		goto err_out;
    393 	}
    394 
    395 	/* Sysctls for limiting the amount of work done in the taskqueues */
    396 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    397 	    "max number of rx packets to process",
    398 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    399 
    400 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    401 	    "max number of tx packets to process",
    402 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    403 
    404 	/* Do descriptor calc and sanity checks */
    405 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    406 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    407 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    408 		adapter->num_tx_desc = DEFAULT_TXD;
    409 	} else
    410 		adapter->num_tx_desc = ixv_txd;
    411 
    412 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    413 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    414 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    415 		adapter->num_rx_desc = DEFAULT_RXD;
    416 	} else
    417 		adapter->num_rx_desc = ixv_rxd;
    418 
    419 	/* Allocate our TX/RX Queues */
    420 	if (ixgbe_allocate_queues(adapter)) {
    421 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    422 		error = ENOMEM;
    423 		goto err_out;
    424 	}
    425 
    426 	/*
     427 	** Initialize the shared code: it is
     428 	** at this point that the mac type is set.
    429 	*/
    430 	error = ixgbe_init_shared_code(hw);
    431 	if (error) {
    432 		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
    433 		error = EIO;
    434 		goto err_late;
    435 	}
    436 
    437 	/* Setup the mailbox */
    438 	ixgbe_init_mbx_params_vf(hw);
    439 
    440 	/* Reset mbox api to 1.0 */
    441 	error = ixgbe_reset_hw(hw);
    442 	if (error == IXGBE_ERR_RESET_FAILED)
    443 		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
    444 	else if (error)
    445 		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
    446 	if (error) {
    447 		error = EIO;
    448 		goto err_late;
    449 	}
    450 
    451 	/* Negotiate mailbox API version */
    452 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    453 	if (error)
    454 		aprint_debug_dev(dev,
    455 		    "MBX API 1.1 negotiation failed! Error %d\n", error);
    456 
    457 	error = ixgbe_init_hw(hw);
    458 	if (error) {
    459 		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
    460 		error = EIO;
    461 		goto err_late;
    462 	}
    463 
    464 	error = ixv_allocate_msix(adapter, pa);
    465 	if (error) {
    466 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    467 		goto err_late;
    468 	}
    469 
    470 	/* If no mac address was assigned, make a random one */
    471 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    472 		u8 addr[ETHER_ADDR_LEN];
    473 		uint64_t rndval = cprng_fast64();
    474 
    475 		memcpy(addr, &rndval, sizeof(addr));
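         		/* Clear the multicast bit and set the locally administered bit */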
    476 		addr[0] &= 0xFE;
    477 		addr[0] |= 0x02;
    478 		bcopy(addr, hw->mac.addr, sizeof(addr));
    479 	}
    480 
     481 	/* hw.ixv defaults init */
    482 	adapter->enable_aim = ixv_enable_aim;
    483 
    484 	/* Setup OS specific network interface */
    485 	ixv_setup_interface(dev, adapter);
    486 
    487 	/* Do the stats setup */
    488 	ixv_save_stats(adapter);
    489 	ixv_init_stats(adapter);
    490 
    491 	/* Register for VLAN events */
    492 #if 0 /* XXX delete after write? */
    493 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    494 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    495 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    496 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    497 #endif
    498 
    499 	/* Add sysctls */
    500 	ixv_add_device_sysctls(adapter);
    501 	ixv_add_stats_sysctls(adapter);
    502 
    503 #ifdef DEV_NETMAP
    504 	ixgbe_netmap_attach(adapter);
    505 #endif /* DEV_NETMAP */
    506 	INIT_DEBUGOUT("ixv_attach: end");
    507 	adapter->osdep.attached = true;
    508 
    509 	return;
    510 
    511 err_late:
    512 	ixgbe_free_transmit_structures(adapter);
    513 	ixgbe_free_receive_structures(adapter);
    514 err_out:
    515 	ixv_free_pci_resources(adapter);
    516 	return;
    517 
    518 }
    519 
    520 /*********************************************************************
    521  *  Device removal routine
    522  *
    523  *  The detach entry point is called when the driver is being removed.
    524  *  This routine stops the adapter and deallocates all the resources
    525  *  that were allocated for driver operation.
    526  *
    527  *  return 0 on success, positive on failure
    528  *********************************************************************/
    529 
    530 static int
    531 ixv_detach(device_t dev, int flags)
    532 {
    533 	struct adapter *adapter = device_private(dev);
    534 	struct ix_queue *que = adapter->queues;
    535 	struct tx_ring *txr = adapter->tx_rings;
    536 	struct rx_ring *rxr = adapter->rx_rings;
    537 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    538 
    539 	INIT_DEBUGOUT("ixv_detach: begin");
    540 	if (adapter->osdep.attached == false)
    541 		return 0;
    542 
    543 	/* Stop the interface. Callouts are stopped in it. */
    544 	ixv_ifstop(adapter->ifp, 1);
    545 
    546 #if NVLAN > 0
    547 	/* Make sure VLANS are not using driver */
    548 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    549 		;	/* nothing to do: no VLANs */
    550 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    551 		vlan_ifdetach(adapter->ifp);
    552 	else {
    553 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    554 		return EBUSY;
    555 	}
    556 #endif
    557 
    558 	IXGBE_CORE_LOCK(adapter);
    559 	ixv_stop(adapter);
    560 	IXGBE_CORE_UNLOCK(adapter);
    561 
    562 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    563 #ifndef IXGBE_LEGACY_TX
    564 		softint_disestablish(txr->txr_si);
    565 #endif
    566 		softint_disestablish(que->que_si);
    567 	}
    568 
    569 	/* Drain the Mailbox(link) queue */
    570 	softint_disestablish(adapter->link_si);
    571 
    572 	/* Unregister VLAN events */
    573 #if 0 /* XXX msaitoh delete after write? */
    574 	if (adapter->vlan_attach != NULL)
    575 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    576 	if (adapter->vlan_detach != NULL)
    577 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    578 #endif
    579 
    580 	ether_ifdetach(adapter->ifp);
    581 	callout_halt(&adapter->timer, NULL);
    582 #ifdef DEV_NETMAP
    583 	netmap_detach(adapter->ifp);
    584 #endif /* DEV_NETMAP */
    585 	ixv_free_pci_resources(adapter);
    586 #if 0 /* XXX the NetBSD port is probably missing something here */
    587 	bus_generic_detach(dev);
    588 #endif
    589 	if_detach(adapter->ifp);
    590 	if_percpuq_destroy(adapter->ipq);
    591 
    592 	sysctl_teardown(&adapter->sysctllog);
    593 	evcnt_detach(&adapter->handleq);
    594 	evcnt_detach(&adapter->req);
    595 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    596 	evcnt_detach(&adapter->mbuf_defrag_failed);
    597 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    598 	evcnt_detach(&adapter->einval_tx_dma_setup);
    599 	evcnt_detach(&adapter->other_tx_dma_setup);
    600 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    601 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    602 	evcnt_detach(&adapter->watchdog_events);
    603 	evcnt_detach(&adapter->tso_err);
    604 	evcnt_detach(&adapter->link_irq);
    605 
    606 	txr = adapter->tx_rings;
    607 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    608 		evcnt_detach(&adapter->queues[i].irqs);
    609 		evcnt_detach(&txr->no_desc_avail);
    610 		evcnt_detach(&txr->total_packets);
    611 		evcnt_detach(&txr->tso_tx);
    612 #ifndef IXGBE_LEGACY_TX
    613 		evcnt_detach(&txr->pcq_drops);
    614 #endif
    615 
    616 		evcnt_detach(&rxr->rx_packets);
    617 		evcnt_detach(&rxr->rx_bytes);
    618 		evcnt_detach(&rxr->rx_copies);
    619 		evcnt_detach(&rxr->no_jmbuf);
    620 		evcnt_detach(&rxr->rx_discarded);
    621 	}
    622 	evcnt_detach(&stats->ipcs);
    623 	evcnt_detach(&stats->l4cs);
    624 	evcnt_detach(&stats->ipcs_bad);
    625 	evcnt_detach(&stats->l4cs_bad);
    626 
    627 	/* Packet Reception Stats */
    628 	evcnt_detach(&stats->vfgorc);
    629 	evcnt_detach(&stats->vfgprc);
    630 	evcnt_detach(&stats->vfmprc);
    631 
    632 	/* Packet Transmission Stats */
    633 	evcnt_detach(&stats->vfgotc);
    634 	evcnt_detach(&stats->vfgptc);
    635 
    636 	ixgbe_free_transmit_structures(adapter);
    637 	ixgbe_free_receive_structures(adapter);
    638 
    639 	IXGBE_CORE_LOCK_DESTROY(adapter);
    640 	return (0);
    641 }
    642 
    643 /*********************************************************************
    644  *  Init entry point
    645  *
     646  *  This routine is used in two ways. It is used by the stack as the
     647  *  init entry point in the network interface structure. It is also used
    648  *  by the driver as a hw/sw initialization routine to get to a
    649  *  consistent state.
    650  *
    651  *  return 0 on success, positive on failure
    652  **********************************************************************/
    653 #define IXGBE_MHADD_MFS_SHIFT 16
    654 
    655 static void
    656 ixv_init_locked(struct adapter *adapter)
    657 {
    658 	struct ifnet	*ifp = adapter->ifp;
    659 	device_t 	dev = adapter->dev;
    660 	struct ixgbe_hw *hw = &adapter->hw;
    661 	int error = 0;
    662 
    663 	INIT_DEBUGOUT("ixv_init_locked: begin");
    664 	KASSERT(mutex_owned(&adapter->core_mtx));
    665 	hw->adapter_stopped = FALSE;
    666 	ixgbe_stop_adapter(hw);
     667 	callout_stop(&adapter->timer);
    668 
    669 	/* reprogram the RAR[0] in case user changed it. */
    670 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    671 
     672 	/* Get the latest mac address; the user can use a LAA */
    673 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    674 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
     675 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    676 	hw->addr_ctrl.rar_used_count = 1;
    677 
    678 	/* Prepare transmit descriptors and buffers */
    679 	if (ixgbe_setup_transmit_structures(adapter)) {
    680 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    681 		ixv_stop(adapter);
    682 		return;
    683 	}
    684 
    685 	/* Reset VF and renegotiate mailbox API version */
    686 	ixgbe_reset_hw(hw);
    687 	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
    688 	if (error)
    689 		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
    690 
    691 	ixv_initialize_transmit_units(adapter);
    692 
    693 	/* Setup Multicast table */
    694 	ixv_set_multi(adapter);
    695 
    696 	/*
    697 	** Determine the correct mbuf pool
    698 	** for doing jumbo/headersplit
    699 	*/
    700 	if (ifp->if_mtu > ETHERMTU)
    701 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    702 	else
    703 		adapter->rx_mbuf_sz = MCLBYTES;
    704 
    705 	/* Prepare receive descriptors and buffers */
    706 	if (ixgbe_setup_receive_structures(adapter)) {
    707 		device_printf(dev, "Could not setup receive structures\n");
    708 		ixv_stop(adapter);
    709 		return;
    710 	}
    711 
    712 	/* Configure RX settings */
    713 	ixv_initialize_receive_units(adapter);
    714 
    715 #if 0 /* XXX isn't it required? -- msaitoh  */
    716 	/* Set the various hardware offload abilities */
    717 	ifp->if_hwassist = 0;
    718 	if (ifp->if_capenable & IFCAP_TSO4)
    719 		ifp->if_hwassist |= CSUM_TSO;
    720 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    721 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    722 #if __FreeBSD_version >= 800000
    723 		ifp->if_hwassist |= CSUM_SCTP;
    724 #endif
    725 	}
    726 #endif
    727 
    728 	/* Set up VLAN offload and filter */
    729 	ixv_setup_vlan_support(adapter);
    730 
    731 	/* Set up MSI/X routing */
    732 	ixv_configure_ivars(adapter);
    733 
    734 	/* Set up auto-mask */
    735 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    736 
    737 	/* Set moderation on the Link interrupt */
    738 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    739 
    740 	/* Stats init */
    741 	ixv_init_stats(adapter);
    742 
    743 	/* Config/Enable Link */
    744 	ixv_config_link(adapter);
    745 	hw->mac.get_link_status = TRUE;
    746 
    747 	/* Start watchdog */
    748 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    749 
    750 	/* And now turn on interrupts */
    751 	ixv_enable_intr(adapter);
    752 
    753 	/* Now inform the stack we're ready */
    754 	ifp->if_flags |= IFF_RUNNING;
    755 	ifp->if_flags &= ~IFF_OACTIVE;
    756 
    757 	return;
    758 }
    759 
    760 /*
    761 **
    762 ** MSIX Interrupt Handlers and Tasklets
    763 **
    764 */
    765 
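         /*
          * Unmask the MSI-X vector for one queue by setting its bit in the
          * VF interrupt mask set register (VTEIMS).
          */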
    766 static inline void
    767 ixv_enable_queue(struct adapter *adapter, u32 vector)
    768 {
    769 	struct ixgbe_hw *hw = &adapter->hw;
    770 	u32	queue = 1 << vector;
    771 	u32	mask;
    772 
    773 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    774 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    775 }
    776 
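         /*
          * Mask the MSI-X vector for one queue by setting its bit in the
          * VF interrupt mask clear register (VTEIMC).
          */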
    777 static inline void
    778 ixv_disable_queue(struct adapter *adapter, u32 vector)
    779 {
    780 	struct ixgbe_hw *hw = &adapter->hw;
    781 	u64	queue = (u64)(1 << vector);
    782 	u32	mask;
    783 
    784 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    785 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    786 }
    787 
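         /*
          * Force an interrupt on the given queues by writing their bits to
          * the VF interrupt cause set register (VTEICS).
          */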
    788 static inline void
    789 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    790 {
    791 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    792 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    793 }
    794 
    795 
    796 /*********************************************************************
    797  *
     798  *  MSI-X Queue Interrupt Service routine
    799  *
    800  **********************************************************************/
    801 int
    802 ixv_msix_que(void *arg)
    803 {
    804 	struct ix_queue	*que = arg;
    805 	struct adapter  *adapter = que->adapter;
    806 #ifdef IXGBE_LEGACY_TX
    807 	struct ifnet    *ifp = adapter->ifp;
    808 #endif
    809 	struct tx_ring	*txr = que->txr;
    810 	struct rx_ring	*rxr = que->rxr;
    811 	bool		more;
    812 	u32		newitr = 0;
    813 
    814 	ixv_disable_queue(adapter, que->msix);
    815 	++que->irqs.ev_count;
    816 
    817 #ifdef __NetBSD__
    818 	/* Don't run ixgbe_rxeof in interrupt context */
    819 	more = true;
    820 #else
    821 	more = ixgbe_rxeof(que);
    822 #endif
    823 
    824 	IXGBE_TX_LOCK(txr);
    825 	ixgbe_txeof(txr);
    826 	IXGBE_TX_UNLOCK(txr);
    827 
    828 	/* Do AIM now? */
    829 
    830 	if (adapter->enable_aim == false)
    831 		goto no_calc;
    832 	/*
    833 	** Do Adaptive Interrupt Moderation:
     834 	**  - Write out last calculated setting
    835 	**  - Calculate based on average size over
    836 	**    the last interval.
    837 	*/
     838 	if (que->eitr_setting)
     839 		IXGBE_WRITE_REG(&adapter->hw,
     840 		    IXGBE_VTEITR(que->msix),
     841 		    que->eitr_setting);
    842 
    843 	que->eitr_setting = 0;
    844 
    845 	/* Idle, do nothing */
    846 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    847 		goto no_calc;
    848 
    849 	if ((txr->bytes) && (txr->packets))
    850 		newitr = txr->bytes/txr->packets;
    851 	if ((rxr->bytes) && (rxr->packets))
    852 		newitr = max(newitr,
    853 		    (rxr->bytes / rxr->packets));
    854 	newitr += 24; /* account for hardware frame, crc */
    855 
    856 	/* set an upper boundary */
    857 	newitr = min(newitr, 3000);
    858 
    859 	/* Be nice to the mid range */
    860 	if ((newitr > 300) && (newitr < 1200))
    861 		newitr = (newitr / 3);
    862 	else
    863 		newitr = (newitr / 2);
    864 
    865 	newitr |= newitr << 16;
    866 
     867 	/* save for next interrupt */
     868 	que->eitr_setting = newitr;
    869 
    870 	/* Reset state */
    871 	txr->bytes = 0;
    872 	txr->packets = 0;
    873 	rxr->bytes = 0;
    874 	rxr->packets = 0;
    875 
    876 no_calc:
    877 	if (more)
    878 		softint_schedule(que->que_si);
    879 	else /* Reenable this interrupt */
    880 		ixv_enable_queue(adapter, que->msix);
    881 	return 1;
    882 }
    883 
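         /*
          * Mailbox (link) MSI-X interrupt handler: read and clear the
          * interrupt cause, schedule the mailbox softint on a link status
          * change, then re-enable the "other" interrupt.
          */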
    884 static int
    885 ixv_msix_mbx(void *arg)
    886 {
    887 	struct adapter	*adapter = arg;
    888 	struct ixgbe_hw *hw = &adapter->hw;
    889 	u32		reg;
    890 
    891 	++adapter->link_irq.ev_count;
    892 
    893 	/* First get the cause */
    894 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    895 	/* Clear interrupt with write */
    896 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    897 
    898 	/* Link status change */
    899 	if (reg & IXGBE_EICR_LSC)
    900 		softint_schedule(adapter->link_si);
    901 
    902 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    903 
    904 	return 1;
    905 }
    906 
    907 /*********************************************************************
    908  *
    909  *  Media Ioctl callback
    910  *
    911  *  This routine is called whenever the user queries the status of
    912  *  the interface using ifconfig.
    913  *
    914  **********************************************************************/
    915 static void
    916 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
    917 {
    918 	struct adapter *adapter = ifp->if_softc;
    919 
    920 	INIT_DEBUGOUT("ixv_media_status: begin");
    921 	IXGBE_CORE_LOCK(adapter);
    922 	ixv_update_link_status(adapter);
    923 
    924 	ifmr->ifm_status = IFM_AVALID;
    925 	ifmr->ifm_active = IFM_ETHER;
    926 
    927 	if (!adapter->link_active) {
    928 		ifmr->ifm_active |= IFM_NONE;
    929 		IXGBE_CORE_UNLOCK(adapter);
    930 		return;
    931 	}
    932 
    933 	ifmr->ifm_status |= IFM_ACTIVE;
    934 
    935 	switch (adapter->link_speed) {
    936 		case IXGBE_LINK_SPEED_10GB_FULL:
    937 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    938 			break;
    939 		case IXGBE_LINK_SPEED_1GB_FULL:
    940 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    941 			break;
    942 		case IXGBE_LINK_SPEED_100_FULL:
    943 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    944 			break;
    945 	}
    946 
    947 	IXGBE_CORE_UNLOCK(adapter);
    948 
    949 	return;
    950 }
    951 
    952 /*********************************************************************
    953  *
    954  *  Media Ioctl callback
    955  *
     956  *  This routine is called when the user changes speed/duplex using
     957  *  the media/mediaopt options with ifconfig.
    958  *
    959  **********************************************************************/
    960 static int
    961 ixv_media_change(struct ifnet *ifp)
    962 {
    963 	struct adapter *adapter = ifp->if_softc;
    964 	struct ifmedia *ifm = &adapter->media;
    965 
    966 	INIT_DEBUGOUT("ixv_media_change: begin");
    967 
    968 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    969 		return (EINVAL);
    970 
    971 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    972 	case IFM_AUTO:
    973 		break;
    974 	default:
    975 		device_printf(adapter->dev, "Only auto media type\n");
    976 		return (EINVAL);
    977 	}
    978 
    979 	return (0);
    980 }
    981 
    982 
    983 /*********************************************************************
    984  *  Multicast Update
    985  *
     986  *  This routine is called whenever the multicast address list is updated.
    987  *
    988  **********************************************************************/
    989 #define IXGBE_RAR_ENTRIES 16
    990 
    991 static void
    992 ixv_set_multi(struct adapter *adapter)
    993 {
    994 	struct ether_multi *enm;
    995 	struct ether_multistep step;
    996 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
    997 	u8	*update_ptr;
    998 	int	mcnt = 0;
    999 	struct ethercom *ec = &adapter->osdep.ec;
   1000 
   1001 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1002 
   1003 	ETHER_FIRST_MULTI(step, ec, enm);
   1004 	while (enm != NULL) {
   1005 		bcopy(enm->enm_addrlo,
   1006 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1007 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1008 		mcnt++;
   1009 		/* XXX This might be required --msaitoh */
   1010 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1011 			break;
   1012 		ETHER_NEXT_MULTI(step, enm);
   1013 	}
   1014 
   1015 	update_ptr = mta;
   1016 
   1017 	ixgbe_update_mc_addr_list(&adapter->hw,
   1018 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1019 
   1020 	return;
   1021 }
   1022 
   1023 /*
    1024  * This is an iterator function needed by the multicast shared
    1025  * code. It simply feeds the shared code routine the addresses
    1026  * built up in the mta array by ixv_set_multi(), one by one.
   1027  */
   1028 static u8 *
   1029 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1030 {
   1031 	u8 *addr = *update_ptr;
   1032 	u8 *newptr;
   1033 	*vmdq = 0;
   1034 
   1035 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1036 	*update_ptr = newptr;
   1037 
   1038 	return addr;
   1039 }
   1040 
   1041 /*********************************************************************
   1042  *  Timer routine
   1043  *
    1044  *  This routine checks the link status, updates statistics,
   1045  *  and runs the watchdog check.
   1046  *
   1047  **********************************************************************/
   1048 
   1049 static void
   1050 ixv_local_timer(void *arg)
   1051 {
   1052 	struct adapter *adapter = arg;
   1053 
   1054 	IXGBE_CORE_LOCK(adapter);
   1055 	ixv_local_timer_locked(adapter);
   1056 	IXGBE_CORE_UNLOCK(adapter);
   1057 }
   1058 
   1059 static void
   1060 ixv_local_timer_locked(void *arg)
   1061 {
   1062 	struct adapter	*adapter = arg;
   1063 	device_t	dev = adapter->dev;
   1064 	struct ix_queue	*que = adapter->queues;
   1065 	u64		queues = 0;
   1066 	int		hung = 0;
   1067 
   1068 	KASSERT(mutex_owned(&adapter->core_mtx));
   1069 
   1070 	ixv_update_link_status(adapter);
   1071 
   1072 	/* Stats Update */
   1073 	ixv_update_stats(adapter);
   1074 
   1075 	/*
   1076 	** Check the TX queues status
   1077 	**      - mark hung queues so we don't schedule on them
   1078 	**      - watchdog only if all queues show hung
   1079 	*/
   1080 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1081 		/* Keep track of queues with work for soft irq */
   1082 		if (que->txr->busy)
   1083 			queues |= ((u64)1 << que->me);
   1084 		/*
    1085 		** Each time txeof runs without cleaning while there
    1086 		** are uncleaned descriptors, it increments busy. If
   1087 		** we get to the MAX we declare it hung.
   1088 		*/
   1089 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1090 			++hung;
   1091 			/* Mark the queue as inactive */
   1092 			adapter->active_queues &= ~((u64)1 << que->me);
   1093 			continue;
   1094 		} else {
   1095 			/* Check if we've come back from hung */
   1096 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1097 				adapter->active_queues |= ((u64)1 << que->me);
   1098 		}
   1099 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1100 			device_printf(dev,"Warning queue %d "
   1101 			    "appears to be hung!\n", i);
   1102 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1103 			++hung;
   1104 		}
   1105 	}
   1106 
   1107 	/* Only truly watchdog if all queues show hung */
   1108 	if (hung == adapter->num_queues)
   1109 		goto watchdog;
   1110 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1111 		ixv_rearm_queues(adapter, queues);
   1112 	}
   1113 
   1114 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1115 
   1116 	return;
   1117 
   1118 watchdog:
   1119 
   1120 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1121 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1122 	adapter->watchdog_events.ev_count++;
   1123 	ixv_init_locked(adapter);
   1124 }
   1125 
   1126 /*
    1127 ** Note: this routine updates the OS on the link state;
   1128 **	the real check of the hardware only happens with
   1129 **	a link interrupt.
   1130 */
   1131 static void
   1132 ixv_update_link_status(struct adapter *adapter)
   1133 {
   1134 	struct ifnet	*ifp = adapter->ifp;
   1135 	device_t dev = adapter->dev;
   1136 
   1137 	if (adapter->link_up) {
   1138 		if (adapter->link_active == FALSE) {
   1139 			if (bootverbose) {
   1140 				const char *bpsmsg;
   1141 
   1142 				switch (adapter->link_speed) {
   1143 				case IXGBE_LINK_SPEED_10GB_FULL:
   1144 					bpsmsg = "10 Gbps";
   1145 					break;
   1146 				case IXGBE_LINK_SPEED_1GB_FULL:
   1147 					bpsmsg = "1 Gbps";
   1148 					break;
   1149 				case IXGBE_LINK_SPEED_100_FULL:
   1150 					bpsmsg = "100 Mbps";
   1151 					break;
   1152 				default:
   1153 					bpsmsg = "unknown speed";
   1154 					break;
   1155 				}
   1156 				device_printf(dev,"Link is up %s %s \n",
   1157 				    bpsmsg, "Full Duplex");
   1158 			}
   1159 			adapter->link_active = TRUE;
   1160 			if_link_state_change(ifp, LINK_STATE_UP);
   1161 		}
   1162 	} else { /* Link down */
   1163 		if (adapter->link_active == TRUE) {
   1164 			if (bootverbose)
   1165 				device_printf(dev,"Link is Down\n");
   1166 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1167 			adapter->link_active = FALSE;
   1168 		}
   1169 	}
   1170 
   1171 	return;
   1172 }
   1173 
   1174 
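         /*
          * if_stop callback: wrap ixv_stop() with the core lock held.
          */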
   1175 static void
   1176 ixv_ifstop(struct ifnet *ifp, int disable)
   1177 {
   1178 	struct adapter *adapter = ifp->if_softc;
   1179 
   1180 	IXGBE_CORE_LOCK(adapter);
   1181 	ixv_stop(adapter);
   1182 	IXGBE_CORE_UNLOCK(adapter);
   1183 }
   1184 
   1185 /*********************************************************************
   1186  *
   1187  *  This routine disables all traffic on the adapter by issuing a
   1188  *  global reset on the MAC and deallocates TX/RX buffers.
   1189  *
   1190  **********************************************************************/
   1191 
   1192 static void
   1193 ixv_stop(void *arg)
   1194 {
   1195 	struct ifnet   *ifp;
   1196 	struct adapter *adapter = arg;
   1197 	struct ixgbe_hw *hw = &adapter->hw;
   1198 	ifp = adapter->ifp;
   1199 
   1200 	KASSERT(mutex_owned(&adapter->core_mtx));
   1201 
   1202 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1203 	ixv_disable_intr(adapter);
   1204 
   1205 	/* Tell the stack that the interface is no longer active */
   1206 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1207 
   1208 	ixgbe_reset_hw(hw);
   1209 	adapter->hw.adapter_stopped = FALSE;
   1210 	ixgbe_stop_adapter(hw);
   1211 	callout_stop(&adapter->timer);
   1212 
   1213 	/* reprogram the RAR[0] in case user changed it. */
   1214 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1215 
   1216 	return;
   1217 }
   1218 
   1219 
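         /*
          * Map BAR0 (the VF register space) and then set up MSI-X; the
          * number of vectors obtained is recorded in adapter->msix.
          */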
   1220 static int
   1221 ixv_allocate_pci_resources(struct adapter *adapter,
   1222     const struct pci_attach_args *pa)
   1223 {
   1224 	pcireg_t	memtype;
   1225 	device_t        dev = adapter->dev;
   1226 	bus_addr_t addr;
   1227 	int flags;
   1228 
   1229 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1230 	switch (memtype) {
   1231 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1232 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1233 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1234 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1235 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1236 			goto map_err;
   1237 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1238 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1239 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1240 		}
   1241 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1242 		     adapter->osdep.mem_size, flags,
   1243 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1244 map_err:
   1245 			adapter->osdep.mem_size = 0;
   1246 			aprint_error_dev(dev, "unable to map BAR0\n");
   1247 			return ENXIO;
   1248 		}
   1249 		break;
   1250 	default:
   1251 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1252 		return ENXIO;
   1253 	}
   1254 	adapter->hw.back = adapter;
   1255 
   1256 	/* Pick up the tuneable queues */
   1257 	adapter->num_queues = ixv_num_queues;
   1258 
   1259 	/*
    1260 	** Now set up MSI-X; this should
   1261 	** return us the number of
   1262 	** configured vectors.
   1263 	*/
   1264 	adapter->msix = ixv_setup_msix(adapter);
   1265 	if (adapter->msix == ENXIO)
   1266 		return (ENXIO);
   1267 	else
   1268 		return (0);
   1269 }
   1270 
   1271 static void
   1272 ixv_free_pci_resources(struct adapter * adapter)
   1273 {
   1274 	struct 		ix_queue *que = adapter->queues;
   1275 	int		rid;
   1276 
   1277 	/*
   1278 	**  Release all msix queue resources:
   1279 	*/
   1280 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1281 		if (que->res != NULL)
   1282 			pci_intr_disestablish(adapter->osdep.pc,
   1283 			    adapter->osdep.ihs[i]);
   1284 	}
   1285 
   1286 
   1287 	/* Clean the Link interrupt last */
   1288 	rid = adapter->vector;
   1289 
   1290 	if (adapter->osdep.ihs[rid] != NULL) {
   1291 		pci_intr_disestablish(adapter->osdep.pc,
   1292 		    adapter->osdep.ihs[rid]);
   1293 		adapter->osdep.ihs[rid] = NULL;
   1294 	}
   1295 
   1296 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1297 	    adapter->osdep.nintrs);
   1298 
   1299 	if (adapter->osdep.mem_size != 0) {
   1300 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1301 		    adapter->osdep.mem_bus_space_handle,
   1302 		    adapter->osdep.mem_size);
   1303 	}
   1304 
   1305 	return;
   1306 }
   1307 
   1308 /*********************************************************************
   1309  *
   1310  *  Setup networking device structure and register an interface.
   1311  *
   1312  **********************************************************************/
   1313 static void
   1314 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1315 {
   1316 	struct ethercom *ec = &adapter->osdep.ec;
   1317 	struct ifnet   *ifp;
   1318 
   1319 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1320 
   1321 	ifp = adapter->ifp = &ec->ec_if;
   1322 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1323 	ifp->if_baudrate = IF_Gbps(10);
   1324 	ifp->if_init = ixv_init;
   1325 	ifp->if_stop = ixv_ifstop;
   1326 	ifp->if_softc = adapter;
   1327 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1328 #ifdef IXGBE_MPSAFE
   1329 	ifp->if_extflags = IFEF_START_MPSAFE;
   1330 #endif
   1331 	ifp->if_ioctl = ixv_ioctl;
   1332 #ifndef IXGBE_LEGACY_TX
   1333 	ifp->if_transmit = ixgbe_mq_start;
   1334 #endif
   1335 	ifp->if_start = ixgbe_start;
   1336 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1337 	IFQ_SET_READY(&ifp->if_snd);
   1338 
   1339 	if_initialize(ifp);
   1340 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1341 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1342 	/*
    1343 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
   1344 	 * used.
   1345 	 */
   1346 	if_register(ifp);
   1347 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1348 
   1349 	adapter->max_frame_size =
   1350 	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
   1351 
   1352 	/*
   1353 	 * Tell the upper layer(s) we support long frames.
   1354 	 */
   1355 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1356 
   1357 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1358 	ifp->if_capenable = 0;
   1359 
   1360 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1361 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1362 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1363 	    		| ETHERCAP_VLAN_MTU;
   1364 	ec->ec_capenable = ec->ec_capabilities;
   1365 
   1366 	/* Don't enable LRO by default */
   1367 	ifp->if_capabilities |= IFCAP_LRO;
   1368 #if 0
   1369 	ifp->if_capenable = ifp->if_capabilities;
   1370 #endif
   1371 
   1372 	/*
    1373 	** Don't turn this on by default: if vlans are
    1374 	** created on another pseudo device (e.g. lagg)
    1375 	** then vlan events are not passed through, breaking
    1376 	** operation, but with HW FILTER off it works. If
    1377 	** you are using vlans directly on this driver you can
    1378 	** enable this and get full hardware tag filtering.
   1379 	*/
   1380 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1381 
   1382 	/*
   1383 	 * Specify the media types supported by this adapter and register
   1384 	 * callbacks to update media and link information
   1385 	 */
   1386 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1387 	    ixv_media_status);
   1388 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1389 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1390 
   1391 	return;
   1392 }
   1393 
   1394 /*********************************************************************
   1395  *
   1396  *  Enable transmit unit.
   1397  *
   1398  **********************************************************************/
   1399 static void
   1400 ixv_initialize_transmit_units(struct adapter *adapter)
   1401 {
   1402 	struct tx_ring	*txr = adapter->tx_rings;
   1403 	struct ixgbe_hw	*hw = &adapter->hw;
   1404 
   1405 
   1406 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1407 		u64	tdba = txr->txdma.dma_paddr;
   1408 		u32	txctrl, txdctl;
   1409 
   1410 		/* Set WTHRESH to 8, burst writeback */
   1411 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1412 		txdctl |= (8 << 16);
   1413 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1414 
   1415 		/* Set the HW Tx Head and Tail indices */
    1416 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
    1417 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1418 
   1419 		/* Set Tx Tail register */
   1420 		txr->tail = IXGBE_VFTDT(i);
   1421 
   1422 		/* Set Ring parameters */
   1423 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1424 		    (tdba & 0x00000000ffffffffULL));
   1425 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1426 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1427 		    adapter->num_tx_desc *
   1428 		    sizeof(struct ixgbe_legacy_tx_desc));
   1429 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1430 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1431 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1432 
   1433 		/* Now enable */
   1434 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1435 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1436 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1437 	}
   1438 
   1439 	return;
   1440 }
   1441 
   1442 
   1443 /*********************************************************************
   1444  *
   1445  *  Setup receive registers and features.
   1446  *
   1447  **********************************************************************/
   1448 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1449 
   1450 static void
   1451 ixv_initialize_receive_units(struct adapter *adapter)
   1452 {
   1453 	struct	rx_ring	*rxr = adapter->rx_rings;
   1454 	struct ixgbe_hw	*hw = &adapter->hw;
   1455 	struct ifnet	*ifp = adapter->ifp;
   1456 	u32		bufsz, rxcsum, psrtype;
   1457 
   1458 	if (ifp->if_mtu > ETHERMTU)
   1459 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1460 	else
   1461 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1462 
   1463 	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
   1464 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
   1465 	    IXGBE_PSRTYPE_L2HDR;
   1466 
   1467 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1468 
   1469 	/* Tell PF our max_frame size */
   1470 	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
   1471 
   1472 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1473 		u64 rdba = rxr->rxdma.dma_paddr;
   1474 		u32 reg, rxdctl;
   1475 
   1476 		/* Disable the queue */
   1477 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1478 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1479 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1480 		for (int j = 0; j < 10; j++) {
   1481 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1482 			    IXGBE_RXDCTL_ENABLE)
   1483 				msec_delay(1);
   1484 			else
   1485 				break;
   1486 		}
   1487 		wmb();
   1488 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1489 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1490 		    (rdba & 0x00000000ffffffffULL));
   1491 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1492 		    (rdba >> 32));
   1493 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1494 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1495 
   1496 		/* Reset the ring indices */
   1497 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1498 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1499 
   1500 		/* Set up the SRRCTL register */
   1501 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1502 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1503 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1504 		reg |= bufsz;
   1505 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1506 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1507 
   1508 		/* Capture Rx Tail index */
   1509 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1510 
   1511 		/* Do the queue enabling last */
   1512 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1513 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1514 		for (int k = 0; k < 10; k++) {
   1515 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1516 			    IXGBE_RXDCTL_ENABLE)
   1517 				break;
   1518 			else
   1519 				msec_delay(1);
   1520 		}
   1521 		wmb();
   1522 
   1523 		/* Set the Tail Pointer */
   1524 #ifdef DEV_NETMAP
   1525 		/*
   1526 		 * In netmap mode, we must preserve the buffers made
   1527 		 * available to userspace before the if_init()
   1528 		 * (this is true by default on the TX side, because
   1529 		 * init makes all buffers available to userspace).
   1530 		 *
   1531 		 * netmap_reset() and the device specific routines
   1532 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1533 		 * buffers at the end of the NIC ring, so here we
   1534 		 * must set the RDT (tail) register to make sure
   1535 		 * they are not overwritten.
   1536 		 *
   1537 		 * In this driver the NIC ring starts at RDH = 0,
   1538 		 * RDT points to the last slot available for reception (?),
   1539 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1540 		 */
   1541 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1542 			struct netmap_adapter *na = NA(adapter->ifp);
   1543 			struct netmap_kring *kring = &na->rx_rings[i];
   1544 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1545 
   1546 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1547 		} else
   1548 #endif /* DEV_NETMAP */
   1549 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1550 			    adapter->num_rx_desc - 1);
   1551 	}
   1552 
   1553 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1554 
   1555 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1556 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1557 
   1558 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1559 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1560 
   1561 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1562 
   1563 	return;
   1564 }
   1565 
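         /*
          * Enable VLAN tag stripping on every Rx queue and repopulate the
          * hardware VLAN filter table (VFTA) from the shadow copy kept by
          * the driver.
          */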
   1566 static void
   1567 ixv_setup_vlan_support(struct adapter *adapter)
   1568 {
   1569 	struct ixgbe_hw *hw = &adapter->hw;
   1570 	u32		ctrl, vid, vfta, retry;
   1571 	struct rx_ring	*rxr;
   1572 
   1573 	/*
    1574 	** We get here through init_locked, meaning
    1575 	** a soft reset, which has already cleared
    1576 	** the VFTA and other state, so if no
    1577 	** vlans have been registered do nothing.
   1578 	*/
   1579 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1580 		return;
   1581 
   1582 	/* Enable the queues */
   1583 	for (int i = 0; i < adapter->num_queues; i++) {
   1584 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1585 		ctrl |= IXGBE_RXDCTL_VME;
   1586 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1587 		/*
   1588 		 * Let Rx path know that it needs to store VLAN tag
   1589 		 * as part of extra mbuf info.
   1590 		 */
   1591 		rxr = &adapter->rx_rings[i];
   1592 		rxr->vtag_strip = TRUE;
   1593 	}
   1594 
   1595 	/*
    1596 	** A soft reset zeroes out the VFTA, so
   1597 	** we need to repopulate it now.
   1598 	*/
   1599 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1600 		if (ixv_shadow_vfta[i] == 0)
   1601 			continue;
   1602 		vfta = ixv_shadow_vfta[i];
    1603 		/*
    1604 		** Reconstruct the VLAN IDs from
    1605 		** the bits set in each of the
    1606 		** 32-bit words of the array.
    1607 		*/
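		/*
		** For illustration: VLAN ID 100 sits in word i = 100 / 32 = 3,
		** bit j = 100 % 32 = 4.
		*/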
   1608 		for (int j = 0; j < 32; j++) {
   1609 			retry = 0;
   1610 			if ((vfta & (1 << j)) == 0)
   1611 				continue;
   1612 			vid = (i * 32) + j;
   1613 			/* Call the shared code mailbox routine */
   1614 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1615 				if (++retry > 5)
   1616 					break;
   1617 			}
   1618 		}
   1619 	}
   1620 }
   1621 
   1622 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
    1623 /*
    1624 ** This routine is run via a vlan config EVENT;
    1625 ** it enables us to use the HW Filter table since
    1626 ** we can get the vlan id. This just creates the
    1627 ** entry in the soft version of the VFTA; init will
    1628 ** repopulate the real table.
    1629 */
   1630 static void
   1631 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1632 {
   1633 	struct adapter	*adapter = ifp->if_softc;
   1634 	u16		index, bit;
   1635 
   1636 	if (ifp->if_softc != arg) /* Not our event */
   1637 		return;
   1638 
   1639 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1640 		return;
   1641 
   1642 	IXGBE_CORE_LOCK(adapter);
   1643 	index = (vtag >> 5) & 0x7F;
   1644 	bit = vtag & 0x1F;
   1645 	ixv_shadow_vfta[index] |= (1 << bit);
   1646 	/* Re-init to load the changes */
   1647 	ixv_init_locked(adapter);
   1648 	IXGBE_CORE_UNLOCK(adapter);
   1649 }
   1650 
    1651 /*
    1652 ** This routine is run via a vlan
    1653 ** unconfig EVENT; remove our entry
    1654 ** from the soft vfta.
    1655 */
   1656 static void
   1657 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1658 {
   1659 	struct adapter	*adapter = ifp->if_softc;
   1660 	u16		index, bit;
   1661 
    1662 	if (ifp->if_softc != arg)
   1663 		return;
   1664 
   1665 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1666 		return;
   1667 
   1668 	IXGBE_CORE_LOCK(adapter);
   1669 	index = (vtag >> 5) & 0x7F;
   1670 	bit = vtag & 0x1F;
   1671 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1672 	/* Re-init to load the changes */
   1673 	ixv_init_locked(adapter);
   1674 	IXGBE_CORE_UNLOCK(adapter);
   1675 }
   1676 #endif
   1677 
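/*
** Enable interrupts: unmask the non-queue (mailbox/link) causes via
** VTEIMS, set the queue causes to auto-clear via VTEIAC, then enable
** each queue's MSI-X vector individually.
*/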
   1678 static void
   1679 ixv_enable_intr(struct adapter *adapter)
   1680 {
   1681 	struct ixgbe_hw *hw = &adapter->hw;
   1682 	struct ix_queue *que = adapter->queues;
   1683 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
    1684 
   1686 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1687 
   1688 	mask = IXGBE_EIMS_ENABLE_MASK;
   1689 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1690 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1691 
    1692 	for (int i = 0; i < adapter->num_queues; i++, que++)
   1693 		ixv_enable_queue(adapter, que->msix);
   1694 
   1695 	IXGBE_WRITE_FLUSH(hw);
   1696 
   1697 	return;
   1698 }
   1699 
   1700 static void
   1701 ixv_disable_intr(struct adapter *adapter)
   1702 {
   1703 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1704 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1705 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1706 
   1707 	return;
   1708 }
   1709 
    1710 /*
    1711 ** Set up the correct IVAR register for a particular MSI-X interrupt
    1712 **  - entry is the register array entry
    1713 **  - vector is the MSI-X vector for this queue
    1714 **  - type is RX/TX/MISC
    1715 */
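/*
** Each 32-bit VTIVAR register packs four 8-bit entries: bits 7:0 and
** 15:8 hold the RX and TX vectors for the even queue of the pair
** selected by (entry >> 1), bits 23:16 and 31:24 those of the odd
** queue.  For example, entry 3 with type 0 (RX) lands in VTIVAR(1),
** bits 23:16.
*/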
   1716 static void
   1717 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1718 {
   1719 	struct ixgbe_hw *hw = &adapter->hw;
   1720 	u32 ivar, index;
   1721 
   1722 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1723 
   1724 	if (type == -1) { /* MISC IVAR */
   1725 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1726 		ivar &= ~0xFF;
   1727 		ivar |= vector;
   1728 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1729 	} else {	/* RX/TX IVARS */
   1730 		index = (16 * (entry & 1)) + (8 * type);
   1731 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1732 		ivar &= ~(0xFF << index);
   1733 		ivar |= (vector << index);
   1734 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1735 	}
   1736 }
   1737 
   1738 static void
   1739 ixv_configure_ivars(struct adapter *adapter)
   1740 {
   1741 	struct  ix_queue *que = adapter->queues;
   1742 
   1743 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1744 		/* First the RX queue entry */
   1745 		ixv_set_ivar(adapter, i, que->msix, 0);
   1746 		/* ... and the TX */
   1747 		ixv_set_ivar(adapter, i, que->msix, 1);
   1748 		/* Set an initial value in EITR */
   1749 		IXGBE_WRITE_REG(&adapter->hw,
   1750 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   1751 	}
   1752 
   1753 	/* For the mailbox interrupt */
   1754 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1755 }
   1756 
   1757 
   1758 /*
   1759 ** The VF stats registers never have a truly virgin
   1760 ** starting point, so this routine tries to make an
   1761 ** artificial one, marking ground zero on attach as
   1762 ** it were.
   1763 */
   1764 static void
   1765 ixv_save_stats(struct adapter *adapter)
   1766 {
   1767 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1768 
   1769 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1770 		stats->saved_reset_vfgprc +=
   1771 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1772 		stats->saved_reset_vfgptc +=
   1773 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1774 		stats->saved_reset_vfgorc +=
   1775 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1776 		stats->saved_reset_vfgotc +=
   1777 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1778 		stats->saved_reset_vfmprc +=
   1779 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1780 	}
   1781 }
   1782 
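/*
** Snapshot the current hardware counter values so later deltas can be
** computed; the octet counters (GORC/GOTC) are split across LSB/MSB
** register pairs.
*/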
   1783 static void
   1784 ixv_init_stats(struct adapter *adapter)
   1785 {
   1786 	struct ixgbe_hw *hw = &adapter->hw;
   1787 
   1788 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1789 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1790 	adapter->stats.vf.last_vfgorc |=
   1791 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1792 
   1793 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1794 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1795 	adapter->stats.vf.last_vfgotc |=
   1796 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1797 
   1798 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1799 
   1800 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1801 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1802 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1803 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1804 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1805 }
   1806 
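/*
** The VF counters are free-running 32-bit (36-bit for the octet
** counts) registers; these macros fold them into 64-bit event
** counters, adding 2^32 (or 2^36) whenever the raw register value
** wraps.
*/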
   1807 #define UPDATE_STAT_32(reg, last, count)		\
   1808 {							\
   1809 	u32 current = IXGBE_READ_REG(hw, reg);		\
   1810 	if (current < last)				\
   1811 		count.ev_count += 0x100000000LL;	\
   1812 	last = current;					\
   1813 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   1814 	count.ev_count |= current;			\
   1815 }
   1816 
   1817 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   1818 {							\
   1819 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   1820 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   1821 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   1822 	if (current < last)				\
   1823 		count.ev_count += 0x1000000000LL;	\
   1824 	last = current;					\
   1825 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   1826 	count.ev_count |= current;			\
   1827 }
   1828 
   1829 /*
   1830 ** ixv_update_stats - Update the board statistics counters.
   1831 */
   1832 void
   1833 ixv_update_stats(struct adapter *adapter)
   1834 {
    1835 	struct ixgbe_hw *hw = &adapter->hw;
    1836 
    1837 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
    1838 	    adapter->stats.vf.vfgprc);
    1839 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
    1840 	    adapter->stats.vf.vfgptc);
    1841 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
    1842 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
    1843 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
    1844 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
    1845 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
    1846 	    adapter->stats.vf.vfmprc);
   1847 }
   1848 
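/*
** Create the per-device sysctl root node under CTL_HW; entries created
** under it appear as hw.<xname>.<name>, e.g. hw.ixv0.debug.
*/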
   1849 const struct sysctlnode *
   1850 ixv_sysctl_instance(struct adapter *adapter)
   1851 {
   1852 	const char *dvname;
   1853 	struct sysctllog **log;
   1854 	int rc;
   1855 	const struct sysctlnode *rnode;
   1856 
   1857 	log = &adapter->sysctllog;
   1858 	dvname = device_xname(adapter->dev);
   1859 
   1860 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   1861 	    0, CTLTYPE_NODE, dvname,
   1862 	    SYSCTL_DESCR("ixv information and settings"),
   1863 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   1864 		goto err;
   1865 
   1866 	return rnode;
   1867 err:
   1868 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   1869 	return NULL;
   1870 }
   1871 
   1872 static void
   1873 ixv_add_device_sysctls(struct adapter *adapter)
   1874 {
   1875 	struct sysctllog **log;
   1876 	const struct sysctlnode *rnode, *cnode;
   1877 	device_t dev;
   1878 
   1879 	dev = adapter->dev;
   1880 	log = &adapter->sysctllog;
   1881 
   1882 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   1883 		aprint_error_dev(dev, "could not create sysctl root\n");
   1884 		return;
   1885 	}
   1886 
   1887 	if (sysctl_createv(log, 0, &rnode, &cnode,
   1888 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   1889 	    "debug", SYSCTL_DESCR("Debug Info"),
   1890 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   1891 		aprint_error_dev(dev, "could not create sysctl\n");
   1892 
   1893 	if (sysctl_createv(log, 0, &rnode, &cnode,
   1894 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   1895 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   1896 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   1897 		aprint_error_dev(dev, "could not create sysctl\n");
   1898 }
   1899 
   1900 /*
   1901  * Add statistic sysctls for the VF.
   1902  */
   1903 static void
   1904 ixv_add_stats_sysctls(struct adapter *adapter)
   1905 {
   1906 	device_t dev = adapter->dev;
   1907 	const struct sysctlnode *rnode;
   1908 	struct sysctllog **log = &adapter->sysctllog;
   1909 	struct ix_queue *que = &adapter->queues[0];
   1910 	struct tx_ring *txr = que->txr;
   1911 	struct rx_ring *rxr = que->rxr;
   1912 
   1913 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1914 	const char *xname = device_xname(dev);
   1915 
   1916 	/* Driver Statistics */
   1917 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1918 	    NULL, xname, "Handled queue in softint");
   1919 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1920 	    NULL, xname, "Requeued in softint");
   1921 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1922 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1923 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1924 	    NULL, xname, "m_defrag() failed");
   1925 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1926 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1927 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1928 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1929 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1930 	    NULL, xname, "Driver tx dma hard fail other");
   1931 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1932 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1933 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1934 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1935 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1936 	    NULL, xname, "Watchdog timeouts");
   1937 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1938 	    NULL, xname, "TSO errors");
   1939 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1940 	    NULL, xname, "Link MSIX IRQ Handled");
   1941 
   1942 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1943 		snprintf(adapter->queues[i].evnamebuf,
   1944 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1945 		    xname, i);
   1946 		snprintf(adapter->queues[i].namebuf,
   1947 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1948 
   1949 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   1950 			aprint_error_dev(dev, "could not create sysctl root\n");
   1951 			break;
   1952 		}
   1953 
   1954 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1955 		    0, CTLTYPE_NODE,
   1956 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1957 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1958 			break;
   1959 
   1960 #if 0 /* not yet */
   1961 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1962 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1963 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1964 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1965 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1966 			break;
   1967 
   1968 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1969 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1970 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1971 			NULL, 0, &(adapter->queues[i].irqs),
   1972 		    0, CTL_CREATE, CTL_EOL) != 0)
   1973 			break;
   1974 
   1975 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1976 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1977 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1978 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1979 		    0, CTL_CREATE, CTL_EOL) != 0)
   1980 			break;
   1981 
   1982 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1983 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1984 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1985 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1986 		    0, CTL_CREATE, CTL_EOL) != 0)
   1987 			break;
   1988 #endif
   1989 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1990 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1991 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1992 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1993 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1994 		    NULL, adapter->queues[i].evnamebuf,
   1995 		    "Queue No Descriptor Available");
   1996 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1997 		    NULL, adapter->queues[i].evnamebuf,
   1998 		    "Queue Packets Transmitted");
   1999 #ifndef IXGBE_LEGACY_TX
   2000 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2001 		    NULL, adapter->queues[i].evnamebuf,
   2002 		    "Packets dropped in pcq");
   2003 #endif
   2004 
   2005 #ifdef LRO
   2006 		struct lro_ctrl *lro = &rxr->lro;
   2007 #endif /* LRO */
   2008 
   2009 #if 0 /* not yet */
   2010 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2011 		    CTLFLAG_READONLY,
   2012 		    CTLTYPE_INT,
   2013 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2014 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2015 		    CTL_CREATE, CTL_EOL) != 0)
   2016 			break;
   2017 
   2018 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2019 		    CTLFLAG_READONLY,
   2020 		    CTLTYPE_INT,
   2021 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2022 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2023 		    CTL_CREATE, CTL_EOL) != 0)
   2024 			break;
   2025 #endif
   2026 
   2027 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2028 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2029 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2030 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2031 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2032 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2033 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2034 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2035 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2036 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2037 #ifdef LRO
   2038 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2039 				CTLFLAG_RD, &lro->lro_queued, 0,
   2040 				"LRO Queued");
   2041 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2042 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2043 				"LRO Flushed");
   2044 #endif /* LRO */
   2045 	}
   2046 
    2047 	/* MAC stats get their own sub node */
   2048 
   2049 	snprintf(stats->namebuf,
   2050 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2051 
   2052 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2053 	    stats->namebuf, "rx csum offload - IP");
   2054 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2055 	    stats->namebuf, "rx csum offload - L4");
   2056 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2057 	    stats->namebuf, "rx csum offload - IP bad");
   2058 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2059 	    stats->namebuf, "rx csum offload - L4 bad");
   2060 
   2061 	/* Packet Reception Stats */
   2062 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2063 	    xname, "Good Packets Received");
   2064 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2065 	    xname, "Good Octets Received");
   2066 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2067 	    xname, "Multicast Packets Received");
   2068 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2069 	    xname, "Good Packets Transmitted");
   2070 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2071 	    xname, "Good Octets Transmitted");
   2072 }
   2073 
   2074 static void
   2075 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2076 	const char *description, int *limit, int value)
   2077 {
   2078 	device_t dev =  adapter->dev;
   2079 	struct sysctllog **log;
   2080 	const struct sysctlnode *rnode, *cnode;
   2081 
   2082 	log = &adapter->sysctllog;
   2083 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2084 		aprint_error_dev(dev, "could not create sysctl root\n");
   2085 		return;
   2086 	}
   2087 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2088 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2089 	    name, SYSCTL_DESCR(description),
   2090 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2091 		aprint_error_dev(dev, "could not create sysctl\n");
   2092 	*limit = value;
   2093 }
   2094 
   2095 /**********************************************************************
   2096  *
    2097  *  This routine is called only when the "debug" sysctl is set to 1.
   2098  *  This routine provides a way to take a look at important statistics
   2099  *  maintained by the driver and hardware.
   2100  *
   2101  **********************************************************************/
   2102 static void
   2103 ixv_print_debug_info(struct adapter *adapter)
   2104 {
    2105 	device_t dev = adapter->dev;
    2106 	struct ixgbe_hw	*hw = &adapter->hw;
    2107 	struct ix_queue	*que = adapter->queues;
    2108 	struct rx_ring	*rxr;
    2109 	struct tx_ring	*txr;
    2110 #ifdef LRO
    2111 	struct lro_ctrl	*lro;
    2112 #endif /* LRO */
    2113 
    2114 	device_printf(dev, "Error Byte Count = %u\n",
    2115 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
    2116 
    2117 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    2118 		txr = que->txr;
    2119 		rxr = que->rxr;
    2120 #ifdef LRO
    2121 		lro = &rxr->lro;
    2122 #endif /* LRO */
    2123 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
    2124 		    que->msix, (unsigned long)que->irqs.ev_count);
    2125 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
    2126 		    rxr->me, (long long)rxr->rx_packets.ev_count);
    2127 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
    2128 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
    2129 #ifdef LRO
    2130 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
    2131 		    rxr->me, (long long)lro->lro_queued);
    2132 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
    2133 		    rxr->me, (long long)lro->lro_flushed);
    2134 #endif /* LRO */
    2135 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
    2136 		    txr->me, (unsigned long)txr->total_packets.ev_count);
    2137 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
    2138 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
    2139 	}
    2140 
    2141 	device_printf(dev, "MBX IRQ Handled: %lu\n",
    2142 	    (unsigned long)adapter->link_irq.ev_count);
    2143 	return;
   2144 }
   2145 
   2146 static int
   2147 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2148 {
   2149 	struct sysctlnode node;
   2150 	int error, result;
   2151 	struct adapter *adapter;
   2152 
   2153 	node = *rnode;
   2154 	adapter = (struct adapter *)node.sysctl_data;
   2155 	node.sysctl_data = &result;
   2156 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2157 
   2158 	if (error)
   2159 		return error;
   2160 
   2161 	if (result == 1)
   2162 		ixv_print_debug_info(adapter);
   2163 
   2164 	return 0;
   2165 }
   2166 
   2167 /*********************************************************************
   2168  *
   2169  *  Shutdown entry point
   2170  *
   2171  **********************************************************************/
   2172 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2173 static int
   2174 ixv_shutdown(device_t dev)
   2175 {
   2176 	struct adapter *adapter = device_private(dev);
   2177 	IXGBE_CORE_LOCK(adapter);
   2178 	ixv_stop(adapter);
   2179 	IXGBE_CORE_UNLOCK(adapter);
   2180 
   2181 	return (0);
   2182 }
   2183 #endif
   2184 
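/*
** ethercom interface-flags callback: invoked when SIOCSIFFLAGS
** changes the interface flags; returning ENETRESET asks the caller
** to re-initialize the interface.
*/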
   2185 static int
   2186 ixv_ifflags_cb(struct ethercom *ec)
   2187 {
   2188 	struct ifnet *ifp = &ec->ec_if;
   2189 	struct adapter *adapter = ifp->if_softc;
   2190 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2191 
   2192 	IXGBE_CORE_LOCK(adapter);
   2193 
   2194 	if (change != 0)
   2195 		adapter->if_flags = ifp->if_flags;
   2196 
   2197 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2198 		rc = ENETRESET;
   2199 
   2200 	IXGBE_CORE_UNLOCK(adapter);
   2201 
   2202 	return rc;
   2203 }
   2204 
   2205 /*********************************************************************
   2206  *  Ioctl entry point
   2207  *
   2208  *  ixv_ioctl is called when the user wants to configure the
   2209  *  interface.
   2210  *
   2211  *  return 0 on success, positive on failure
   2212  **********************************************************************/
   2213 
   2214 static int
   2215 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
   2216 {
   2217 	struct adapter	*adapter = ifp->if_softc;
   2218 	struct ifcapreq *ifcr = data;
   2219 	struct ifreq	*ifr = (struct ifreq *) data;
   2220 	int             error = 0;
   2221 	int l4csum_en;
   2222 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2223 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2224 
   2225 	switch (command) {
   2226 	case SIOCSIFFLAGS:
   2227 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2228 		break;
   2229 	case SIOCADDMULTI:
   2230 	case SIOCDELMULTI:
   2231 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2232 		break;
   2233 	case SIOCSIFMEDIA:
   2234 	case SIOCGIFMEDIA:
   2235 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2236 		break;
   2237 	case SIOCSIFCAP:
   2238 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2239 		break;
   2240 	case SIOCSIFMTU:
   2241 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2242 		break;
   2243 	default:
   2244 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2245 		break;
   2246 	}
   2247 
   2248 	switch (command) {
   2249 	case SIOCSIFMEDIA:
   2250 	case SIOCGIFMEDIA:
   2251 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2252 	case SIOCSIFCAP:
   2253 		/* Layer-4 Rx checksum offload has to be turned on and
   2254 		 * off as a unit.
   2255 		 */
   2256 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2257 		if (l4csum_en != l4csum && l4csum_en != 0)
   2258 			return EINVAL;
   2259 		/*FALLTHROUGH*/
   2260 	case SIOCADDMULTI:
   2261 	case SIOCDELMULTI:
   2262 	case SIOCSIFFLAGS:
   2263 	case SIOCSIFMTU:
   2264 	default:
   2265 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2266 			return error;
   2267 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2268 			;
   2269 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2270 			IXGBE_CORE_LOCK(adapter);
   2271 			ixv_init_locked(adapter);
   2272 			IXGBE_CORE_UNLOCK(adapter);
   2273 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2274 			/*
   2275 			 * Multicast list has changed; set the hardware filter
   2276 			 * accordingly.
   2277 			 */
   2278 			IXGBE_CORE_LOCK(adapter);
   2279 			ixv_disable_intr(adapter);
   2280 			ixv_set_multi(adapter);
   2281 			ixv_enable_intr(adapter);
   2282 			IXGBE_CORE_UNLOCK(adapter);
   2283 		}
   2284 		return 0;
   2285 	}
   2286 }
   2287 
   2288 static int
   2289 ixv_init(struct ifnet *ifp)
   2290 {
   2291 	struct adapter *adapter = ifp->if_softc;
   2292 
   2293 	IXGBE_CORE_LOCK(adapter);
   2294 	ixv_init_locked(adapter);
   2295 	IXGBE_CORE_UNLOCK(adapter);
   2296 
   2297 	return 0;
   2298 }
   2299 
   2300 
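/*
** Deferred (softint) per-queue handler: drain completed RX and TX
** work, restart transmission if packets are pending, then either
** reschedule itself while RX work remains or re-enable the queue's
** interrupt.
*/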
   2301 static void
   2302 ixv_handle_que(void *context)
   2303 {
   2304 	struct ix_queue *que = context;
   2305 	struct adapter  *adapter = que->adapter;
   2306 	struct tx_ring	*txr = que->txr;
   2307 	struct ifnet    *ifp = adapter->ifp;
   2308 	bool		more;
   2309 
   2310 	adapter->handleq.ev_count++;
   2311 
   2312 	if (ifp->if_flags & IFF_RUNNING) {
   2313 		more = ixgbe_rxeof(que);
   2314 		IXGBE_TX_LOCK(txr);
   2315 		ixgbe_txeof(txr);
   2316 #ifndef IXGBE_LEGACY_TX
   2317 		if (pcq_peek(txr->txr_interq) != NULL)
   2318 			ixgbe_mq_start_locked(ifp, txr);
   2319 #endif
   2320 		/* Only for queue 0 */
   2321 		if ((&adapter->queues[0] == que)
   2322 		    && (!IFQ_IS_EMPTY(&ifp->if_snd)))
   2323 			ixgbe_start_locked(txr, ifp);
   2324 		IXGBE_TX_UNLOCK(txr);
   2325 		if (more) {
   2326 			adapter->req.ev_count++;
   2327 			softint_schedule(que->que_si);
   2328 			return;
   2329 		}
   2330 	}
   2331 
   2332 	/* Reenable this interrupt */
   2333 	ixv_enable_queue(adapter, que->msix);
   2334 
   2335 	return;
   2336 }
   2337 
   2338 /*********************************************************************
   2339  *
   2340  *  Setup MSIX Interrupt resources and handlers
   2341  *
   2342  **********************************************************************/
   2343 static int
   2344 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2345 {
   2346 	device_t	dev = adapter->dev;
   2347 	struct ix_queue *que = adapter->queues;
   2348 	struct		tx_ring *txr = adapter->tx_rings;
   2349 	int 		error, rid, vector = 0;
   2350 	pci_chipset_tag_t pc;
   2351 	pcitag_t	tag;
   2352 	char		intrbuf[PCI_INTRSTR_LEN];
   2353 	char		intr_xname[32];
   2354 	const char	*intrstr = NULL;
   2355 	kcpuset_t	*affinity;
   2356 	int		cpu_id = 0;
   2357 
   2358 	pc = adapter->osdep.pc;
   2359 	tag = adapter->osdep.tag;
   2360 
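	/* One vector per Tx/Rx queue pair, plus one for the mailbox */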
   2361 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2362 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2363 	    adapter->osdep.nintrs) != 0) {
   2364 		aprint_error_dev(dev,
   2365 		    "failed to allocate MSI-X interrupt\n");
   2366 		return (ENXIO);
   2367 	}
   2368 
   2369 	kcpuset_create(&affinity, false);
   2370 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2371 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2372 		    device_xname(dev), i);
   2373 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2374 		    sizeof(intrbuf));
   2375 #ifdef IXGBE_MPSAFE
   2376 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2377 		    true);
   2378 #endif
   2379 		/* Set the handler function */
   2380 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2381 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2382 		    intr_xname);
   2383 		if (que->res == NULL) {
   2384 			pci_intr_release(pc, adapter->osdep.intrs,
   2385 			    adapter->osdep.nintrs);
   2386 			aprint_error_dev(dev,
   2387 			    "Failed to register QUE handler\n");
   2388 			kcpuset_destroy(affinity);
   2389 			return (ENXIO);
   2390 		}
   2391 		que->msix = vector;
    2392 		adapter->active_queues |= (u64)1 << que->msix;
   2393 
   2394 		cpu_id = i;
   2395 		/* Round-robin affinity */
   2396 		kcpuset_zero(affinity);
   2397 		kcpuset_set(affinity, cpu_id % ncpu);
   2398 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2399 		    NULL);
   2400 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2401 		    intrstr);
   2402 		if (error == 0)
   2403 			aprint_normal(", bound queue %d to cpu %d\n",
   2404 			    i, cpu_id % ncpu);
   2405 		else
   2406 			aprint_normal("\n");
   2407 
   2408 #ifndef IXGBE_LEGACY_TX
   2409 		txr->txr_si
   2410 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2411 			ixgbe_deferred_mq_start, txr);
   2412 #endif
   2413 		que->que_si
   2414 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2415 			ixv_handle_que, que);
   2416 		if (que->que_si == NULL) {
   2417 			aprint_error_dev(dev,
   2418 			    "could not establish software interrupt\n");
   2419 		}
   2420 	}
   2421 
   2422 	/* and Mailbox */
   2423 	cpu_id++;
   2424 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2425 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2426 	    sizeof(intrbuf));
   2427 #ifdef IXGBE_MPSAFE
   2428 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2429 	    true);
   2430 #endif
   2431 	/* Set the mbx handler function */
   2432 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2433 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2434 	    intr_xname);
   2435 	if (adapter->osdep.ihs[vector] == NULL) {
   2436 		adapter->res = NULL;
   2437 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2438 		kcpuset_destroy(affinity);
   2439 		return (ENXIO);
   2440 	}
   2441 	/* Round-robin affinity */
   2442 	kcpuset_zero(affinity);
   2443 	kcpuset_set(affinity, cpu_id % ncpu);
   2444 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   2445 
   2446 	aprint_normal_dev(dev,
   2447 	    "for link, interrupting at %s", intrstr);
   2448 	if (error == 0)
   2449 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2450 	else
   2451 		aprint_normal("\n");
   2452 
   2453 	adapter->vector = vector;
   2454 	/* Tasklets for Mailbox */
   2455 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   2456 	    ixv_handle_mbx, adapter);
    2457 	/*
    2458 	** Due to a broken design, QEMU will fail to properly
    2459 	** enable the guest for MSI-X unless the vectors in
    2460 	** the table are all set up, so we must rewrite the
    2461 	** ENABLE bit in the MSI-X control register again at
    2462 	** this point to cause it to successfully initialize us.
    2463 	*/
   2464 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2465 		int msix_ctrl;
   2466 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2467 		rid += PCI_MSIX_CTL;
   2468 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2469 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2470 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2471 	}
   2472 
   2473 	kcpuset_destroy(affinity);
   2474 	return (0);
   2475 }
   2476 
    2477 /*
    2478  * Set up MSI-X resources; note that the VF
    2479  * device MUST use MSI-X, there is no fallback.
    2480  */
   2481 static int
   2482 ixv_setup_msix(struct adapter *adapter)
   2483 {
   2484 	device_t dev = adapter->dev;
   2485 	int want, queues, msgs;
   2486 
   2487 	/* Must have at least 2 MSIX vectors */
   2488 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2489 	if (msgs < 2) {
    2490 		aprint_error_dev(dev, "MSIX config error\n");
   2491 		return (ENXIO);
   2492 	}
   2493 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2494 
   2495 	/* Figure out a reasonable auto config value */
   2496 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2497 
   2498 	if (ixv_num_queues != 0)
   2499 		queues = ixv_num_queues;
   2500 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   2501 		queues = IXGBE_VF_MAX_TX_QUEUES;
   2502 
    2503 	/*
    2504 	** Want one vector per queue,
    2505 	** plus an additional one for the mailbox.
    2506 	*/
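	/*
	** e.g. with 4 CPUs but only 3 MSI-X messages available this
	** yields queues = 2 and want = 3 (two queue vectors + mailbox).
	*/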
   2507 	want = queues + 1;
   2508 	if (msgs >= want)
   2509 		msgs = want;
   2510 	else {
    2511 		aprint_error_dev(dev,
    2512 		    "MSIX Configuration Problem, "
    2513 		    "%d vectors available but %d wanted!\n",
    2514 		    msgs, want);
   2515 		return -1;
   2516 	}
   2517 
   2518 	adapter->msix_mem = (void *)1; /* XXX */
   2519 	aprint_normal_dev(dev,
   2520 	    "Using MSIX interrupts with %d vectors\n", msgs);
   2521 	adapter->num_queues = queues;
   2522 	return (msgs);
   2523 }
   2524 
    2525 /*
    2526 ** Softint (tasklet) handler for the MSI-X mailbox interrupt
    2527 **  - runs outside interrupt context since it might sleep
    2528 */
   2529 static void
   2530 ixv_handle_mbx(void *context)
   2531 {
   2532 	struct adapter  *adapter = context;
   2533 
   2534 	ixgbe_check_link(&adapter->hw,
   2535 	    &adapter->link_speed, &adapter->link_up, 0);
   2536 	ixv_update_link_status(adapter);
   2537 }
   2538 
   2539 
   2540 static void
   2541 ixv_config_link(struct adapter *adapter)
   2542 {
   2543 	struct ixgbe_hw *hw = &adapter->hw;
   2544 
   2545 	if (hw->mac.ops.check_link)
   2546 		hw->mac.ops.check_link(hw, &adapter->link_speed,
   2547 		    &adapter->link_up, FALSE);
   2548 }
   2549 
   2550 
   2551