Home | History | Annotate | Line # | Download | only in ixgbe
ixv.c revision 1.100
      1 /*$NetBSD: ixv.c,v 1.100 2018/05/23 10:11:07 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 #ifdef _KERNEL_OPT
     38 #include "opt_inet.h"
     39 #include "opt_inet6.h"
     40 #include "opt_net_mpsafe.h"
     41 #endif
     42 
     43 #include "ixgbe.h"
     44 #include "vlan.h"
     45 
/************************************************************************
 * Driver version
 *
 *   Reported via aprint_normal() in ixv_attach() so it appears in the
 *   autoconfiguration (dmesg) output.
 ************************************************************************/
char ixv_driver_version[] = "2.0.1-k";
     50 
/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 *   Subvendor/subdevice of 0 act as wildcards in ixv_lookup().
 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last field of ixv_vendor_info_array entries
 *   (currently all entries use index 0).
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
     77 
     78 /*********************************************************************
     79  *  Function prototypes
     80  *********************************************************************/
     81 static int      ixv_probe(device_t, cfdata_t, void *);
     82 static void	ixv_attach(device_t, device_t, void *);
     83 static int      ixv_detach(device_t, int);
     84 #if 0
     85 static int      ixv_shutdown(device_t);
     86 #endif
     87 static int	ixv_ifflags_cb(struct ethercom *);
     88 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     89 static int	ixv_init(struct ifnet *);
     90 static void	ixv_init_locked(struct adapter *);
     91 static void	ixv_ifstop(struct ifnet *, int);
     92 static void     ixv_stop(void *);
     93 static void     ixv_init_device_features(struct adapter *);
     94 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     95 static int      ixv_media_change(struct ifnet *);
     96 static int      ixv_allocate_pci_resources(struct adapter *,
     97 		    const struct pci_attach_args *);
     98 static int      ixv_allocate_msix(struct adapter *,
     99 		    const struct pci_attach_args *);
    100 static int      ixv_configure_interrupts(struct adapter *);
    101 static void	ixv_free_pci_resources(struct adapter *);
    102 static void     ixv_local_timer(void *);
    103 static void     ixv_local_timer_locked(void *);
    104 static int      ixv_setup_interface(device_t, struct adapter *);
    105 static int      ixv_negotiate_api(struct adapter *);
    106 
    107 static void     ixv_initialize_transmit_units(struct adapter *);
    108 static void     ixv_initialize_receive_units(struct adapter *);
    109 static void     ixv_initialize_rss_mapping(struct adapter *);
    110 static void     ixv_check_link(struct adapter *);
    111 
    112 static void     ixv_enable_intr(struct adapter *);
    113 static void     ixv_disable_intr(struct adapter *);
    114 static void     ixv_set_multi(struct adapter *);
    115 static void     ixv_update_link_status(struct adapter *);
    116 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    117 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    118 static void	ixv_configure_ivars(struct adapter *);
    119 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    120 static void	ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 
    134 
    135 /* Sysctl handlers */
    136 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    137 		    const char *, int *, int);
    138 static int      ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    139 static int      ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    140 static int      ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
    141 static int      ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
    142 static int      ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
    143 static int      ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
    144 
    145 /* The MSI-X Interrupt handlers */
    146 static int	ixv_msix_que(void *);
    147 static int	ixv_msix_mbx(void *);
    148 
    149 /* Deferred interrupt tasklets */
    150 static void	ixv_handle_que(void *);
    151 static void     ixv_handle_link(void *);
    152 
    153 /* Workqueue handler for deferred work */
    154 static void	ixv_handle_que_work(struct work *, void *);
    155 
    156 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    157 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    158 
/************************************************************************
 * FreeBSD Device Interface Entry Points
 *
 *   On NetBSD the driver is hooked into autoconf(9) via
 *   CFATTACH_DECL3_NEW; the FreeBSD driver_t/devclass glue below is
 *   kept only for reference (disabled by #if 0).
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif
    176 
/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * TUNABLE_INT is a FreeBSD-ism; on NetBSD it is stubbed out to a no-op
 * so the code below stays close to the upstream FreeBSD driver.  The
 * variables are adjusted through sysctl instead.
 */
#define	TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Whether deferred packet processing uses a workqueue or a softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/*
 * With NET_MPSAFE the callouts, softints and workqueues run without
 * the kernel lock; otherwise the flags fall back to the defaults.
 * (IXGBE_SOFTINFT_FLAGS: typo for "SOFTINT" preserved, the name is
 * used elsewhere in the driver.)
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
    241 
    242 /************************************************************************
    243  * ixv_probe - Device identification routine
    244  *
    245  *   Determines if the driver should be loaded on
    246  *   adapter based on its PCI vendor/device ID.
    247  *
    248  *   return BUS_PROBE_DEFAULT on success, positive on failure
    249  ************************************************************************/
    250 static int
    251 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    252 {
    253 #ifdef __HAVE_PCI_MSI_MSIX
    254 	const struct pci_attach_args *pa = aux;
    255 
    256 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    257 #else
    258 	return 0;
    259 #endif
    260 } /* ixv_probe */
    261 
    262 static ixgbe_vendor_info_t *
    263 ixv_lookup(const struct pci_attach_args *pa)
    264 {
    265 	ixgbe_vendor_info_t *ent;
    266 	pcireg_t subid;
    267 
    268 	INIT_DEBUGOUT("ixv_lookup: begin");
    269 
    270 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    271 		return NULL;
    272 
    273 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    274 
    275 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    276 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    277 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    278 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    279 		     (ent->subvendor_id == 0)) &&
    280 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    281 		     (ent->subdevice_id == 0))) {
    282 			return ent;
    283 		}
    284 	}
    285 
    286 	return NULL;
    287 }
    288 
/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   autoconf attach functions return void; failure is signalled by
 *   leaving adapter->osdep.attached false and releasing everything
 *   via the err_late/err_out paths.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	pcireg_t	id, subid;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	/* Shared ixgbe code calls back through these hooks. */
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the bus supports it. */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Cannot be NULL: probe already matched via ixv_lookup(). */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version with the PF */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		/* Clear the multicast bit, set locally-administered. */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: ring sizes must keep the
	 * descriptor area DBA_ALIGN-aligned and stay within MIN/MAX bounds,
	 * otherwise fall back to the defaults.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	adapter->txrx_use_workqueue = ixv_txrx_workqueue;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report capability/enabled feature bitmaps (verbose boot only). */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	/* Queues were allocated; tear them down before the PCI resources. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
    563 
/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure (EBUSY when VLANs are
 *   still configured and the detach is not forced)
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter  *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* If attach bailed out early there is nothing to undo. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down per-queue deferred-processing softints. */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}
	/* Workqueues only exist when txrx_use_workqueue was enabled. */
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait for the watchdog callout to finish before freeing state. */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);

	/* Detach adapter-wide event counters (mirrors attach-time setup). */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* Per-queue/per-ring counters; txr was advanced above, reset it. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	/* Destroy the per-queue disable-count mutexes before freeing. */
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue *lque = &adapter->queues[i];
		mutex_destroy(&lque->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
    705 
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Must be called with the core lock held (asserted below); returns
 *   nothing and stops the adapter on setup failure.
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	int             error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh  */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: the link (mailbox) vector plus every queue. */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
    831 
    832 /************************************************************************
    833  * ixv_enable_queue
    834  ************************************************************************/
    835 static inline void
    836 ixv_enable_queue(struct adapter *adapter, u32 vector)
    837 {
    838 	struct ixgbe_hw *hw = &adapter->hw;
    839 	struct ix_queue *que = &adapter->queues[vector];
    840 	u32             queue = 1 << vector;
    841 	u32             mask;
    842 
    843 	mutex_enter(&que->dc_mtx);
    844 	if (que->disabled_count > 0 && --que->disabled_count > 0)
    845 		goto out;
    846 
    847 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    848 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    849 out:
    850 	mutex_exit(&que->dc_mtx);
    851 } /* ixv_enable_queue */
    852 
    853 /************************************************************************
    854  * ixv_disable_queue
    855  ************************************************************************/
    856 static inline void
    857 ixv_disable_queue(struct adapter *adapter, u32 vector)
    858 {
    859 	struct ixgbe_hw *hw = &adapter->hw;
    860 	struct ix_queue *que = &adapter->queues[vector];
    861 	u64             queue = (u64)(1 << vector);
    862 	u32             mask;
    863 
    864 	mutex_enter(&que->dc_mtx);
    865 	if (que->disabled_count++ > 0)
    866 		goto  out;
    867 
    868 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    869 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    870 out:
    871 	mutex_exit(&que->dc_mtx);
    872 } /* ixv_disable_queue */
    873 
    874 static inline void
    875 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    876 {
    877 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    878 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    879 } /* ixv_rearm_queues */
    880 
    881 
    882 /************************************************************************
    883  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    884  ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/*
	 * Mask this queue's vector while we service it; re-enabled below
	 * (or by the softint) once processing is done.  The disable is
	 * refcounted (see ixv_disable_queue).
	 */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	/* TX completions can be handled here; they only need the TX lock */
	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes/packet over the last interval, larger of TX/RX */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * On NetBSD "more" is always true, so RX processing (and the
	 * eventual interrupt re-enable) happens in the queue softint.
	 */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
    972 
    973 /************************************************************************
    974  * ixv_msix_mbx
    975  ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->link_irq.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change */
	hw->mac.get_link_status = TRUE;
	/* Defer the actual link check/mailbox handling to the softint */
	softint_schedule(adapter->link_si);

	/* Re-enable the mailbox/link vector immediately */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));

	return 1;
} /* ixv_msix_mbx */
    993 
    994 static void
    995 ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
    996 {
    997 
    998 	/*
    999 	 * Newer devices than 82598 have VF function, so this function is
   1000 	 * simple.
   1001 	 */
   1002 	itr |= IXGBE_EITR_CNT_WDIS;
   1003 
   1004 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
   1005 }
   1006 
   1007 
   1008 /************************************************************************
   1009  * ixv_media_status - Media Ioctl callback
   1010  *
   1011  *   Called whenever the user queries the status of
   1012  *   the interface using ifconfig.
   1013  ************************************************************************/
   1014 static void
   1015 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   1016 {
   1017 	struct adapter *adapter = ifp->if_softc;
   1018 
   1019 	INIT_DEBUGOUT("ixv_media_status: begin");
   1020 	IXGBE_CORE_LOCK(adapter);
   1021 	ixv_update_link_status(adapter);
   1022 
   1023 	ifmr->ifm_status = IFM_AVALID;
   1024 	ifmr->ifm_active = IFM_ETHER;
   1025 
   1026 	if (!adapter->link_active) {
   1027 		ifmr->ifm_active |= IFM_NONE;
   1028 		IXGBE_CORE_UNLOCK(adapter);
   1029 		return;
   1030 	}
   1031 
   1032 	ifmr->ifm_status |= IFM_ACTIVE;
   1033 
   1034 	switch (adapter->link_speed) {
   1035 		case IXGBE_LINK_SPEED_10GB_FULL:
   1036 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1037 			break;
   1038 		case IXGBE_LINK_SPEED_5GB_FULL:
   1039 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   1040 			break;
   1041 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   1042 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   1043 			break;
   1044 		case IXGBE_LINK_SPEED_1GB_FULL:
   1045 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1046 			break;
   1047 		case IXGBE_LINK_SPEED_100_FULL:
   1048 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1049 			break;
   1050 		case IXGBE_LINK_SPEED_10_FULL:
   1051 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   1052 			break;
   1053 	}
   1054 
   1055 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   1056 
   1057 	IXGBE_CORE_UNLOCK(adapter);
   1058 } /* ixv_media_status */
   1059 
   1060 /************************************************************************
   1061  * ixv_media_change - Media Ioctl callback
   1062  *
   1063  *   Called when the user changes speed/duplex using
   1064  *   media/mediopt option with ifconfig.
   1065  ************************************************************************/
   1066 static int
   1067 ixv_media_change(struct ifnet *ifp)
   1068 {
   1069 	struct adapter *adapter = ifp->if_softc;
   1070 	struct ifmedia *ifm = &adapter->media;
   1071 
   1072 	INIT_DEBUGOUT("ixv_media_change: begin");
   1073 
   1074 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1075 		return (EINVAL);
   1076 
   1077 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1078 	case IFM_AUTO:
   1079 		break;
   1080 	default:
   1081 		device_printf(adapter->dev, "Only auto media type\n");
   1082 		return (EINVAL);
   1083 	}
   1084 
   1085 	return (0);
   1086 } /* ixv_media_change */
   1087 
   1088 
   1089 /************************************************************************
   1090  * ixv_negotiate_api
   1091  *
   1092  *   Negotiate the Mailbox API with the PF;
   1093  *   start with the most featured API first.
   1094  ************************************************************************/
   1095 static int
   1096 ixv_negotiate_api(struct adapter *adapter)
   1097 {
   1098 	struct ixgbe_hw *hw = &adapter->hw;
   1099 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1100 	                              ixgbe_mbox_api_10,
   1101 	                              ixgbe_mbox_api_unknown };
   1102 	int             i = 0;
   1103 
   1104 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1105 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1106 			return (0);
   1107 		i++;
   1108 	}
   1109 
   1110 	return (EINVAL);
   1111 } /* ixv_negotiate_api */
   1112 
   1113 
   1114 /************************************************************************
   1115  * ixv_set_multi - Multicast Update
   1116  *
   1117  *   Called whenever multicast address list is updated.
   1118  ************************************************************************/
static void
ixv_set_multi(struct adapter *adapter)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;
	/* Flat array of 6-byte MAC addresses handed to the shared code */
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8                 *update_ptr;
	int                mcnt = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_multi: begin");

	/* Walk the stack's multicast list, copying each address into mta */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		/* XXX This might be required --msaitoh */
		/*
		 * Truncate silently at the array's capacity; addresses
		 * beyond this are simply not programmed into the filter.
		 */
		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
			break;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	update_ptr = mta;

	/* Hand the list to the shared code via the ixv_mc_array_itr cursor */
	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, TRUE);
} /* ixv_set_multi */
   1151 
   1152 /************************************************************************
   1153  * ixv_mc_array_itr
   1154  *
   1155  *   An iterator function needed by the multicast shared code.
   1156  *   It feeds the shared code routine the addresses in the
   1157  *   array of ixv_set_multi() one by one.
   1158  ************************************************************************/
   1159 static u8 *
   1160 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1161 {
   1162 	u8 *addr = *update_ptr;
   1163 	u8 *newptr;
   1164 
   1165 	*vmdq = 0;
   1166 
   1167 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1168 	*update_ptr = newptr;
   1169 
   1170 	return addr;
   1171 } /* ixv_mc_array_itr */
   1172 
   1173 /************************************************************************
   1174  * ixv_local_timer - Timer routine
   1175  *
   1176  *   Checks for link status, updates statistics,
   1177  *   and runs the watchdog check.
   1178  ************************************************************************/
/* Callout entry point: take the core lock and run the real timer body. */
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
   1188 
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;	/* bitmap of queues with pending TX work */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;	/* number of queues declared hung */
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/* Update some event counters: sum per-queue counts into adapter totals */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring  *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves for one second from now */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* All queues hung: log it, count it, and reinitialize the device */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
   1282 
   1283 /************************************************************************
   1284  * ixv_update_link_status - Update OS on link state
   1285  *
   1286  * Note: Only updates the OS on the cached link state.
   1287  *       The real check of the hardware only happens with
   1288  *       a link interrupt.
   1289  ************************************************************************/
   1290 static void
   1291 ixv_update_link_status(struct adapter *adapter)
   1292 {
   1293 	struct ifnet *ifp = adapter->ifp;
   1294 	device_t     dev = adapter->dev;
   1295 
   1296 	KASSERT(mutex_owned(&adapter->core_mtx));
   1297 
   1298 	if (adapter->link_up) {
   1299 		if (adapter->link_active == FALSE) {
   1300 			if (bootverbose) {
   1301 				const char *bpsmsg;
   1302 
   1303 				switch (adapter->link_speed) {
   1304 				case IXGBE_LINK_SPEED_10GB_FULL:
   1305 					bpsmsg = "10 Gbps";
   1306 					break;
   1307 				case IXGBE_LINK_SPEED_5GB_FULL:
   1308 					bpsmsg = "5 Gbps";
   1309 					break;
   1310 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1311 					bpsmsg = "2.5 Gbps";
   1312 					break;
   1313 				case IXGBE_LINK_SPEED_1GB_FULL:
   1314 					bpsmsg = "1 Gbps";
   1315 					break;
   1316 				case IXGBE_LINK_SPEED_100_FULL:
   1317 					bpsmsg = "100 Mbps";
   1318 					break;
   1319 				case IXGBE_LINK_SPEED_10_FULL:
   1320 					bpsmsg = "10 Mbps";
   1321 					break;
   1322 				default:
   1323 					bpsmsg = "unknown speed";
   1324 					break;
   1325 				}
   1326 				device_printf(dev, "Link is up %s %s \n",
   1327 				    bpsmsg, "Full Duplex");
   1328 			}
   1329 			adapter->link_active = TRUE;
   1330 			if_link_state_change(ifp, LINK_STATE_UP);
   1331 		}
   1332 	} else { /* Link down */
   1333 		if (adapter->link_active == TRUE) {
   1334 			if (bootverbose)
   1335 				device_printf(dev, "Link is Down\n");
   1336 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1337 			adapter->link_active = FALSE;
   1338 		}
   1339 	}
   1340 } /* ixv_update_link_status */
   1341 
   1342 
   1343 /************************************************************************
   1344  * ixv_stop - Stop the hardware
   1345  *
   1346  *   Disables all traffic on the adapter by issuing a
   1347  *   global reset on the MAC and deallocates TX/RX buffers.
   1348  ************************************************************************/
/* ifnet if_stop hook: take the core lock and do the real stop. */
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
   1358 
static void
ixv_stop(void *arg)
{
	struct ifnet    *ifp;
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped so the following stop_adapter call
	 * performs a full stop rather than returning early.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	/* Stop the watchdog/stats timer */
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
   1386 
   1387 
   1388 /************************************************************************
   1389  * ixv_allocate_pci_resources
   1390  ************************************************************************/
/*
 * Map BAR0 (the device register window) and record the tunable queue
 * count.  Returns 0 on success or ENXIO if the BAR cannot be mapped.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t	memtype;
	device_t        dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Map the registers non-prefetchable: prefetching is not
		 * safe for a device register window.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "not mapped" for teardown */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
   1431 
   1432 /************************************************************************
   1433  * ixv_free_pci_resources
   1434  ************************************************************************/
/*
 * Undo ixv_allocate_pci_resources and interrupt establishment: tear
 * down the queue and mailbox interrupt handlers, release the MSI-X
 * vectors, and unmap BAR0 if it was mapped.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	int		rid;

	/*
	 *  Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* que->res non-NULL means the handler was established */
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}


	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	/* mem_size != 0 means BAR0 was successfully mapped at attach */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */
   1471 
   1472 /************************************************************************
   1473  * ixv_setup_interface
   1474  *
   1475  *   Setup networking device structure and register an interface.
   1476  ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* Fill in the ifnet embedded in our ethercom */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit entry point */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	                     |  IFCAP_TSOv4
	                     |  IFCAP_TSOv6;
	/* Interface-level offloads are advertised but start disabled */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */
   1563 
   1564 
   1565 /************************************************************************
   1566  * ixv_initialize_transmit_units - Enable transmit unit.
   1567  ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	/* Program each TX ring's descriptor base, length and control regs */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;	/* hardware ring index */

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->txr_no_space = false;

		/* Set Ring parameters: base address (lo/hi) and byte length */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering for descriptor writeback */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
   1612 
   1613 
   1614 /************************************************************************
   1615  * ixv_initialize_rss_mapping
   1616  ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id;
	int             i, j;
	u32             rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds (10 x 32-bit = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/*
	 * Set up the redirection table: 64 entries, packed four 8-bit
	 * queue ids per 32-bit VFRETA register.  j cycles round-robin
	 * through the configured queues.
	 */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			/* Register full after every 4th entry; flush it */
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the hash config into MRQC field enables */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
   1710 
   1711 
   1712 /************************************************************************
   1713  * ixv_initialize_receive_units - Setup receive registers and features.
   1714  ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, rxcsum, psrtype;

	/*
	 * Pick the per-descriptor receive buffer size: 4KB clusters for
	 * jumbo MTU, 2KB otherwise.  SRRCTL encodes the size in 1KB units,
	 * hence the shift.
	 */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Header split candidates for the packet split feature. */
	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	/*
	 * NOTE(review): bit 29 appears to select the multi-queue/RSS
	 * field width in PSRTYPE -- confirm against the 82599 VF
	 * datasheet.
	 */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	/* Per-queue RX ring setup. */
	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll (up to ~10ms) until the hardware reports it quiesced. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register: buffer size + one-buffer descriptors. */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/*
		 * Do the queue enabling last.
		 * NOTE(review): VME (VLAN strip) is set unconditionally here,
		 * not only when VLANs are configured -- confirm intended.
		 */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll (up to ~10ms) for the enable bit to latch. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* If checksum offload is fully disabled, fall back to IP payload csum. */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixv_initialize_receive_units */
   1842 
   1843 /************************************************************************
   1844  * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
   1845  *
   1846  *   Retrieves the TDH value from the hardware
   1847  ************************************************************************/
   1848 static int
   1849 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
   1850 {
   1851 	struct sysctlnode node = *rnode;
   1852 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1853 	uint32_t val;
   1854 
   1855 	if (!txr)
   1856 		return (0);
   1857 
   1858 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
   1859 	node.sysctl_data = &val;
   1860 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1861 } /* ixv_sysctl_tdh_handler */
   1862 
   1863 /************************************************************************
   1864  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   1865  *
   1866  *   Retrieves the TDT value from the hardware
   1867  ************************************************************************/
   1868 static int
   1869 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
   1870 {
   1871 	struct sysctlnode node = *rnode;
   1872 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1873 	uint32_t val;
   1874 
   1875 	if (!txr)
   1876 		return (0);
   1877 
   1878 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
   1879 	node.sysctl_data = &val;
   1880 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1881 } /* ixv_sysctl_tdt_handler */
   1882 
   1883 /************************************************************************
   1884  * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
   1885  * handler function
   1886  *
   1887  *   Retrieves the next_to_check value
   1888  ************************************************************************/
   1889 static int
   1890 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   1891 {
   1892 	struct sysctlnode node = *rnode;
   1893 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1894 	uint32_t val;
   1895 
   1896 	if (!rxr)
   1897 		return (0);
   1898 
   1899 	val = rxr->next_to_check;
   1900 	node.sysctl_data = &val;
   1901 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1902 } /* ixv_sysctl_next_to_check_handler */
   1903 
   1904 /************************************************************************
   1905  * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
   1906  *
   1907  *   Retrieves the RDH value from the hardware
   1908  ************************************************************************/
   1909 static int
   1910 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
   1911 {
   1912 	struct sysctlnode node = *rnode;
   1913 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1914 	uint32_t val;
   1915 
   1916 	if (!rxr)
   1917 		return (0);
   1918 
   1919 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
   1920 	node.sysctl_data = &val;
   1921 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1922 } /* ixv_sysctl_rdh_handler */
   1923 
   1924 /************************************************************************
   1925  * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
   1926  *
   1927  *   Retrieves the RDT value from the hardware
   1928  ************************************************************************/
   1929 static int
   1930 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
   1931 {
   1932 	struct sysctlnode node = *rnode;
   1933 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1934 	uint32_t val;
   1935 
   1936 	if (!rxr)
   1937 		return (0);
   1938 
   1939 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
   1940 	node.sysctl_data = &val;
   1941 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1942 } /* ixv_sysctl_rdt_handler */
   1943 
   1944 /************************************************************************
   1945  * ixv_setup_vlan_support
   1946  ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring  *rxr;
	u32		ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(ec))
		return;

	/* Enable VLAN tag stripping (VME) on every RX queue. */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = TRUE;
	}

#if 1
	/*
	 * XXX dirty hack. Enable all VIDs.
	 * With this block active, the shadow VFTA is saturated before the
	 * repopulation loop below, so every VLAN ID 0..4095 is requested
	 * from the PF regardless of what was actually registered.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
	  adapter->shadow_vfta[i] = 0xffffffff;
#endif
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/*
			 * Call the shared code mailbox routine.
			 * set_vfta goes through the PF mailbox and can fail
			 * transiently; give up on a VID after 6 failed tries.
			 */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
   2008 
   2009 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2010 /************************************************************************
   2011  * ixv_register_vlan
   2012  *
   2013  *   Run via a vlan config EVENT, it enables us to use the
   2014  *   HW Filter table since we can get the vlan id. This just
   2015  *   creates the entry in the soft version of the VFTA, init
   2016  *   will repopulate the real table.
   2017  ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/*
	 * shadow_vfta is an array of 32-bit words covering VIDs 0..4095:
	 * word index = vtag / 32, bit = vtag % 32.
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */
   2038 
   2039 /************************************************************************
   2040  * ixv_unregister_vlan
   2041  *
   2042  *   Run via a vlan unconfig EVENT, remove our entry
   2043  *   in the soft vfta.
   2044  ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc !=  arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the VID's bit in the soft VFTA (mirror of register_vlan). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
   2065 #endif
   2066 
   2067 /************************************************************************
   2068  * ixv_enable_intr
   2069  ************************************************************************/
   2070 static void
   2071 ixv_enable_intr(struct adapter *adapter)
   2072 {
   2073 	struct ixgbe_hw *hw = &adapter->hw;
   2074 	struct ix_queue *que = adapter->queues;
   2075 	u32             mask;
   2076 	int i;
   2077 
   2078 	/* For VTEIAC */
   2079 	mask = (1 << adapter->vector);
   2080 	for (i = 0; i < adapter->num_queues; i++, que++)
   2081 		mask |= (1 << que->msix);
   2082 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   2083 
   2084 	/* For VTEIMS */
   2085 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
   2086 	que = adapter->queues;
   2087 	for (i = 0; i < adapter->num_queues; i++, que++)
   2088 		ixv_enable_queue(adapter, que->msix);
   2089 
   2090 	IXGBE_WRITE_FLUSH(hw);
   2091 } /* ixv_enable_intr */
   2092 
   2093 /************************************************************************
   2094  * ixv_disable_intr
   2095  ************************************************************************/
   2096 static void
   2097 ixv_disable_intr(struct adapter *adapter)
   2098 {
   2099 	struct ix_queue	*que = adapter->queues;
   2100 
   2101 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2102 
   2103 	/* disable interrupts other than queues */
   2104 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
   2105 
   2106 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2107 		ixv_disable_queue(adapter, que->msix);
   2108 
   2109 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2110 } /* ixv_disable_intr */
   2111 
   2112 /************************************************************************
   2113  * ixv_set_ivar
   2114  *
   2115  *   Setup the correct IVAR register for a particular MSI-X interrupt
   2116  *    - entry is the register array entry
   2117  *    - vector is the MSI-X vector for this queue
   2118  *    - type is RX/TX/MISC
   2119  ************************************************************************/
   2120 static void
   2121 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2122 {
   2123 	struct ixgbe_hw *hw = &adapter->hw;
   2124 	u32             ivar, index;
   2125 
   2126 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2127 
   2128 	if (type == -1) { /* MISC IVAR */
   2129 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2130 		ivar &= ~0xFF;
   2131 		ivar |= vector;
   2132 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2133 	} else {          /* RX/TX IVARS */
   2134 		index = (16 * (entry & 1)) + (8 * type);
   2135 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2136 		ivar &= ~(0xFF << index);
   2137 		ivar |= (vector << index);
   2138 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2139 	}
   2140 } /* ixv_set_ivar */
   2141 
   2142 /************************************************************************
   2143  * ixv_configure_ivars
   2144  ************************************************************************/
   2145 static void
   2146 ixv_configure_ivars(struct adapter *adapter)
   2147 {
   2148 	struct ix_queue *que = adapter->queues;
   2149 
   2150 	/* XXX We should sync EITR value calculation with ixgbe.c? */
   2151 
   2152 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2153 		/* First the RX queue entry */
   2154 		ixv_set_ivar(adapter, i, que->msix, 0);
   2155 		/* ... and the TX */
   2156 		ixv_set_ivar(adapter, i, que->msix, 1);
   2157 		/* Set an initial value in EITR */
   2158 		ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
   2159 	}
   2160 
   2161 	/* For the mailbox interrupt */
   2162 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2163 } /* ixv_configure_ivars */
   2164 
   2165 
   2166 /************************************************************************
   2167  * ixv_save_stats
   2168  *
   2169  *   The VF stats registers never have a truly virgin
   2170  *   starting point, so this routine tries to make an
   2171  *   artificial one, marking ground zero on attach as
   2172  *   it were.
   2173  ************************************************************************/
   2174 static void
   2175 ixv_save_stats(struct adapter *adapter)
   2176 {
   2177 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2178 
   2179 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2180 		stats->saved_reset_vfgprc +=
   2181 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2182 		stats->saved_reset_vfgptc +=
   2183 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2184 		stats->saved_reset_vfgorc +=
   2185 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2186 		stats->saved_reset_vfgotc +=
   2187 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2188 		stats->saved_reset_vfmprc +=
   2189 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2190 	}
   2191 } /* ixv_save_stats */
   2192 
   2193 /************************************************************************
   2194  * ixv_init_stats
   2195  ************************************************************************/
   2196 static void
   2197 ixv_init_stats(struct adapter *adapter)
   2198 {
   2199 	struct ixgbe_hw *hw = &adapter->hw;
   2200 
   2201 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2202 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2203 	adapter->stats.vf.last_vfgorc |=
   2204 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2205 
   2206 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2207 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2208 	adapter->stats.vf.last_vfgotc |=
   2209 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2210 
   2211 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2212 
   2213 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2214 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2215 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2216 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2217 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2218 } /* ixv_init_stats */
   2219 
/*
 * Fold a 32-bit hardware counter into a 64-bit evcnt.  The register
 * wraps at 2^32; a wrap is detected when the new reading is below the
 * previous one, in which case a carry is added to the upper 32 bits.
 * The low 32 bits are then replaced with the current register value.
 * Relies on a local 'hw' variable being in scope at the expansion site.
 * NOTE(review): 'count' is expanded multiple times -- pass only
 * side-effect-free lvalues.
 */
#define UPDATE_STAT_32(reg, last, count)		\
{                                                       \
	u32 current = IXGBE_READ_REG(hw, (reg));	\
	if (current < (last))				\
		count.ev_count += 0x100000000LL;	\
	(last) = current;				\
	count.ev_count &= 0xFFFFFFFF00000000LL;		\
	count.ev_count |= current;			\
}
   2229 
/*
 * Fold a 36-bit hardware counter (split across LSB/MSB registers)
 * into a 64-bit evcnt.  The combined value wraps at 2^36, so the
 * carry on wrap is 0x1000000000 and only bits above bit 35 of the
 * evcnt are preserved before splicing in the current reading.
 * Relies on a local 'hw' variable being in scope at the expansion site.
 * NOTE(review): 'count' is expanded multiple times -- pass only
 * side-effect-free lvalues.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
	u64 current = ((cur_msb << 32) | cur_lsb);      \
	if (current < (last))				\
		count.ev_count += 0x1000000000LL;	\
	(last) = current;				\
	count.ev_count &= 0xFFFFFFF000000000LL;		\
	count.ev_count |= current;			\
}
   2241 
   2242 /************************************************************************
   2243  * ixv_update_stats - Update the board statistics counters.
   2244  ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	/* 'hw' must keep this name: the UPDATE_STAT_* macros reference it. */
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* Good/multicast packet counters are 32-bit in hardware. */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* Octet counters are 36-bit, split across LSB/MSB registers. */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
   2266 
   2267 /************************************************************************
   2268  * ixv_sysctl_interrupt_rate_handler
   2269  ************************************************************************/
static int
ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter  *adapter = que->adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;
	/*
	 * The EITR interval field sits in bits 3..11.  rate = 500000/interval
	 * converts the raw field to interrupts/sec (implies a 2us unit per
	 * count -- matches the 4000000/rate encoding below).
	 */
	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	/* Report the current rate; if this is a read, we are done. */
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		/* Clamp to at most one interrupt per millisecond... wait, */
		/* 1000/sec is the slowest supported rate; clamp upward. */
		if (rate < 1000)
			rate = 1000;
		/* 4000000/rate already includes the <<3 field offset. */
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixv_max_interrupt_rate = rate;
	} else
		ixv_max_interrupt_rate = 0;
	/* Program the (possibly zero = unlimited) interval. */
	ixv_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixv_sysctl_interrupt_rate_handler */
   2315 
   2316 const struct sysctlnode *
   2317 ixv_sysctl_instance(struct adapter *adapter)
   2318 {
   2319 	const char *dvname;
   2320 	struct sysctllog **log;
   2321 	int rc;
   2322 	const struct sysctlnode *rnode;
   2323 
   2324 	log = &adapter->sysctllog;
   2325 	dvname = device_xname(adapter->dev);
   2326 
   2327 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2328 	    0, CTLTYPE_NODE, dvname,
   2329 	    SYSCTL_DESCR("ixv information and settings"),
   2330 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2331 		goto err;
   2332 
   2333 	return rnode;
   2334 err:
   2335 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2336 	return NULL;
   2337 }
   2338 
   2339 static void
   2340 ixv_add_device_sysctls(struct adapter *adapter)
   2341 {
   2342 	struct sysctllog **log;
   2343 	const struct sysctlnode *rnode, *cnode;
   2344 	device_t dev;
   2345 
   2346 	dev = adapter->dev;
   2347 	log = &adapter->sysctllog;
   2348 
   2349 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2350 		aprint_error_dev(dev, "could not create sysctl root\n");
   2351 		return;
   2352 	}
   2353 
   2354 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2355 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2356 	    "debug", SYSCTL_DESCR("Debug Info"),
   2357 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2358 		aprint_error_dev(dev, "could not create sysctl\n");
   2359 
   2360 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2361 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2362 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2363 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2364 		aprint_error_dev(dev, "could not create sysctl\n");
   2365 
   2366 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2367 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2368 	    "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   2369 		NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
   2370 		aprint_error_dev(dev, "could not create sysctl\n");
   2371 }
   2372 
   2373 /************************************************************************
   2374  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2375  ************************************************************************/
   2376 static void
   2377 ixv_add_stats_sysctls(struct adapter *adapter)
   2378 {
   2379 	device_t                dev = adapter->dev;
   2380 	struct tx_ring          *txr = adapter->tx_rings;
   2381 	struct rx_ring          *rxr = adapter->rx_rings;
   2382 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2383 	struct ixgbe_hw *hw = &adapter->hw;
   2384 	const struct sysctlnode *rnode, *cnode;
   2385 	struct sysctllog **log = &adapter->sysctllog;
   2386 	const char *xname = device_xname(dev);
   2387 
   2388 	/* Driver Statistics */
   2389 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2390 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2391 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2392 	    NULL, xname, "m_defrag() failed");
   2393 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2394 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2395 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2396 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2397 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2398 	    NULL, xname, "Driver tx dma hard fail other");
   2399 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2400 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2401 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2402 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2403 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2404 	    NULL, xname, "Watchdog timeouts");
   2405 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2406 	    NULL, xname, "TSO errors");
   2407 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2408 	    NULL, xname, "Link MSI-X IRQ Handled");
   2409 
   2410 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2411 		snprintf(adapter->queues[i].evnamebuf,
   2412 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2413 		    xname, i);
   2414 		snprintf(adapter->queues[i].namebuf,
   2415 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2416 
   2417 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2418 			aprint_error_dev(dev, "could not create sysctl root\n");
   2419 			break;
   2420 		}
   2421 
   2422 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2423 		    0, CTLTYPE_NODE,
   2424 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2425 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2426 			break;
   2427 
   2428 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2429 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2430 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2431 		    ixv_sysctl_interrupt_rate_handler, 0,
   2432 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2433 			break;
   2434 
   2435 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2436 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2437 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2438 		    ixv_sysctl_tdh_handler, 0, (void *)txr,
   2439 		    0, CTL_CREATE, CTL_EOL) != 0)
   2440 			break;
   2441 
   2442 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2443 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2444 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2445 		    ixv_sysctl_tdt_handler, 0, (void *)txr,
   2446 		    0, CTL_CREATE, CTL_EOL) != 0)
   2447 			break;
   2448 
   2449 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2450 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2451 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   2452 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   2453 		    "Handled queue in softint");
   2454 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   2455 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   2456 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2457 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2458 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2459 		    NULL, adapter->queues[i].evnamebuf,
   2460 		    "Queue No Descriptor Available");
   2461 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2462 		    NULL, adapter->queues[i].evnamebuf,
   2463 		    "Queue Packets Transmitted");
   2464 #ifndef IXGBE_LEGACY_TX
   2465 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2466 		    NULL, adapter->queues[i].evnamebuf,
   2467 		    "Packets dropped in pcq");
   2468 #endif
   2469 
   2470 #ifdef LRO
   2471 		struct lro_ctrl *lro = &rxr->lro;
   2472 #endif /* LRO */
   2473 
   2474 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2475 		    CTLFLAG_READONLY,
   2476 		    CTLTYPE_INT,
   2477 		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
   2478 			ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   2479 		    CTL_CREATE, CTL_EOL) != 0)
   2480 			break;
   2481 
   2482 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2483 		    CTLFLAG_READONLY,
   2484 		    CTLTYPE_INT,
   2485 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2486 		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2487 		    CTL_CREATE, CTL_EOL) != 0)
   2488 			break;
   2489 
   2490 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2491 		    CTLFLAG_READONLY,
   2492 		    CTLTYPE_INT,
   2493 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2494 		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2495 		    CTL_CREATE, CTL_EOL) != 0)
   2496 			break;
   2497 
   2498 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2499 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2500 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2501 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2502 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2503 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2504 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2505 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2506 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2507 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2508 #ifdef LRO
   2509 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2510 				CTLFLAG_RD, &lro->lro_queued, 0,
   2511 				"LRO Queued");
   2512 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2513 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2514 				"LRO Flushed");
   2515 #endif /* LRO */
   2516 	}
   2517 
   2518 	/* MAC stats get their own sub node */
   2519 
   2520 	snprintf(stats->namebuf,
   2521 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2522 
   2523 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2524 	    stats->namebuf, "rx csum offload - IP");
   2525 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2526 	    stats->namebuf, "rx csum offload - L4");
   2527 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2528 	    stats->namebuf, "rx csum offload - IP bad");
   2529 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2530 	    stats->namebuf, "rx csum offload - L4 bad");
   2531 
   2532 	/* Packet Reception Stats */
   2533 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2534 	    xname, "Good Packets Received");
   2535 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2536 	    xname, "Good Octets Received");
   2537 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2538 	    xname, "Multicast Packets Received");
   2539 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2540 	    xname, "Good Packets Transmitted");
   2541 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2542 	    xname, "Good Octets Transmitted");
   2543 
   2544 	/* Mailbox Stats */
   2545 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2546 	    xname, "message TXs");
   2547 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2548 	    xname, "message RXs");
   2549 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2550 	    xname, "ACKs");
   2551 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2552 	    xname, "REQs");
   2553 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2554 	    xname, "RSTs");
   2555 
   2556 } /* ixv_add_stats_sysctls */
   2557 
   2558 /************************************************************************
   2559  * ixv_set_sysctl_value
   2560  ************************************************************************/
   2561 static void
   2562 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2563 	const char *description, int *limit, int value)
   2564 {
   2565 	device_t dev =  adapter->dev;
   2566 	struct sysctllog **log;
   2567 	const struct sysctlnode *rnode, *cnode;
   2568 
   2569 	log = &adapter->sysctllog;
   2570 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2571 		aprint_error_dev(dev, "could not create sysctl root\n");
   2572 		return;
   2573 	}
   2574 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2575 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2576 	    name, SYSCTL_DESCR(description),
   2577 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2578 		aprint_error_dev(dev, "could not create sysctl\n");
   2579 	*limit = value;
   2580 } /* ixv_set_sysctl_value */
   2581 
   2582 /************************************************************************
   2583  * ixv_print_debug_info
   2584  *
   2585  *   Called only when em_display_debug_stats is enabled.
   2586  *   Provides a way to take a look at important statistics
   2587  *   maintained by the driver and hardware.
   2588  ************************************************************************/
   2589 static void
   2590 ixv_print_debug_info(struct adapter *adapter)
   2591 {
   2592         device_t        dev = adapter->dev;
   2593         struct ixgbe_hw *hw = &adapter->hw;
   2594         struct ix_queue *que = adapter->queues;
   2595         struct rx_ring  *rxr;
   2596         struct tx_ring  *txr;
   2597 #ifdef LRO
   2598         struct lro_ctrl *lro;
   2599 #endif /* LRO */
   2600 
   2601 	device_printf(dev, "Error Byte Count = %u \n",
   2602 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2603 
   2604 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2605 		txr = que->txr;
   2606 		rxr = que->rxr;
   2607 #ifdef LRO
   2608 		lro = &rxr->lro;
   2609 #endif /* LRO */
   2610 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2611 		    que->msix, (long)que->irqs.ev_count);
   2612 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2613 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2614 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2615 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2616 #ifdef LRO
   2617 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2618 		    rxr->me, (long long)lro->lro_queued);
   2619 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2620 		    rxr->me, (long long)lro->lro_flushed);
   2621 #endif /* LRO */
   2622 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2623 		    txr->me, (long)txr->total_packets.ev_count);
   2624 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2625 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2626 	}
   2627 
   2628 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2629 	    (long)adapter->link_irq.ev_count);
   2630 } /* ixv_print_debug_info */
   2631 
   2632 /************************************************************************
   2633  * ixv_sysctl_debug
   2634  ************************************************************************/
   2635 static int
   2636 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2637 {
   2638 	struct sysctlnode node = *rnode;
   2639 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   2640 	int            error, result;
   2641 
   2642 	node.sysctl_data = &result;
   2643 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2644 
   2645 	if (error || newp == NULL)
   2646 		return error;
   2647 
   2648 	if (result == 1)
   2649 		ixv_print_debug_info(adapter);
   2650 
   2651 	return 0;
   2652 } /* ixv_sysctl_debug */
   2653 
   2654 /************************************************************************
   2655  * ixv_init_device_features
   2656  ************************************************************************/
   2657 static void
   2658 ixv_init_device_features(struct adapter *adapter)
   2659 {
   2660 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2661 	                  | IXGBE_FEATURE_VF
   2662 	                  | IXGBE_FEATURE_RSS
   2663 	                  | IXGBE_FEATURE_LEGACY_TX;
   2664 
   2665 	/* A tad short on feature flags for VFs, atm. */
   2666 	switch (adapter->hw.mac.type) {
   2667 	case ixgbe_mac_82599_vf:
   2668 		break;
   2669 	case ixgbe_mac_X540_vf:
   2670 		break;
   2671 	case ixgbe_mac_X550_vf:
   2672 	case ixgbe_mac_X550EM_x_vf:
   2673 	case ixgbe_mac_X550EM_a_vf:
   2674 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2675 		break;
   2676 	default:
   2677 		break;
   2678 	}
   2679 
   2680 	/* Enabled by default... */
   2681 	/* Is a virtual function (VF) */
   2682 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2683 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2684 	/* Netmap */
   2685 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2686 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2687 	/* Receive-Side Scaling (RSS) */
   2688 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2689 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2690 	/* Needs advanced context descriptor regardless of offloads req'd */
   2691 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2692 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2693 
   2694 	/* Enabled via sysctl... */
   2695 	/* Legacy (single queue) transmit */
   2696 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2697 	    ixv_enable_legacy_tx)
   2698 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2699 } /* ixv_init_device_features */
   2700 
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 *
 *   Stops the adapter with the core lock held.  Currently compiled
 *   out: on NetBSD, shutdown handling is expected to go through
 *   pmf(9) rather than a dedicated device method.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
   2716 
   2717 static int
   2718 ixv_ifflags_cb(struct ethercom *ec)
   2719 {
   2720 	struct ifnet *ifp = &ec->ec_if;
   2721 	struct adapter *adapter = ifp->if_softc;
   2722 	int change, rc = 0;
   2723 
   2724 	IXGBE_CORE_LOCK(adapter);
   2725 
   2726 	change = ifp->if_flags ^ adapter->if_flags;
   2727 	if (change != 0)
   2728 		adapter->if_flags = ifp->if_flags;
   2729 
   2730 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2731 		rc = ENETRESET;
   2732 
   2733 	/* Set up VLAN support and filter */
   2734 	ixv_setup_vlan_support(adapter);
   2735 
   2736 	IXGBE_CORE_UNLOCK(adapter);
   2737 
   2738 	return rc;
   2739 }
   2740 
   2741 
   2742 /************************************************************************
   2743  * ixv_ioctl - Ioctl entry point
   2744  *
   2745  *   Called when the user wants to configure the interface.
   2746  *
   2747  *   return 0 on success, positive on failure
   2748  ************************************************************************/
   2749 static int
   2750 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2751 {
   2752 	struct adapter	*adapter = ifp->if_softc;
   2753 	struct ifcapreq *ifcr = data;
   2754 	struct ifreq	*ifr = data;
   2755 	int             error = 0;
   2756 	int l4csum_en;
   2757 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2758 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2759 
   2760 	switch (command) {
   2761 	case SIOCSIFFLAGS:
   2762 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2763 		break;
   2764 	case SIOCADDMULTI:
   2765 	case SIOCDELMULTI:
   2766 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2767 		break;
   2768 	case SIOCSIFMEDIA:
   2769 	case SIOCGIFMEDIA:
   2770 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2771 		break;
   2772 	case SIOCSIFCAP:
   2773 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2774 		break;
   2775 	case SIOCSIFMTU:
   2776 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2777 		break;
   2778 	default:
   2779 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2780 		break;
   2781 	}
   2782 
   2783 	switch (command) {
   2784 	case SIOCSIFMEDIA:
   2785 	case SIOCGIFMEDIA:
   2786 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2787 	case SIOCSIFCAP:
   2788 		/* Layer-4 Rx checksum offload has to be turned on and
   2789 		 * off as a unit.
   2790 		 */
   2791 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2792 		if (l4csum_en != l4csum && l4csum_en != 0)
   2793 			return EINVAL;
   2794 		/*FALLTHROUGH*/
   2795 	case SIOCADDMULTI:
   2796 	case SIOCDELMULTI:
   2797 	case SIOCSIFFLAGS:
   2798 	case SIOCSIFMTU:
   2799 	default:
   2800 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2801 			return error;
   2802 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2803 			;
   2804 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2805 			IXGBE_CORE_LOCK(adapter);
   2806 			ixv_init_locked(adapter);
   2807 			IXGBE_CORE_UNLOCK(adapter);
   2808 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2809 			/*
   2810 			 * Multicast list has changed; set the hardware filter
   2811 			 * accordingly.
   2812 			 */
   2813 			IXGBE_CORE_LOCK(adapter);
   2814 			ixv_disable_intr(adapter);
   2815 			ixv_set_multi(adapter);
   2816 			ixv_enable_intr(adapter);
   2817 			IXGBE_CORE_UNLOCK(adapter);
   2818 		}
   2819 		return 0;
   2820 	}
   2821 } /* ixv_ioctl */
   2822 
   2823 /************************************************************************
   2824  * ixv_init
   2825  ************************************************************************/
   2826 static int
   2827 ixv_init(struct ifnet *ifp)
   2828 {
   2829 	struct adapter *adapter = ifp->if_softc;
   2830 
   2831 	IXGBE_CORE_LOCK(adapter);
   2832 	ixv_init_locked(adapter);
   2833 	IXGBE_CORE_UNLOCK(adapter);
   2834 
   2835 	return 0;
   2836 } /* ixv_init */
   2837 
/************************************************************************
 * ixv_handle_que - Deferred service routine for a queue pair
 *
 *   Runs in softint or workqueue context after the MSI-X handler has
 *   disabled the queue interrupt.  Drains Rx and Tx completions,
 *   restarts transmission if packets are waiting, and then either
 *   reschedules itself (more work pending) or re-enables the queue
 *   interrupt.
 ************************************************************************/
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Rx completions first (no Tx lock), then Tx under lock. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Restart multiqueue transmit if packets are queued. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* Work remains: requeue instead of re-enabling. */
			que->req.ev_count++;
			if (adapter->txrx_use_workqueue) {
				/*
				 * "enqueued flag" is not required here
				 * the same as ixg(4). See ixgbe_msix_que().
				 */
				workqueue_enqueue(adapter->que_wq,
				    &que->wq_cookie, curcpu());
			} else
				  softint_schedule(que->que_si);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
   2885 
   2886 /************************************************************************
   2887  * ixv_handle_que_work
   2888  ************************************************************************/
   2889 static void
   2890 ixv_handle_que_work(struct work *wk, void *context)
   2891 {
   2892 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   2893 
   2894 	/*
   2895 	 * "enqueued flag" is not required here the same as ixg(4).
   2896 	 * See ixgbe_msix_que().
   2897 	 */
   2898 	ixv_handle_que(que);
   2899 }
   2900 
   2901 /************************************************************************
   2902  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2903  ************************************************************************/
   2904 static int
   2905 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2906 {
   2907 	device_t	dev = adapter->dev;
   2908 	struct ix_queue *que = adapter->queues;
   2909 	struct		tx_ring *txr = adapter->tx_rings;
   2910 	int 		error, msix_ctrl, rid, vector = 0;
   2911 	pci_chipset_tag_t pc;
   2912 	pcitag_t	tag;
   2913 	char		intrbuf[PCI_INTRSTR_LEN];
   2914 	char		wqname[MAXCOMLEN];
   2915 	char		intr_xname[32];
   2916 	const char	*intrstr = NULL;
   2917 	kcpuset_t	*affinity;
   2918 	int		cpu_id = 0;
   2919 
   2920 	pc = adapter->osdep.pc;
   2921 	tag = adapter->osdep.tag;
   2922 
   2923 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2924 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2925 	    adapter->osdep.nintrs) != 0) {
   2926 		aprint_error_dev(dev,
   2927 		    "failed to allocate MSI-X interrupt\n");
   2928 		return (ENXIO);
   2929 	}
   2930 
   2931 	kcpuset_create(&affinity, false);
   2932 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2933 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2934 		    device_xname(dev), i);
   2935 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2936 		    sizeof(intrbuf));
   2937 #ifdef IXGBE_MPSAFE
   2938 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2939 		    true);
   2940 #endif
   2941 		/* Set the handler function */
   2942 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2943 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2944 		    intr_xname);
   2945 		if (que->res == NULL) {
   2946 			pci_intr_release(pc, adapter->osdep.intrs,
   2947 			    adapter->osdep.nintrs);
   2948 			aprint_error_dev(dev,
   2949 			    "Failed to register QUE handler\n");
   2950 			kcpuset_destroy(affinity);
   2951 			return (ENXIO);
   2952 		}
   2953 		que->msix = vector;
   2954         	adapter->active_queues |= (u64)(1 << que->msix);
   2955 
   2956 		cpu_id = i;
   2957 		/* Round-robin affinity */
   2958 		kcpuset_zero(affinity);
   2959 		kcpuset_set(affinity, cpu_id % ncpu);
   2960 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2961 		    NULL);
   2962 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2963 		    intrstr);
   2964 		if (error == 0)
   2965 			aprint_normal(", bound queue %d to cpu %d\n",
   2966 			    i, cpu_id % ncpu);
   2967 		else
   2968 			aprint_normal("\n");
   2969 
   2970 #ifndef IXGBE_LEGACY_TX
   2971 		txr->txr_si
   2972 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2973 			ixgbe_deferred_mq_start, txr);
   2974 #endif
   2975 		que->que_si
   2976 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2977 			ixv_handle_que, que);
   2978 		if (que->que_si == NULL) {
   2979 			aprint_error_dev(dev,
   2980 			    "could not establish software interrupt\n");
   2981 		}
   2982 	}
   2983 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   2984 	error = workqueue_create(&adapter->txr_wq, wqname,
   2985 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   2986 	    IXGBE_WORKQUEUE_FLAGS);
   2987 	if (error) {
   2988 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   2989 	}
   2990 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   2991 
   2992 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   2993 	error = workqueue_create(&adapter->que_wq, wqname,
   2994 	    ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   2995 	    IXGBE_WORKQUEUE_FLAGS);
   2996 	if (error) {
   2997 		aprint_error_dev(dev,
   2998 		    "couldn't create workqueue\n");
   2999 	}
   3000 
   3001 	/* and Mailbox */
   3002 	cpu_id++;
   3003 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   3004 	adapter->vector = vector;
   3005 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   3006 	    sizeof(intrbuf));
   3007 #ifdef IXGBE_MPSAFE
   3008 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   3009 	    true);
   3010 #endif
   3011 	/* Set the mbx handler function */
   3012 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   3013 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   3014 	    intr_xname);
   3015 	if (adapter->osdep.ihs[vector] == NULL) {
   3016 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   3017 		kcpuset_destroy(affinity);
   3018 		return (ENXIO);
   3019 	}
   3020 	/* Round-robin affinity */
   3021 	kcpuset_zero(affinity);
   3022 	kcpuset_set(affinity, cpu_id % ncpu);
   3023 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   3024 
   3025 	aprint_normal_dev(dev,
   3026 	    "for link, interrupting at %s", intrstr);
   3027 	if (error == 0)
   3028 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   3029 	else
   3030 		aprint_normal("\n");
   3031 
   3032 	/* Tasklets for Mailbox */
   3033 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   3034 	    ixv_handle_link, adapter);
   3035 	/*
   3036 	 * Due to a broken design QEMU will fail to properly
   3037 	 * enable the guest for MSI-X unless the vectors in
   3038 	 * the table are all set up, so we must rewrite the
   3039 	 * ENABLE in the MSI-X control register again at this
   3040 	 * point to cause it to successfully initialize us.
   3041 	 */
   3042 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   3043 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   3044 		rid += PCI_MSIX_CTL;
   3045 		msix_ctrl = pci_conf_read(pc, tag, rid);
   3046 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   3047 		pci_conf_write(pc, tag, rid, msix_ctrl);
   3048 	}
   3049 
   3050 	kcpuset_destroy(affinity);
   3051 	return (0);
   3052 } /* ixv_allocate_msix */
   3053 
   3054 /************************************************************************
   3055  * ixv_configure_interrupts - Setup MSI-X resources
   3056  *
   3057  *   Note: The VF device MUST use MSI-X, there is no fallback.
   3058  ************************************************************************/
   3059 static int
   3060 ixv_configure_interrupts(struct adapter *adapter)
   3061 {
   3062 	device_t dev = adapter->dev;
   3063 	int want, queues, msgs;
   3064 
   3065 	/* Must have at least 2 MSI-X vectors */
   3066 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   3067 	if (msgs < 2) {
   3068 		aprint_error_dev(dev, "MSIX config error\n");
   3069 		return (ENXIO);
   3070 	}
   3071 	msgs = MIN(msgs, IXG_MAX_NINTR);
   3072 
   3073 	/* Figure out a reasonable auto config value */
   3074 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   3075 
   3076 	if (ixv_num_queues != 0)
   3077 		queues = ixv_num_queues;
   3078 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   3079 		queues = IXGBE_VF_MAX_TX_QUEUES;
   3080 
   3081 	/*
   3082 	 * Want vectors for the queues,
   3083 	 * plus an additional for mailbox.
   3084 	 */
   3085 	want = queues + 1;
   3086 	if (msgs >= want)
   3087 		msgs = want;
   3088 	else {
   3089                	aprint_error_dev(dev,
   3090 		    "MSI-X Configuration Problem, "
   3091 		    "%d vectors but %d queues wanted!\n",
   3092 		    msgs, want);
   3093 		return -1;
   3094 	}
   3095 
   3096 	adapter->msix_mem = (void *)1; /* XXX */
   3097 	aprint_normal_dev(dev,
   3098 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   3099 	adapter->num_queues = queues;
   3100 
   3101 	return (0);
   3102 } /* ixv_configure_interrupts */
   3103 
   3104 
   3105 /************************************************************************
   3106  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   3107  *
   3108  *   Done outside of interrupt context since the driver might sleep
   3109  ************************************************************************/
   3110 static void
   3111 ixv_handle_link(void *context)
   3112 {
   3113 	struct adapter *adapter = context;
   3114 
   3115 	IXGBE_CORE_LOCK(adapter);
   3116 
   3117 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   3118 	    &adapter->link_up, FALSE);
   3119 	ixv_update_link_status(adapter);
   3120 
   3121 	IXGBE_CORE_UNLOCK(adapter);
   3122 } /* ixv_handle_link */
   3123 
   3124 /************************************************************************
   3125  * ixv_check_link - Used in the local timer to poll for link changes
   3126  ************************************************************************/
   3127 static void
   3128 ixv_check_link(struct adapter *adapter)
   3129 {
   3130 
   3131 	KASSERT(mutex_owned(&adapter->core_mtx));
   3132 
   3133 	adapter->hw.mac.get_link_status = TRUE;
   3134 
   3135 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   3136 	    &adapter->link_up, FALSE);
   3137 	ixv_update_link_status(adapter);
   3138 } /* ixv_check_link */
   3139