/* $NetBSD: ixv.c,v 1.192 2023/10/18 03:52:55 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.192 2023/10/18 03:52:55 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include "ixgbe.h"

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";
/* XXX NetBSD: + 1.5.17 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif
static int	ixv_ifflags_cb(struct ethercom *);
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct ixgbe_softc *);
static void	ixv_ifstop(struct ifnet *, int);
static void	ixv_stop_locked(void *);
static void	ixv_init_device_features(struct ixgbe_softc *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static int	ixv_allocate_pci_resources(struct ixgbe_softc *,
		    const struct pci_attach_args *);
static void	ixv_free_deferred_handlers(struct ixgbe_softc *);
static int	ixv_allocate_msix(struct ixgbe_softc *,
		    const struct pci_attach_args *);
static int	ixv_configure_interrupts(struct ixgbe_softc *);
static void	ixv_free_pci_resources(struct ixgbe_softc *);
static void	ixv_local_timer(void *);
static void	ixv_handle_timer(struct work *, void *);
static int	ixv_setup_interface(device_t, struct ixgbe_softc *);
static void	ixv_schedule_admin_tasklet(struct ixgbe_softc *);
static int	ixv_negotiate_api(struct ixgbe_softc *);

static void	ixv_initialize_transmit_units(struct ixgbe_softc *);
static void	ixv_initialize_receive_units(struct ixgbe_softc *);
static void	ixv_initialize_rss_mapping(struct ixgbe_softc *);
static s32	ixv_check_link(struct ixgbe_softc *);

static void	ixv_enable_intr(struct ixgbe_softc *);
static void	ixv_disable_intr(struct ixgbe_softc *);
static int	ixv_set_rxfilter(struct ixgbe_softc *);
static void	ixv_update_link_status(struct ixgbe_softc *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static void	ixv_configure_ivars(struct ixgbe_softc *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixv_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t);

static void	ixv_setup_vlan_tagging(struct ixgbe_softc *);
static int	ixv_setup_vlan_support(struct ixgbe_softc *);
static int	ixv_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixv_register_vlan(struct ixgbe_softc *, u16);
static int	ixv_unregister_vlan(struct ixgbe_softc *, u16);

static void	ixv_add_device_sysctls(struct ixgbe_softc *);
static void	ixv_init_stats(struct ixgbe_softc *);
static void	ixv_update_stats(struct ixgbe_softc *);
static void	ixv_add_stats_sysctls(struct ixgbe_softc *);
static void	ixv_clear_evcnt(struct ixgbe_softc *);

/* Sysctl handlers */
static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tx_process_limit(SYSCTLFN_PROTO);
static int	ixv_sysctl_rx_process_limit(SYSCTLFN_PROTO);
static int	ixv_sysctl_rx_copy_len(SYSCTLFN_PROTO);

/* The MSI-X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Event handlers running on workqueue */
static void	ixv_handle_que(void *);

/* Deferred workqueue handlers */
static void	ixv_handle_admin(struct work *, void *);
static void	ixv_handle_que_work(struct work *, void *);

const struct sysctlnode *ixv_sysctl_instance(struct ixgbe_softc *);
static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct ixgbe_softc),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct ixgbe_softc),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
#define	TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * Varies the interrupt rate over time based on the traffic
 * seen by that interrupt vector.
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Whether packet processing uses a workqueue or a softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring; set higher than RX
 * as this seems the better-performing choice.
 */
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
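/*
 * With NET_MPSAFE, the flags above create the callouts, softints and
 * workqueues MPSAFE, so they run without the kernel lock.
 */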
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif

/************************************************************************
 * ixv_probe - Device identification routine
 *
 *   Determines if the driver should be loaded on
 *   this adapter based on its PCI vendor/device ID.
 *
 *   return 1 on match, 0 otherwise
 ************************************************************************/
static int
ixv_probe(device_t dev, cfdata_t cf, void *aux)
{
#ifdef __HAVE_PCI_MSI_MSIX
	const struct pci_attach_args *pa = aux;

	return (ixv_lookup(pa) != NULL) ? 1 : 0;
#else
	return 0;
#endif
} /* ixv_probe */

static const ixgbe_vendor_info_t *
ixv_lookup(const struct pci_attach_args *pa)
{
	const ixgbe_vendor_info_t *ent;
	pcireg_t subid;

	INIT_DEBUGOUT("ixv_lookup: begin");

	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
		return NULL;

	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			return ent;
		}
	}

	return NULL;
}

/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int		error = 0;
	pcireg_t	id, subid;
	const ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char wqname[MAXCOMLEN];
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set; on a VM under KVM it may not
	 * be, which will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	sc = device_private(dev);
	sc->hw.back = sc;
	sc->dev = dev;
	hw = &sc->hw;

	sc->init_locked = ixv_init_locked;
	sc->stop_locked = ixv_stop_locked;

	sc->osdep.pc = pa->pa_pc;
	sc->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		sc->osdep.dmat = pa->pa_dmat64;
	else
		sc->osdep.dmat = pa->pa_dmat;
	sc->osdep.attached = false;

	ent = ixv_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(sc, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(sc, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(sc);

	/* Set up the timer callout and workqueue */
	callout_init(&sc->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&sc->timer_wq, wqname,
	    ixv_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		goto err_out;
	}

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(sc);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
	sc->num_segs = IXGBE_SCATTER_DEFAULT;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(sc);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	case ixgbe_mbox_api_14:
		apivstr = "1.4";
		break;
	case ixgbe_mbox_api_15:
		apivstr = "1.5";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
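		/* Clear the multicast bit and set the locally administered bit */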
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
	ether_set_vlan_cb(&sc->osdep.ec, ixv_vlan_cb);

	/* Do descriptor calc and sanity checks */
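	/*
	 * The ring's byte size must be a multiple of DBA_ALIGN, so the
	 * descriptor count must be a multiple of
	 * DBA_ALIGN / sizeof(descriptor).
	 */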
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "Invalid TX ring size (%d). "
		    "It must be between %d and %d, "
		    "inclusive, and must be a multiple of %zu. "
		    "Using default value of %d instead.\n",
		    ixv_txd, MIN_TXD, MAX_TXD,
		    DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc),
		    DEFAULT_TXD);
		sc->num_tx_desc = DEFAULT_TXD;
	} else
		sc->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "Invalid RX ring size (%d). "
		    "It must be between %d and %d, "
		    "inclusive, and must be a multiple of %zu. "
		    "Using default value of %d instead.\n",
		    ixv_rxd, MIN_RXD, MAX_RXD,
		    DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc),
		    DEFAULT_RXD);
		sc->num_rx_desc = DEFAULT_RXD;
	} else
		sc->num_rx_desc = ixv_rxd;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	sc->rx_process_limit
	    = (ixv_rx_process_limit <= sc->num_rx_desc)
	    ? ixv_rx_process_limit : sc->num_rx_desc;
	sc->tx_process_limit
	    = (ixv_tx_process_limit <= sc->num_tx_desc)
	    ? ixv_tx_process_limit : sc->num_tx_desc;

	/* Set default high limit of copying mbuf in rxeof */
	sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(sc);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(sc)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ixv defaults init */
	sc->enable_aim = ixv_enable_aim;
	sc->max_interrupt_rate = ixv_max_interrupt_rate;

	sc->txrx_use_workqueue = ixv_txrx_workqueue;

	error = ixv_allocate_msix(sc, pa);
	if (error) {
		aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, sc);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Allocate multicast array memory */
	sc->mta = malloc(sizeof(*sc->mta) *
	    IXGBE_MAX_VF_MC, M_DEVBUF, M_WAITOK);

	/* Check if VF was disabled by PF */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = TRUE;
	}

	/* Do the stats setup */
	ixv_init_stats(sc);
	ixv_add_stats_sysctls(sc);

	if (sc->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(sc);

	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	sc->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(sc);
err_out:
	ixv_free_pci_resources(sc);
	IXGBE_CORE_LOCK_DESTROY(sc);

	return;
} /* ixv_attach */

/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct ixgbe_softc *sc = device_private(dev);
	struct ixgbe_hw *hw = &sc->hw;
	struct tx_ring *txr = sc->tx_rings;
	struct rx_ring *rxr = sc->rx_rings;
	struct ixgbevf_hw_stats *stats = &sc->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	if (sc->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(sc->ifp, 1);

	if (VLAN_ATTACHED(&sc->osdep.ec) &&
	    (flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == 0) {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}

	ether_ifdetach(sc->ifp);
	callout_halt(&sc->timer, NULL);
	ixv_free_deferred_handlers(sc);

	if (sc->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(sc->ifp);

	ixv_free_pci_resources(sc);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(sc->ifp);
	ifmedia_fini(&sc->media);
	if_percpuq_destroy(sc->ipq);

	sysctl_teardown(&sc->sysctllog);
	evcnt_detach(&sc->efbig_tx_dma_setup);
	evcnt_detach(&sc->mbuf_defrag_failed);
	evcnt_detach(&sc->efbig2_tx_dma_setup);
	evcnt_detach(&sc->einval_tx_dma_setup);
	evcnt_detach(&sc->other_tx_dma_setup);
	evcnt_detach(&sc->eagain_tx_dma_setup);
	evcnt_detach(&sc->enomem_tx_dma_setup);
	evcnt_detach(&sc->watchdog_events);
	evcnt_detach(&sc->tso_err);
	evcnt_detach(&sc->admin_irqev);
	evcnt_detach(&sc->link_workev);

	txr = sc->tx_rings;
	for (int i = 0; i < sc->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&sc->queues[i].irqs);
		evcnt_detach(&sc->queues[i].handleq);
		evcnt_detach(&sc->queues[i].req);
		evcnt_detach(&txr->total_packets);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->tso_tx);

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_mbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_queues(sc);

	IXGBE_CORE_LOCK_DESTROY(sc);

	return (0);
} /* ixv_detach */

/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: by the stack as an init entry point in the
 *   network interface structure, and by the driver as a hw/sw
 *   initialization routine to get to a consistent state.
 ************************************************************************/
static void
ixv_init_locked(struct ixgbe_softc *sc)
{
	struct ifnet	*ifp = sc->ifp;
	device_t	dev = sc->dev;
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_queue	*que;
	int		error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&sc->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&sc->timer);
	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
		que->disabled_count = 0;

	sc->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address; the user can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(sc)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop_locked(sc);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(sc);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(sc);

	/* Setup Multicast table */
	ixv_set_rxfilter(sc);

	/* Use fixed buffer size, even for jumbo frames */
	sc->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	error = ixgbe_setup_receive_structures(sc);
	if (error) {
		device_printf(dev,
		    "Could not setup receive structures (err = %d)\n", error);
		ixv_stop_locked(sc);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(sc);

	/* Initialize the variable holding task enqueue requests */
	sc->task_requests = 0;

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(sc);

	/* Set up MSI-X routing */
	ixv_configure_ivars(sc);

	/* Set up auto-mask */
	mask = (1 << sc->vector);
	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(sc, sc->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(sc);

	/* Config/Enable Link */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = TRUE;
	} else if (sc->link_enabled == FALSE)
		device_printf(dev, "VF is disabled by PF\n");

	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&sc->timer, hz, ixv_local_timer, sc);
	atomic_store_relaxed(&sc->timer_pending, 0);

	/* OK to schedule workqueues. */
	sc->schedule_wqs_ok = true;

	/* Update saved flags. See ixgbe_ifflags_cb() */
	sc->if_flags = ifp->if_flags;
	sc->ec_capenable = sc->osdep.ec.ec_capenable;

	/* Inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* And now turn on interrupts */
	ixv_enable_intr(sc);

	return;
} /* ixv_init_locked */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_queue *que = &sc->queues[vector];
	u32		queue = 1UL << vector;
	u32		mask;

	mutex_enter(&que->dc_mtx);
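	/*
	 * disabled_count is a nesting counter: the EIMS bit is written
	 * only when this enable balances the last outstanding disable.
	 */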
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
out:
	mutex_exit(&que->dc_mtx);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_queue *que = &sc->queues[vector];
	u32		queue = 1UL << vector;
	u32		mask;

	mutex_enter(&que->dc_mtx);
	if (que->disabled_count++ > 0)
		goto out;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
out:
	mutex_exit(&que->dc_mtx);
} /* ixv_disable_queue */

#if 0
static inline void
ixv_rearm_queues(struct ixgbe_softc *sc, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif


/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct ixgbe_softc *sc = que->sc;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	ixv_disable_queue(sc, que->msix);
	IXGBE_EVC_ADD(&que->irqs, 1);

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (sc->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(sc, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);
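	/*
	 * Example: an average frame of 576 bytes gives newitr = 576 + 24
	 * = 600; that falls in the 300..1200 midrange, so the setting
	 * written on the next interrupt becomes 600 / 3 = 200.
	 */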

	/*
	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
	 * We currently use 2us for RSC_DELAY. The minimum interval is always
	 * greater than 2us at 100M (and probably at 10M, though that's not
	 * documented), but not at 1G and above.
	 */
	if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(sc, que->msix);

	return 1;
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;

	IXGBE_EVC_ADD(&sc->admin_irqev, 1);
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change */
	hw->mac.get_link_status = TRUE;
	atomic_or_32(&sc->task_requests, IXGBE_REQUEST_TASK_MBX);
	ixv_schedule_admin_tasklet(sc);

	return 1;
} /* ixv_msix_mbx */

static void
ixv_eitr_write(struct ixgbe_softc *sc, uint32_t index, uint32_t itr)
{

	/*
	 * Only devices newer than the 82598 have a VF function, so this
	 * function is simple.
	 */
	itr |= IXGBE_EITR_CNT_WDIS;
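	/* CNT_WDIS keeps this write from resetting the internal ITR counter */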

	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(index), itr);
}


/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ixgbe_softc *sc = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	ixv_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
	}

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
} /* ixv_media_status */

/************************************************************************
 * ixv_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
	struct ixgbe_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(sc->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_media_change */

static void
ixv_schedule_admin_tasklet(struct ixgbe_softc *sc)
{
	if (sc->schedule_wqs_ok) {
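		/*
		 * admin_pending allows at most one admin work to be
		 * enqueued at a time; it is cleared after the work has run.
		 */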
		if (atomic_cas_uint(&sc->admin_pending, 0, 1) == 0)
			workqueue_enqueue(sc->admin_wq,
			    &sc->admin_wc, NULL);
	}
}

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int		mbx_api[] = { ixgbe_mbox_api_15,
				      ixgbe_mbox_api_13,
				      ixgbe_mbox_api_12,
				      ixgbe_mbox_api_11,
				      ixgbe_mbox_api_10,
				      ixgbe_mbox_api_unknown };
	int		i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) {
			if (hw->api_version >= ixgbe_mbox_api_15)
				ixgbe_upgrade_mbx_params_vf(hw);
			return (0);
		}
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */


/************************************************************************
 * ixv_set_rxfilter - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static int
ixv_set_rxfilter(struct ixgbe_softc *sc)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = sc->ifp;
	struct ixgbe_hw		*hw = &sc->hw;
	u8			*update_ptr;
	int			mcnt = 0;
	struct ethercom		*ec = &sc->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	bool			overflow = false;
	int			error, rc = 0;

	KASSERT(mutex_owned(&sc->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");

	mta = sc->mta;
	bzero(mta, sizeof(*mta) * IXGBE_MAX_VF_MC);

	/* 1: For PROMISC */
	if (ifp->if_flags & IFF_PROMISC) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_PROMISC);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(sc->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(sc->dev,
			    "the PF doesn't support promisc mode\n");
			error = EOPNOTSUPP;
		} else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
			device_printf(sc->dev,
			    "the PF may not be in promisc mode\n");
			error = EINVAL;
		} else if (error) {
			device_printf(sc->dev,
			    "failed to set promisc mode. error = %d\n",
			    error);
			error = EIO;
		} else
			return 0;
		rc = error;
	}

	/* 2: For ALLMULTI or normal */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
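		/*
		 * An address range (addrlo != addrhi) can't be expressed
		 * in the exact-match table, so it forces allmulti, as
		 * does exceeding IXGBE_MAX_VF_MC entries.
		 */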
		if ((mcnt >= IXGBE_MAX_VF_MC) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			overflow = true;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* 3: For ALLMULTI */
	if (overflow) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_ALLMULTI);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(sc->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(sc->dev,
			    "the PF doesn't support allmulti mode\n");
			error = EOPNOTSUPP;
		} else if (error) {
			device_printf(sc->dev,
			    "number of Ethernet multicast addresses "
			    "exceeds the limit (%d). error = %d\n",
			    IXGBE_MAX_VF_MC, error);
			error = ENOSPC;
		} else {
			ETHER_LOCK(ec);
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			return rc; /* Promisc might have failed */
		}

		if (rc == 0)
			rc = error;

		/* Continue to update the multicast table with as many entries as we can */
	}

	/* 4: For normal operation */
	error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
	if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
		/* Normal operation */
		ETHER_LOCK(ec);
		ec->ec_flags &= ~ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		error = 0;
	} else if (error) {
		device_printf(sc->dev,
		    "failed to set Ethernet multicast address "
		    "operation to normal. error = %d\n", error);
	}

	update_ptr = (u8 *)mta;
	error = sc->hw.mac.ops.update_mc_addr_list(&sc->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
	if (rc == 0)
		rc = error;

	return rc;
} /* ixv_set_rxfilter */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixv_set_rxfilter() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;

	*vmdq = 0;
	*update_ptr = (u8*)(mta + 1);

	return (mta->addr);
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct ixgbe_softc *sc = arg;

	if (sc->schedule_wqs_ok) {
		if (atomic_cas_uint(&sc->timer_pending, 0, 1) == 0)
			workqueue_enqueue(sc->timer_wq,
			    &sc->timer_wc, NULL);
	}
}

static void
ixv_handle_timer(struct work *wk, void *context)
{
	struct ixgbe_softc *sc = context;
	device_t	dev = sc->dev;
	struct ix_queue	*que = sc->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	IXGBE_CORE_LOCK(sc);

	if (ixv_check_link(sc)) {
		ixv_init_locked(sc);
		IXGBE_CORE_UNLOCK(sc);
		return;
	}

	/* Stats Update */
	ixv_update_stats(sc);

	/* Update some event counters */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = sc->queues;
	for (i = 0; i < sc->num_queues; i++, que++) {
		struct tx_ring	*txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, v0);
	IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, v1);
	IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, v2);
	IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, v3);
	IXGBE_EVC_STORE(&sc->other_tx_dma_setup, v4);
	IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, v5);
	IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, v6);
	IXGBE_EVC_STORE(&sc->tso_err, v7);

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = sc->queues;
	for (i = 0; i < sc->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning while there
		 * are uncleaned descriptors, it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			sc->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((sc->active_queues & ((u64)1 << que->me)) == 0)
				sc->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == sc->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(sc, queues);
	}
#endif

	atomic_store_relaxed(&sc->timer_pending, 0);
	IXGBE_CORE_UNLOCK(sc);
	callout_reset(&sc->timer, hz, ixv_local_timer, sc);

	return;

watchdog:
	device_printf(sc->dev, "Watchdog timeout -- resetting\n");
	sc->ifp->if_flags &= ~IFF_RUNNING;
	IXGBE_EVC_ADD(&sc->watchdog_events, 1);
	ixv_init_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
} /* ixv_handle_timer */

/************************************************************************
 * ixv_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *	 The real check of the hardware only happens with
 *	 a link interrupt.
 ************************************************************************/
static void
ixv_update_link_status(struct ixgbe_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	device_t     dev = sc->dev;

	KASSERT(mutex_owned(&sc->core_mtx));

	if (sc->link_up && sc->link_enabled) {
		if (sc->link_active != LINK_STATE_UP) {
			if (bootverbose) {
				const char *bpsmsg;

				switch (sc->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			sc->link_active = LINK_STATE_UP;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (sc->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			sc->link_active = LINK_STATE_DOWN;
		}
	}
} /* ixv_update_link_status */


/************************************************************************
 * ixv_ifstop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct ixgbe_softc *sc = ifp->if_softc;

	IXGBE_CORE_LOCK(sc);
	ixv_stop_locked(sc);
	IXGBE_CORE_UNLOCK(sc);

	workqueue_wait(sc->admin_wq, &sc->admin_wc);
	atomic_store_relaxed(&sc->admin_pending, 0);
	workqueue_wait(sc->timer_wq, &sc->timer_wc);
	atomic_store_relaxed(&sc->timer_pending, 0);
}

static void
ixv_stop_locked(void *arg)
{
	struct ifnet	*ifp;
	struct ixgbe_softc *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;

	ifp = sc->ifp;

	KASSERT(mutex_owned(&sc->core_mtx));

	INIT_DEBUGOUT("ixv_stop_locked: begin\n");
	ixv_disable_intr(sc);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	sc->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&sc->timer);

	/* Don't schedule workqueues. */
	sc->schedule_wqs_ok = false;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop_locked */


/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct ixgbe_softc *sc,
    const struct pci_attach_args *pa)
{
	pcireg_t	memtype, csr;
	device_t	dev = sc->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		sc->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &sc->osdep.mem_size, &flags) != 0)
			goto map_err;
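		/*
		 * Device registers have read side effects, so the BAR must
		 * not be mapped prefetchable even if it advertises it.
		 */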
   1535 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1536 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1537 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1538 		}
   1539 		if (bus_space_map(sc->osdep.mem_bus_space_tag, addr,
   1540 		     sc->osdep.mem_size, flags,
   1541 		     &sc->osdep.mem_bus_space_handle) != 0) {
   1542 map_err:
   1543 			sc->osdep.mem_size = 0;
   1544 			aprint_error_dev(dev, "unable to map BAR0\n");
   1545 			return ENXIO;
   1546 		}
   1547 		/*
    1548 		 * Enable address decoding for the memory range in case
    1549 		 * it isn't already set.
   1550 		 */
   1551 		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1552 		    PCI_COMMAND_STATUS_REG);
   1553 		csr |= PCI_COMMAND_MEM_ENABLE;
   1554 		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
   1555 		    csr);
   1556 		break;
   1557 	default:
   1558 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1559 		return ENXIO;
   1560 	}
   1561 
    1562 	/* Pick up the tunable number of queues */
   1563 	sc->num_queues = ixv_num_queues;
   1564 
   1565 	return (0);
   1566 } /* ixv_allocate_pci_resources */
   1567 
   1568 static void
   1569 ixv_free_deferred_handlers(struct ixgbe_softc *sc)
   1570 {
   1571 	struct ix_queue *que = sc->queues;
   1572 	struct tx_ring *txr = sc->tx_rings;
   1573 	int i;
   1574 
   1575 	for (i = 0; i < sc->num_queues; i++, que++, txr++) {
   1576 		if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   1577 			if (txr->txr_si != NULL)
   1578 				softint_disestablish(txr->txr_si);
   1579 		}
   1580 		if (que->que_si != NULL)
   1581 			softint_disestablish(que->que_si);
   1582 	}
   1583 	if (sc->txr_wq != NULL)
   1584 		workqueue_destroy(sc->txr_wq);
   1585 	if (sc->txr_wq_enqueued != NULL)
   1586 		percpu_free(sc->txr_wq_enqueued, sizeof(u_int));
   1587 	if (sc->que_wq != NULL)
   1588 		workqueue_destroy(sc->que_wq);
   1589 
    1590 	/* Destroy the mailbox (link) and timer workqueues */
   1591 	if (sc->admin_wq != NULL) {
   1592 		workqueue_destroy(sc->admin_wq);
   1593 		sc->admin_wq = NULL;
   1594 	}
   1595 	if (sc->timer_wq != NULL) {
   1596 		workqueue_destroy(sc->timer_wq);
   1597 		sc->timer_wq = NULL;
   1598 	}
   1599 } /* ixv_free_deferred_handlers */
   1600 
   1601 /************************************************************************
   1602  * ixv_free_pci_resources
   1603  ************************************************************************/
   1604 static void
   1605 ixv_free_pci_resources(struct ixgbe_softc *sc)
   1606 {
   1607 	struct ix_queue *que = sc->queues;
   1608 	int		rid;
   1609 
   1610 	/*
    1611 	 * Release all MSI-X queue resources.
   1612 	 */
   1613 	for (int i = 0; i < sc->num_queues; i++, que++) {
   1614 		if (que->res != NULL)
   1615 			pci_intr_disestablish(sc->osdep.pc,
   1616 			    sc->osdep.ihs[i]);
   1617 	}
   1618 
    1619 	/* Disestablish the mailbox interrupt last */
   1621 	rid = sc->vector;
   1622 
   1623 	if (sc->osdep.ihs[rid] != NULL) {
   1624 		pci_intr_disestablish(sc->osdep.pc,
   1625 		    sc->osdep.ihs[rid]);
   1626 		sc->osdep.ihs[rid] = NULL;
   1627 	}
   1628 
   1629 	pci_intr_release(sc->osdep.pc, sc->osdep.intrs,
   1630 	    sc->osdep.nintrs);
   1631 
   1632 	if (sc->osdep.mem_size != 0) {
   1633 		bus_space_unmap(sc->osdep.mem_bus_space_tag,
   1634 		    sc->osdep.mem_bus_space_handle,
   1635 		    sc->osdep.mem_size);
   1636 	}
   1637 
   1638 	return;
   1639 } /* ixv_free_pci_resources */
   1640 
   1641 /************************************************************************
   1642  * ixv_setup_interface
   1643  *
   1644  *   Setup networking device structure and register an interface.
   1645  ************************************************************************/
   1646 static int
   1647 ixv_setup_interface(device_t dev, struct ixgbe_softc *sc)
   1648 {
   1649 	struct ethercom *ec = &sc->osdep.ec;
   1650 	struct ifnet   *ifp;
   1651 
   1652 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1653 
   1654 	ifp = sc->ifp = &ec->ec_if;
   1655 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1656 	ifp->if_baudrate = IF_Gbps(10);
   1657 	ifp->if_init = ixv_init;
   1658 	ifp->if_stop = ixv_ifstop;
   1659 	ifp->if_softc = sc;
   1660 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1661 #ifdef IXGBE_MPSAFE
   1662 	ifp->if_extflags = IFEF_MPSAFE;
   1663 #endif
   1664 	ifp->if_ioctl = ixv_ioctl;
   1665 	if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1666 #if 0
   1667 		ixv_start_locked = ixgbe_legacy_start_locked;
   1668 #endif
   1669 	} else {
   1670 		ifp->if_transmit = ixgbe_mq_start;
   1671 #if 0
   1672 		ixv_start_locked = ixgbe_mq_start_locked;
   1673 #endif
   1674 	}
   1675 	ifp->if_start = ixgbe_legacy_start;
   1676 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 2);
   1677 	IFQ_SET_READY(&ifp->if_snd);
   1678 
   1679 	if_initialize(ifp);
   1680 	sc->ipq = if_percpuq_create(&sc->osdep.ec.ec_if);
   1681 	ether_ifattach(ifp, sc->hw.mac.addr);
   1682 	aprint_normal_dev(dev, "Ethernet address %s\n",
   1683 	    ether_sprintf(sc->hw.mac.addr));
   1684 	/*
    1685 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
   1686 	 * used.
   1687 	 */
   1688 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1689 
   1690 	sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1691 
   1692 	/*
   1693 	 * Tell the upper layer(s) we support long frames.
   1694 	 */
   1695 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1696 
   1697 	/* Set capability flags */
   1698 	ifp->if_capabilities |= IFCAP_HWCSUM
   1699 			     |	IFCAP_TSOv4
   1700 			     |	IFCAP_TSOv6;
   1701 	ifp->if_capenable = 0;
   1702 
   1703 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
   1704 			    |  ETHERCAP_VLAN_HWTAGGING
   1705 			    |  ETHERCAP_VLAN_HWCSUM
   1706 			    |  ETHERCAP_JUMBO_MTU
   1707 			    |  ETHERCAP_VLAN_MTU;
   1708 
   1709 	/* Enable the above capabilities by default */
   1710 	ec->ec_capenable = ec->ec_capabilities;
   1711 
   1712 	/* Don't enable LRO by default */
   1713 #if 0
   1714 	/* NetBSD doesn't support LRO yet */
   1715 	ifp->if_capabilities |= IFCAP_LRO;
   1716 #endif
   1717 
   1718 	/*
   1719 	 * Specify the media types supported by this adapter and register
   1720 	 * callbacks to update media and link information
   1721 	 */
   1722 	ec->ec_ifmedia = &sc->media;
   1723 	ifmedia_init_with_lock(&sc->media, IFM_IMASK, ixv_media_change,
   1724 	    ixv_media_status, &sc->core_mtx);
   1725 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1726 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
   1727 
   1728 	if_register(ifp);
   1729 
   1730 	return 0;
   1731 } /* ixv_setup_interface */
   1732 
   1733 
   1734 /************************************************************************
   1735  * ixv_initialize_transmit_units - Enable transmit unit.
   1736  ************************************************************************/
   1737 static void
   1738 ixv_initialize_transmit_units(struct ixgbe_softc *sc)
   1739 {
   1740 	struct tx_ring	*txr = sc->tx_rings;
   1741 	struct ixgbe_hw	*hw = &sc->hw;
   1742 	int i;
   1743 
   1744 	for (i = 0; i < sc->num_queues; i++, txr++) {
   1745 		u64 tdba = txr->txdma.dma_paddr;
   1746 		u32 txctrl, txdctl;
   1747 		int j = txr->me;
   1748 
   1749 		/* Set WTHRESH to 8, burst writeback */
   1750 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1751 		txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
   1752 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1753 
   1754 		/* Set the HW Tx Head and Tail indices */
    1755 		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
    1756 		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
   1757 
   1758 		/* Set Tx Tail register */
   1759 		txr->tail = IXGBE_VFTDT(j);
   1760 
   1761 		txr->txr_no_space = false;
   1762 
   1763 		/* Set Ring parameters */
   1764 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
   1765 		    (tdba & 0x00000000ffffffffULL));
   1766 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
   1767 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
   1768 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
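         		/*
         		 * Clearing DESC_WRO_EN disables relaxed PCIe ordering
         		 * for descriptor writebacks, so a completion can't be
         		 * observed ahead of the descriptor data it reports.
         		 */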
   1769 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
   1770 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1771 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
   1772 
   1773 		/* Now enable */
   1774 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1775 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1776 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1777 	}
   1778 
   1779 	return;
   1780 } /* ixv_initialize_transmit_units */
   1781 
   1782 
   1783 /************************************************************************
   1784  * ixv_initialize_rss_mapping
   1785  ************************************************************************/
   1786 static void
   1787 ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
   1788 {
   1789 	struct ixgbe_hw *hw = &sc->hw;
   1790 	u32		reta = 0, mrqc, rss_key[10];
   1791 	int		queue_id;
   1792 	int		i, j;
   1793 	u32		rss_hash_config;
   1794 
    1795 	/* On NetBSD, always use the kernel's RSS key. */
   1796 #ifdef __NetBSD__
   1797 	rss_getkey((uint8_t *) &rss_key);
   1798 #else
   1799 	if (sc->feat_en & IXGBE_FEATURE_RSS) {
   1800 		/* Fetch the configured RSS key */
   1801 		rss_getkey((uint8_t *)&rss_key);
   1802 	} else {
   1803 		/* set up random bits */
   1804 		cprng_fast(&rss_key, sizeof(rss_key));
   1805 	}
   1806 #endif
   1807 
   1808 	/* Now fill out hash function seeds */
   1809 	for (i = 0; i < 10; i++)
   1810 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1811 
   1812 	/* Set up the redirection table */
   1813 	for (i = 0, j = 0; i < 64; i++, j++) {
   1814 		if (j == sc->num_queues)
   1815 			j = 0;
   1816 
   1817 		if (sc->feat_en & IXGBE_FEATURE_RSS) {
   1818 			/*
   1819 			 * Fetch the RSS bucket id for the given indirection
   1820 			 * entry. Cap it at the number of configured buckets
   1821 			 * (which is num_queues.)
   1822 			 */
   1823 			queue_id = rss_get_indirection_to_bucket(i);
   1824 			queue_id = queue_id % sc->num_queues;
   1825 		} else
   1826 			queue_id = j;
   1827 
   1828 		/*
    1829 		 * The low 8 bits hold entry (n+0), the next 8 bits entry (n+1),
    1830 		 * etc.; four entries are flushed to VFRETA(i >> 2) every fourth pass.
   1831 		 */
   1832 		reta >>= 8;
   1833 		reta |= ((uint32_t)queue_id) << 24;
   1834 		if ((i & 3) == 3) {
   1835 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1836 			reta = 0;
   1837 		}
   1838 	}
   1839 
   1840 	/* Perform hash on these packet types */
   1841 	if (sc->feat_en & IXGBE_FEATURE_RSS)
   1842 		rss_hash_config = rss_gethashconfig();
   1843 	else {
   1844 		/*
   1845 		 * Disable UDP - IP fragments aren't currently being handled
   1846 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1847 		 * traffic.
   1848 		 */
   1849 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1850 				| RSS_HASHTYPE_RSS_TCP_IPV4
   1851 				| RSS_HASHTYPE_RSS_IPV6
   1852 				| RSS_HASHTYPE_RSS_TCP_IPV6;
   1853 	}
   1854 
   1855 	mrqc = IXGBE_MRQC_RSSEN;
   1856 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1857 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1858 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1859 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1860 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1861 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1862 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1863 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1864 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1865 		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX "
   1866 		    "defined, but not supported\n", __func__);
   1867 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1868 		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX "
   1869 		    "defined, but not supported\n", __func__);
   1870 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1871 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1872 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1873 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1874 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1875 		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX "
   1876 		    "defined, but not supported\n", __func__);
   1877 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1878 } /* ixv_initialize_rss_mapping */
   1879 
   1880 
   1881 /************************************************************************
   1882  * ixv_initialize_receive_units - Setup receive registers and features.
   1883  ************************************************************************/
   1884 static void
   1885 ixv_initialize_receive_units(struct ixgbe_softc *sc)
   1886 {
   1887 	struct rx_ring	*rxr = sc->rx_rings;
   1888 	struct ixgbe_hw	*hw = &sc->hw;
   1889 	struct ifnet	*ifp = sc->ifp;
   1890 	u32		bufsz, psrtype;
   1891 
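         	/*
         	 * SRRCTL.BSIZEPKT is in 1KB units (BSIZEPKT_SHIFT == 10), so
         	 * this selects 4KB Rx buffers for jumbo MTUs, 2KB otherwise.
         	 */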
   1892 	if (ifp->if_mtu > ETHERMTU)
   1893 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1894 	else
   1895 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1896 
   1897 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1898 		| IXGBE_PSRTYPE_UDPHDR
   1899 		| IXGBE_PSRTYPE_IPV4HDR
   1900 		| IXGBE_PSRTYPE_IPV6HDR
   1901 		| IXGBE_PSRTYPE_L2HDR;
   1902 
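         	/*
         	 * Bits 30:29 of VFPSRTYPE (RQPL) encode the number of RSS
         	 * queues per pool as a power of two; 1 << 29 advertises two.
         	 */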
   1903 	if (sc->num_queues > 1)
   1904 		psrtype |= 1 << 29;
   1905 
   1906 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1907 
    1908 	/* Tell the PF our max frame size */
   1909 	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
   1910 		device_printf(sc->dev, "There is a problem with the PF "
   1911 		    "setup.  It is likely the receive unit for this VF will "
   1912 		    "not function correctly.\n");
   1913 	}
   1914 
   1915 	for (int i = 0; i < sc->num_queues; i++, rxr++) {
   1916 		u64 rdba = rxr->rxdma.dma_paddr;
   1917 		u32 reg, rxdctl;
   1918 		int j = rxr->me;
   1919 
   1920 		/* Disable the queue */
   1921 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
   1922 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1923 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
   1924 		for (int k = 0; k < 10; k++) {
   1925 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
   1926 			    IXGBE_RXDCTL_ENABLE)
   1927 				msec_delay(1);
   1928 			else
   1929 				break;
   1930 		}
   1931 		IXGBE_WRITE_BARRIER(hw);
   1932 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1933 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
   1934 		    (rdba & 0x00000000ffffffffULL));
   1935 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
   1936 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
   1937 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1938 
   1939 		/* Reset the ring indices */
   1940 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1941 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1942 
   1943 		/* Set up the SRRCTL register */
   1944 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
   1945 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1946 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1947 		reg |= bufsz;
   1948 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1949 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
   1950 
   1951 		/* Capture Rx Tail index */
   1952 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1953 
   1954 		/* Do the queue enabling last */
   1955 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1956 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
   1957 		for (int k = 0; k < 10; k++) {
   1958 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
   1959 			    IXGBE_RXDCTL_ENABLE)
   1960 				break;
   1961 			msec_delay(1);
   1962 		}
   1963 		IXGBE_WRITE_BARRIER(hw);
   1964 
   1965 		/* Set the Tail Pointer */
   1966 #ifdef DEV_NETMAP
   1967 		/*
   1968 		 * In netmap mode, we must preserve the buffers made
   1969 		 * available to userspace before the if_init()
   1970 		 * (this is true by default on the TX side, because
   1971 		 * init makes all buffers available to userspace).
   1972 		 *
   1973 		 * netmap_reset() and the device specific routines
   1974 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1975 		 * buffers at the end of the NIC ring, so here we
   1976 		 * must set the RDT (tail) register to make sure
   1977 		 * they are not overwritten.
   1978 		 *
   1979 		 * In this driver the NIC ring starts at RDH = 0,
   1980 		 * RDT points to the last slot available for reception (?),
   1981 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1982 		 */
   1983 		if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
   1984 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1985 			struct netmap_adapter *na = NA(sc->ifp);
   1986 			struct netmap_kring *kring = na->rx_rings[i];
   1987 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1988 
   1989 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1990 		} else
   1991 #endif /* DEV_NETMAP */
   1992 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1993 			    sc->num_rx_desc - 1);
   1994 	}
   1995 
   1996 	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
   1997 		ixv_initialize_rss_mapping(sc);
   1998 } /* ixv_initialize_receive_units */
   1999 
   2000 /************************************************************************
   2001  * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2002  *
   2003  *   Retrieves the TDH value from the hardware
   2004  ************************************************************************/
   2005 static int
   2006 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2007 {
   2008 	struct sysctlnode node = *rnode;
   2009 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2010 	uint32_t val;
   2011 
   2012 	if (!txr)
   2013 		return (0);
   2014 
   2015 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_VFTDH(txr->me));
   2016 	node.sysctl_data = &val;
   2017 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2018 } /* ixv_sysctl_tdh_handler */
   2019 
   2020 /************************************************************************
    2021  * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2022  *
   2023  *   Retrieves the TDT value from the hardware
   2024  ************************************************************************/
   2025 static int
   2026 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2027 {
   2028 	struct sysctlnode node = *rnode;
   2029 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2030 	uint32_t val;
   2031 
   2032 	if (!txr)
   2033 		return (0);
   2034 
   2035 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_VFTDT(txr->me));
   2036 	node.sysctl_data = &val;
   2037 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2038 } /* ixv_sysctl_tdt_handler */
   2039 
   2040 /************************************************************************
   2041  * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
   2042  * handler function
   2043  *
   2044  *   Retrieves the next_to_check value
   2045  ************************************************************************/
   2046 static int
   2047 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2048 {
   2049 	struct sysctlnode node = *rnode;
   2050 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2051 	uint32_t val;
   2052 
   2053 	if (!rxr)
   2054 		return (0);
   2055 
   2056 	val = rxr->next_to_check;
   2057 	node.sysctl_data = &val;
   2058 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2059 } /* ixv_sysctl_next_to_check_handler */
   2060 
   2061 /************************************************************************
   2062  * ixv_sysctl_next_to_refresh_handler - Receive Descriptor next to refresh
   2063  * handler function
   2064  *
   2065  *   Retrieves the next_to_refresh value
   2066  ************************************************************************/
   2067 static int
   2068 ixv_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
   2069 {
   2070 	struct sysctlnode node = *rnode;
   2071 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2072 	struct ixgbe_softc *sc;
   2073 	uint32_t val;
   2074 
   2075 	if (!rxr)
   2076 		return (0);
   2077 
   2078 	sc = rxr->sc;
   2079 	if (ixgbe_fw_recovery_mode_swflag(sc))
   2080 		return (EPERM);
   2081 
   2082 	val = rxr->next_to_refresh;
   2083 	node.sysctl_data = &val;
   2084 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2085 } /* ixv_sysctl_next_to_refresh_handler */
   2086 
   2087 /************************************************************************
   2088  * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
   2089  *
   2090  *   Retrieves the RDH value from the hardware
   2091  ************************************************************************/
   2092 static int
   2093 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2094 {
   2095 	struct sysctlnode node = *rnode;
   2096 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2097 	uint32_t val;
   2098 
   2099 	if (!rxr)
   2100 		return (0);
   2101 
   2102 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_VFRDH(rxr->me));
   2103 	node.sysctl_data = &val;
   2104 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2105 } /* ixv_sysctl_rdh_handler */
   2106 
   2107 /************************************************************************
   2108  * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2109  *
   2110  *   Retrieves the RDT value from the hardware
   2111  ************************************************************************/
   2112 static int
   2113 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2114 {
   2115 	struct sysctlnode node = *rnode;
   2116 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2117 	uint32_t val;
   2118 
   2119 	if (!rxr)
   2120 		return (0);
   2121 
   2122 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_VFRDT(rxr->me));
   2123 	node.sysctl_data = &val;
   2124 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2125 } /* ixv_sysctl_rdt_handler */
   2126 
   2127 static void
   2128 ixv_setup_vlan_tagging(struct ixgbe_softc *sc)
   2129 {
   2130 	struct ethercom *ec = &sc->osdep.ec;
   2131 	struct ixgbe_hw *hw = &sc->hw;
   2132 	struct rx_ring	*rxr;
   2133 	u32		ctrl;
   2134 	int		i;
   2135 	bool		hwtagging;
   2136 
    2137 	/* Enable HW tagging only if a VLAN is attached */
   2138 	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
   2139 	    && VLAN_ATTACHED(ec);
   2140 
    2141 	/* Program the VLAN strip bit (VME) on each Rx queue */
   2142 	for (i = 0; i < sc->num_queues; i++) {
   2143 		rxr = &sc->rx_rings[i];
   2144 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   2145 		if (hwtagging)
   2146 			ctrl |= IXGBE_RXDCTL_VME;
   2147 		else
   2148 			ctrl &= ~IXGBE_RXDCTL_VME;
   2149 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   2150 		/*
    2151 		 * Let the Rx path know whether it needs to store the
    2152 		 * VLAN tag as part of the extra mbuf info.
   2153 		 */
   2154 		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
   2155 	}
   2156 } /* ixv_setup_vlan_tagging */
   2157 
   2158 /************************************************************************
   2159  * ixv_setup_vlan_support
   2160  ************************************************************************/
   2161 static int
   2162 ixv_setup_vlan_support(struct ixgbe_softc *sc)
   2163 {
   2164 	struct ethercom *ec = &sc->osdep.ec;
   2165 	struct ixgbe_hw *hw = &sc->hw;
   2166 	u32		vid, vfta, retry;
   2167 	struct vlanid_list *vlanidp;
   2168 	int rv, error = 0;
   2169 
   2170 	/*
    2171 	 * This function is called from both if_init() and ifflags_cb()
   2172 	 * on NetBSD.
   2173 	 */
   2174 
   2175 	/*
   2176 	 * Part 1:
   2177 	 * Setup VLAN HW tagging
   2178 	 */
   2179 	ixv_setup_vlan_tagging(sc);
   2180 
   2181 	if (!VLAN_ATTACHED(ec))
   2182 		return 0;
   2183 
   2184 	/*
   2185 	 * Part 2:
   2186 	 * Setup VLAN HW filter
   2187 	 */
    2188 	/* Clear shadow_vfta */
   2189 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
   2190 		sc->shadow_vfta[i] = 0;
   2191 	/* Generate shadow_vfta from ec_vids */
   2192 	ETHER_LOCK(ec);
   2193 	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
   2194 		uint32_t idx;
   2195 
   2196 		idx = vlanidp->vid / 32;
   2197 		KASSERT(idx < IXGBE_VFTA_SIZE);
   2198 		sc->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
   2199 	}
   2200 	ETHER_UNLOCK(ec);
   2201 
   2202 	/*
    2203 	 * A soft reset zeroes out the VFTA, so
   2204 	 * we need to repopulate it now.
   2205 	 */
   2206 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   2207 		if (sc->shadow_vfta[i] == 0)
   2208 			continue;
   2209 		vfta = sc->shadow_vfta[i];
   2210 		/*
    2211 		 * Reconstruct the VLAN ids
    2212 		 * from the bits set in each
    2213 		 * of the array words.
   2214 		 */
   2215 		for (int j = 0; j < 32; j++) {
   2216 			retry = 0;
   2217 			if ((vfta & ((u32)1 << j)) == 0)
   2218 				continue;
   2219 			vid = (i * 32) + j;
   2220 
   2221 			/* Call the shared code mailbox routine */
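         			/*
         			 * The mailbox request may fail transiently
         			 * (e.g. if the PF is busy), so retry a few
         			 * times before giving up.
         			 */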
   2222 			while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
   2223 			    FALSE)) != 0) {
   2224 				if (++retry > 5) {
   2225 					device_printf(sc->dev,
   2226 					    "%s: max retry exceeded\n",
   2227 						__func__);
   2228 					break;
   2229 				}
   2230 			}
   2231 			if (rv != 0) {
   2232 				device_printf(sc->dev,
   2233 				    "failed to set vlan %d\n", vid);
   2234 				error = EACCES;
   2235 			}
   2236 		}
   2237 	}
   2238 	return error;
   2239 } /* ixv_setup_vlan_support */
   2240 
   2241 static int
   2242 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
   2243 {
   2244 	struct ifnet *ifp = &ec->ec_if;
   2245 	struct ixgbe_softc *sc = ifp->if_softc;
   2246 	int rv;
   2247 
   2248 	if (set)
   2249 		rv = ixv_register_vlan(sc, vid);
   2250 	else
   2251 		rv = ixv_unregister_vlan(sc, vid);
   2252 
   2253 	if (rv != 0)
   2254 		return rv;
   2255 
   2256 	/*
    2257 	 * Control VLAN HW tagging when ec_nvlans changes from 1 to 0
   2258 	 * or 0 to 1.
   2259 	 */
   2260 	if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
   2261 		ixv_setup_vlan_tagging(sc);
   2262 
   2263 	return rv;
   2264 }
   2265 
   2266 /************************************************************************
   2267  * ixv_register_vlan
   2268  *
    2269  *   Run via a VLAN config EVENT; it enables us to use the
    2270  *   HW filter table since we can get the VLAN id.  This just
    2271  *   creates the entry in the soft version of the VFTA; init
    2272  *   will repopulate the real table.
   2273  ************************************************************************/
   2274 static int
   2275 ixv_register_vlan(struct ixgbe_softc *sc, u16 vtag)
   2276 {
   2277 	struct ixgbe_hw *hw = &sc->hw;
   2278 	u16		index, bit;
   2279 	int error;
   2280 
   2281 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   2282 		return EINVAL;
   2283 	IXGBE_CORE_LOCK(sc);
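         	/*
         	 * The shadow VFTA is 128 32-bit words covering 4096 VLAN ids:
         	 * bits 11:5 of the tag select the word and bits 4:0 the bit
         	 * within it (e.g. VLAN 100 sets bit 4 of word 3).
         	 */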
   2284 	index = (vtag >> 5) & 0x7F;
   2285 	bit = vtag & 0x1F;
   2286 	sc->shadow_vfta[index] |= ((u32)1 << bit);
   2287 	error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
   2288 	IXGBE_CORE_UNLOCK(sc);
   2289 
   2290 	if (error != 0) {
   2291 		device_printf(sc->dev, "failed to register vlan %hu\n", vtag);
   2292 		error = EACCES;
   2293 	}
   2294 	return error;
   2295 } /* ixv_register_vlan */
   2296 
   2297 /************************************************************************
   2298  * ixv_unregister_vlan
   2299  *
    2300  *   Run via a VLAN unconfig EVENT; removes our entry
    2301  *   from the soft VFTA.
   2302  ************************************************************************/
   2303 static int
   2304 ixv_unregister_vlan(struct ixgbe_softc *sc, u16 vtag)
   2305 {
   2306 	struct ixgbe_hw *hw = &sc->hw;
   2307 	u16		index, bit;
   2308 	int		error;
   2309 
   2310 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   2311 		return EINVAL;
   2312 
   2313 	IXGBE_CORE_LOCK(sc);
   2314 	index = (vtag >> 5) & 0x7F;
   2315 	bit = vtag & 0x1F;
   2316 	sc->shadow_vfta[index] &= ~((u32)1 << bit);
   2317 	error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
   2318 	IXGBE_CORE_UNLOCK(sc);
   2319 
   2320 	if (error != 0) {
   2321 		device_printf(sc->dev, "failed to unregister vlan %hu\n",
   2322 		    vtag);
   2323 		error = EIO;
   2324 	}
   2325 	return error;
   2326 } /* ixv_unregister_vlan */
   2327 
   2328 /************************************************************************
   2329  * ixv_enable_intr
   2330  ************************************************************************/
   2331 static void
   2332 ixv_enable_intr(struct ixgbe_softc *sc)
   2333 {
   2334 	struct ixgbe_hw *hw = &sc->hw;
   2335 	struct ix_queue *que = sc->queues;
   2336 	u32		mask;
   2337 	int i;
   2338 
    2339 	/* VTEIAC: enable auto-clear for the mailbox and queue vectors */
   2340 	mask = (1 << sc->vector);
   2341 	for (i = 0; i < sc->num_queues; i++, que++)
   2342 		mask |= (1 << que->msix);
   2343 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   2344 
    2345 	/* VTEIMS: unmask the mailbox vector, then each queue vector */
   2346 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->vector));
   2347 	que = sc->queues;
   2348 	for (i = 0; i < sc->num_queues; i++, que++)
   2349 		ixv_enable_queue(sc, que->msix);
   2350 
   2351 	IXGBE_WRITE_FLUSH(hw);
   2352 } /* ixv_enable_intr */
   2353 
   2354 /************************************************************************
   2355  * ixv_disable_intr
   2356  ************************************************************************/
   2357 static void
   2358 ixv_disable_intr(struct ixgbe_softc *sc)
   2359 {
   2360 	struct ix_queue	*que = sc->queues;
   2361 
   2362 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
   2363 
    2364 	/* Disable the non-queue (mailbox) interrupt */
   2365 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, sc->vector);
   2366 
   2367 	for (int i = 0; i < sc->num_queues; i++, que++)
   2368 		ixv_disable_queue(sc, que->msix);
   2369 
   2370 	IXGBE_WRITE_FLUSH(&sc->hw);
   2371 } /* ixv_disable_intr */
   2372 
   2373 /************************************************************************
   2374  * ixv_set_ivar
   2375  *
   2376  *   Setup the correct IVAR register for a particular MSI-X interrupt
   2377  *    - entry is the register array entry
   2378  *    - vector is the MSI-X vector for this queue
   2379  *    - type is RX/TX/MISC
   2380  ************************************************************************/
   2381 static void
   2382 ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
   2383 {
   2384 	struct ixgbe_hw *hw = &sc->hw;
   2385 	u32		ivar, index;
   2386 
   2387 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2388 
   2389 	if (type == -1) { /* MISC IVAR */
   2390 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2391 		ivar &= ~0xFF;
   2392 		ivar |= vector;
   2393 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2394 	} else {	  /* RX/TX IVARS */
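         		/*
         		 * Each VTIVAR register packs four 8-bit entries (RX and
         		 * TX vectors for two queues): (entry & 1) picks the half,
         		 * type (0 = RX, 1 = TX) picks the byte within it, and
         		 * (entry >> 1) picks the register.
         		 */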
   2395 		index = (16 * (entry & 1)) + (8 * type);
   2396 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2397 		ivar &= ~(0xffUL << index);
   2398 		ivar |= ((u32)vector << index);
   2399 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2400 	}
   2401 } /* ixv_set_ivar */
   2402 
   2403 /************************************************************************
   2404  * ixv_configure_ivars
   2405  ************************************************************************/
   2406 static void
   2407 ixv_configure_ivars(struct ixgbe_softc *sc)
   2408 {
   2409 	struct ix_queue *que = sc->queues;
   2410 
   2411 	/* XXX We should sync EITR value calculation with ixgbe.c? */
   2412 
   2413 	for (int i = 0; i < sc->num_queues; i++, que++) {
   2414 		/* First the RX queue entry */
   2415 		ixv_set_ivar(sc, i, que->msix, 0);
   2416 		/* ... and the TX */
   2417 		ixv_set_ivar(sc, i, que->msix, 1);
   2418 		/* Set an initial value in EITR */
   2419 		ixv_eitr_write(sc, que->msix, IXGBE_EITR_DEFAULT);
   2420 	}
   2421 
   2422 	/* For the mailbox interrupt */
   2423 	ixv_set_ivar(sc, 1, sc->vector, -1);
   2424 } /* ixv_configure_ivars */
   2425 
   2426 
   2427 /************************************************************************
   2428  * ixv_init_stats
   2429  *
   2430  *   The VF stats registers never have a truly virgin
    2431  *   starting point, so this routine saves the initial values to
   2432  *   last_<REGNAME>.
   2433  ************************************************************************/
   2434 static void
   2435 ixv_init_stats(struct ixgbe_softc *sc)
   2436 {
   2437 	struct ixgbe_hw *hw = &sc->hw;
   2438 
   2439 	sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2440 	sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2441 	sc->stats.vf.last_vfgorc |=
   2442 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2443 
   2444 	sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2445 	sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2446 	sc->stats.vf.last_vfgotc |=
   2447 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2448 
   2449 	sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2450 } /* ixv_init_stats */
   2451 
   2452 #define UPDATE_STAT_32(reg, last, count)		\
   2453 {							\
   2454 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   2455 	IXGBE_EVC_ADD(&count, current - (last));	\
   2456 	(last) = current;				\
   2457 }
   2458 
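/*
 * The VF "good octets" counters are 36 bits wide (a 32-bit LSB register
 * plus a 4-bit MSB register); if the current reading is smaller than
 * the previous one the counter wrapped, so add back 2^36 (__BIT(36)).
 */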
   2459 #define UPDATE_STAT_36(lsb, msb, last, count)				\
   2460 	{								\
   2461 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
   2462 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
   2463 	u64 current = ((cur_msb << 32) | cur_lsb);			\
   2464 	if (current < (last))						\
   2465 		IXGBE_EVC_ADD(&count, current + __BIT(36) - (last));	\
   2466 	else								\
   2467 		IXGBE_EVC_ADD(&count, current - (last));		\
   2468 	(last) = current;						\
   2469 }
   2470 
   2471 /************************************************************************
   2472  * ixv_update_stats - Update the board statistics counters.
   2473  ************************************************************************/
   2474 void
   2475 ixv_update_stats(struct ixgbe_softc *sc)
   2476 {
   2477 	struct ixgbe_hw *hw = &sc->hw;
   2478 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
   2479 
   2480 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
   2481 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
   2482 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
   2483 	    stats->vfgorc);
   2484 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
   2485 	    stats->vfgotc);
   2486 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2487 
    2488 	/* The VF doesn't count errors in hardware */
   2489 
   2490 } /* ixv_update_stats */
   2491 
   2492 /************************************************************************
   2493  * ixv_sysctl_interrupt_rate_handler
   2494  ************************************************************************/
   2495 static int
   2496 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2497 {
   2498 	struct sysctlnode node = *rnode;
   2499 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2500 	struct ixgbe_softc *sc = que->sc;
   2501 	uint32_t reg, usec, rate;
   2502 	int error;
   2503 
   2504 	if (que == NULL)
   2505 		return 0;
   2506 	reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_VTEITR(que->msix));
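         	/*
         	 * VTEITR bits 11:3 hold the interrupt interval in 2us units,
         	 * so a field value of n yields roughly 500000 / n interrupts
         	 * per second.
         	 */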
   2507 	usec = ((reg & 0x0FF8) >> 3);
   2508 	if (usec > 0)
   2509 		rate = 500000 / usec;
   2510 	else
   2511 		rate = 0;
   2512 	node.sysctl_data = &rate;
   2513 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2514 	if (error || newp == NULL)
   2515 		return error;
   2516 	reg &= ~0xfff; /* default, no limitation */
   2517 	if (rate > 0 && rate < 500000) {
   2518 		if (rate < 1000)
   2519 			rate = 1000;
   2520 		reg |= ((4000000 / rate) & 0xff8);
   2521 		/*
    2522 		 * When RSC is used, the ITR interval must be larger than
    2523 		 * RSC_DELAY (currently 2us).  The interval is always
    2524 		 * greater than 2us at 100M (and presumably 10M, though
    2525 		 * that's not documented), but not necessarily at 1G and above.
   2526 		 */
   2527 		if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2528 		    && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2529 			if ((sc->num_queues > 1)
   2530 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   2531 				return EINVAL;
   2532 		}
   2533 		sc->max_interrupt_rate = rate;
   2534 	} else
   2535 		sc->max_interrupt_rate = 0;
   2536 	ixv_eitr_write(sc, que->msix, reg);
   2537 
   2538 	return (0);
   2539 } /* ixv_sysctl_interrupt_rate_handler */
   2540 
   2541 const struct sysctlnode *
   2542 ixv_sysctl_instance(struct ixgbe_softc *sc)
   2543 {
   2544 	const char *dvname;
   2545 	struct sysctllog **log;
   2546 	int rc;
   2547 	const struct sysctlnode *rnode;
   2548 
   2549 	log = &sc->sysctllog;
   2550 	dvname = device_xname(sc->dev);
   2551 
   2552 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2553 	    0, CTLTYPE_NODE, dvname,
   2554 	    SYSCTL_DESCR("ixv information and settings"),
   2555 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2556 		goto err;
   2557 
   2558 	return rnode;
   2559 err:
   2560 	device_printf(sc->dev,
   2561 	    "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2562 	return NULL;
   2563 }
   2564 
   2565 static void
   2566 ixv_add_device_sysctls(struct ixgbe_softc *sc)
   2567 {
   2568 	struct sysctllog **log;
   2569 	const struct sysctlnode *rnode, *cnode;
   2570 	device_t dev;
   2571 
   2572 	dev = sc->dev;
   2573 	log = &sc->sysctllog;
   2574 
   2575 	if ((rnode = ixv_sysctl_instance(sc)) == NULL) {
   2576 		aprint_error_dev(dev, "could not create sysctl root\n");
   2577 		return;
   2578 	}
   2579 
   2580 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2581 	    CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
   2582 	    SYSCTL_DESCR("Debug Info"),
   2583 	    ixv_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
   2584 		aprint_error_dev(dev, "could not create sysctl\n");
   2585 
   2586 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2587 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2588 	    "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
   2589 	    ixv_sysctl_rx_copy_len, 0,
   2590 	    (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
   2591 		aprint_error_dev(dev, "could not create sysctl\n");
   2592 
   2593 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2594 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2595 	    "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"),
   2596 	    NULL, 0, &sc->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2597 		aprint_error_dev(dev, "could not create sysctl\n");
   2598 
   2599 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2600 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2601 	    "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"),
   2602 	    NULL, 0, &sc->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2603 		aprint_error_dev(dev, "could not create sysctl\n");
   2604 
   2605 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2606 	    CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit",
   2607 	    SYSCTL_DESCR("max number of RX packets to process"),
   2608 	    ixv_sysctl_rx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
   2609 	    CTL_EOL) != 0)
   2610 		aprint_error_dev(dev, "could not create sysctl\n");
   2611 
   2612 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2613 	    CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit",
   2614 	    SYSCTL_DESCR("max number of TX packets to process"),
   2615 	    ixv_sysctl_tx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
   2616 	    CTL_EOL) != 0)
   2617 		aprint_error_dev(dev, "could not create sysctl\n");
   2618 
   2619 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2620 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "enable_aim",
   2621 	    SYSCTL_DESCR("Interrupt Moderation"),
   2622 	    NULL, 0, &sc->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2623 		aprint_error_dev(dev, "could not create sysctl\n");
   2624 
   2625 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2626 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
   2627 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   2628 	    NULL, 0, &sc->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL)
   2629 	    != 0)
   2630 		aprint_error_dev(dev, "could not create sysctl\n");
   2631 }
   2632 
   2633 /************************************************************************
   2634  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2635  ************************************************************************/
   2636 static void
   2637 ixv_add_stats_sysctls(struct ixgbe_softc *sc)
   2638 {
   2639 	device_t		dev = sc->dev;
   2640 	struct tx_ring		*txr = sc->tx_rings;
   2641 	struct rx_ring		*rxr = sc->rx_rings;
   2642 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
   2643 	struct ixgbe_hw *hw = &sc->hw;
   2644 	const struct sysctlnode *rnode, *cnode;
   2645 	struct sysctllog **log = &sc->sysctllog;
   2646 	const char *xname = device_xname(dev);
   2647 
   2648 	/* Driver Statistics */
   2649 	evcnt_attach_dynamic(&sc->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2650 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2651 	evcnt_attach_dynamic(&sc->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2652 	    NULL, xname, "m_defrag() failed");
   2653 	evcnt_attach_dynamic(&sc->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2654 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2655 	evcnt_attach_dynamic(&sc->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2656 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2657 	evcnt_attach_dynamic(&sc->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2658 	    NULL, xname, "Driver tx dma hard fail other");
   2659 	evcnt_attach_dynamic(&sc->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2660 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2661 	evcnt_attach_dynamic(&sc->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2662 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2663 	evcnt_attach_dynamic(&sc->watchdog_events, EVCNT_TYPE_MISC,
   2664 	    NULL, xname, "Watchdog timeouts");
   2665 	evcnt_attach_dynamic(&sc->tso_err, EVCNT_TYPE_MISC,
   2666 	    NULL, xname, "TSO errors");
   2667 	evcnt_attach_dynamic(&sc->admin_irqev, EVCNT_TYPE_INTR,
   2668 	    NULL, xname, "Admin MSI-X IRQ Handled");
   2669 	evcnt_attach_dynamic(&sc->link_workev, EVCNT_TYPE_INTR,
   2670 	    NULL, xname, "Admin event");
   2671 
   2672 	for (int i = 0; i < sc->num_queues; i++, rxr++, txr++) {
   2673 #ifdef LRO
   2674 		struct lro_ctrl *lro = &rxr->lro;
   2675 #endif
   2676 
   2677 		snprintf(sc->queues[i].evnamebuf,
   2678 		    sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i);
   2679 		snprintf(sc->queues[i].namebuf,
   2680 		    sizeof(sc->queues[i].namebuf), "q%d", i);
   2681 
   2682 		if ((rnode = ixv_sysctl_instance(sc)) == NULL) {
   2683 			aprint_error_dev(dev,
   2684 			    "could not create sysctl root\n");
   2685 			break;
   2686 		}
   2687 
   2688 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2689 		    0, CTLTYPE_NODE,
   2690 		    sc->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2691 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2692 			break;
   2693 
   2694 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2695 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2696 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2697 		    ixv_sysctl_interrupt_rate_handler, 0,
   2698 		    (void *)&sc->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2699 			break;
   2700 
   2701 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2702 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2703 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2704 		    ixv_sysctl_tdh_handler, 0, (void *)txr,
   2705 		    0, CTL_CREATE, CTL_EOL) != 0)
   2706 			break;
   2707 
   2708 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2709 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2710 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2711 		    ixv_sysctl_tdt_handler, 0, (void *)txr,
   2712 		    0, CTL_CREATE, CTL_EOL) != 0)
   2713 			break;
   2714 
   2715 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2716 		    CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
   2717 		    SYSCTL_DESCR("Receive Descriptor next to check"),
   2718 		    ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   2719 		    CTL_CREATE, CTL_EOL) != 0)
   2720 			break;
   2721 
   2722 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2723 		    CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
   2724 		    SYSCTL_DESCR("Receive Descriptor next to refresh"),
   2725 		    ixv_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
   2726 		    CTL_CREATE, CTL_EOL) != 0)
   2727 			break;
   2728 
   2729 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2730 		    CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
   2731 		    SYSCTL_DESCR("Receive Descriptor Head"),
   2732 		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2733 		    CTL_CREATE, CTL_EOL) != 0)
   2734 			break;
   2735 
   2736 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2737 		    CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
   2738 		    SYSCTL_DESCR("Receive Descriptor Tail"),
   2739 		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2740 		    CTL_CREATE, CTL_EOL) != 0)
   2741 			break;
   2742 
   2743 		evcnt_attach_dynamic(&sc->queues[i].irqs, EVCNT_TYPE_INTR,
   2744 		    NULL, sc->queues[i].evnamebuf, "IRQs on queue");
   2745 		evcnt_attach_dynamic(&sc->queues[i].handleq,
   2746 		    EVCNT_TYPE_MISC, NULL, sc->queues[i].evnamebuf,
   2747 		    "Handled queue in softint");
   2748 		evcnt_attach_dynamic(&sc->queues[i].req, EVCNT_TYPE_MISC,
   2749 		    NULL, sc->queues[i].evnamebuf, "Requeued in softint");
   2750 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2751 		    NULL, sc->queues[i].evnamebuf,
   2752 		    "Queue Packets Transmitted");
   2753 #ifndef IXGBE_LEGACY_TX
   2754 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2755 		    NULL, sc->queues[i].evnamebuf,
   2756 		    "Packets dropped in pcq");
   2757 #endif
   2758 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2759 		    NULL, sc->queues[i].evnamebuf,
   2760 		    "TX Queue No Descriptor Available");
   2761 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2762 		    NULL, sc->queues[i].evnamebuf, "TSO");
   2763 
   2764 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2765 		    NULL, sc->queues[i].evnamebuf,
   2766 		    "Queue Bytes Received");
   2767 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2768 		    NULL, sc->queues[i].evnamebuf,
   2769 		    "Queue Packets Received");
   2770 		evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
   2771 		    NULL, sc->queues[i].evnamebuf, "Rx no mbuf");
   2772 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2773 		    NULL, sc->queues[i].evnamebuf, "Rx discarded");
   2774 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2775 		    NULL, sc->queues[i].evnamebuf, "Copied RX Frames");
   2776 #ifdef LRO
   2777 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2778 				CTLFLAG_RD, &lro->lro_queued, 0,
   2779 				"LRO Queued");
   2780 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2781 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2782 				"LRO Flushed");
   2783 #endif /* LRO */
   2784 	}
   2785 
   2786 	/* MAC stats get their own sub node */
   2787 
   2788 	snprintf(stats->namebuf,
   2789 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2790 
   2791 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2792 	    stats->namebuf, "rx csum offload - IP");
   2793 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2794 	    stats->namebuf, "rx csum offload - L4");
   2795 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2796 	    stats->namebuf, "rx csum offload - IP bad");
   2797 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2798 	    stats->namebuf, "rx csum offload - L4 bad");
   2799 
   2800 	/* Packet Reception Stats */
   2801 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2802 	    xname, "Good Packets Received");
   2803 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2804 	    xname, "Good Octets Received");
   2805 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2806 	    xname, "Multicast Packets Received");
   2807 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2808 	    xname, "Good Packets Transmitted");
   2809 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2810 	    xname, "Good Octets Transmitted");
   2811 
   2812 	/* Mailbox Stats */
   2813 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2814 	    xname, "message TXs");
   2815 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2816 	    xname, "message RXs");
   2817 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2818 	    xname, "ACKs");
   2819 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2820 	    xname, "REQs");
   2821 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2822 	    xname, "RSTs");
   2823 
   2824 } /* ixv_add_stats_sysctls */
   2825 
   2826 static void
   2827 ixv_clear_evcnt(struct ixgbe_softc *sc)
   2828 {
   2829 	struct tx_ring		*txr = sc->tx_rings;
   2830 	struct rx_ring		*rxr = sc->rx_rings;
   2831 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
   2832 	struct ixgbe_hw *hw = &sc->hw;
   2833 	int i;
   2834 
   2835 	/* Driver Statistics */
   2836 	IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, 0);
   2837 	IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, 0);
   2838 	IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, 0);
   2839 	IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, 0);
   2840 	IXGBE_EVC_STORE(&sc->other_tx_dma_setup, 0);
   2841 	IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, 0);
   2842 	IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, 0);
   2843 	IXGBE_EVC_STORE(&sc->watchdog_events, 0);
   2844 	IXGBE_EVC_STORE(&sc->tso_err, 0);
   2845 	IXGBE_EVC_STORE(&sc->admin_irqev, 0);
   2846 	IXGBE_EVC_STORE(&sc->link_workev, 0);
   2847 
   2848 	for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
   2849 		IXGBE_EVC_STORE(&sc->queues[i].irqs, 0);
   2850 		IXGBE_EVC_STORE(&sc->queues[i].handleq, 0);
   2851 		IXGBE_EVC_STORE(&sc->queues[i].req, 0);
   2852 		IXGBE_EVC_STORE(&txr->total_packets, 0);
   2853 #ifndef IXGBE_LEGACY_TX
   2854 		IXGBE_EVC_STORE(&txr->pcq_drops, 0);
   2855 #endif
   2856 		IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
   2857 		IXGBE_EVC_STORE(&txr->tso_tx, 0);
   2858 		txr->q_efbig_tx_dma_setup = 0;
   2859 		txr->q_mbuf_defrag_failed = 0;
   2860 		txr->q_efbig2_tx_dma_setup = 0;
   2861 		txr->q_einval_tx_dma_setup = 0;
   2862 		txr->q_other_tx_dma_setup = 0;
   2863 		txr->q_eagain_tx_dma_setup = 0;
   2864 		txr->q_enomem_tx_dma_setup = 0;
   2865 		txr->q_tso_err = 0;
   2866 
   2867 		IXGBE_EVC_STORE(&rxr->rx_packets, 0);
   2868 		IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
   2869 		IXGBE_EVC_STORE(&rxr->rx_copies, 0);
   2870 		IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
   2871 		IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
   2872 	}
   2873 
    2874 	/* MAC stats */
   2875 
   2876 	IXGBE_EVC_STORE(&stats->ipcs, 0);
   2877 	IXGBE_EVC_STORE(&stats->l4cs, 0);
   2878 	IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
   2879 	IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
   2880 
   2881 	/*
   2882 	 * Packet Reception Stats.
    2883 	 * Call ixv_init_stats() to save the current counter values as baselines.
   2884 	 */
   2885 	ixv_init_stats(sc);
   2886 	IXGBE_EVC_STORE(&stats->vfgprc, 0);
   2887 	IXGBE_EVC_STORE(&stats->vfgorc, 0);
   2888 	IXGBE_EVC_STORE(&stats->vfmprc, 0);
   2889 	IXGBE_EVC_STORE(&stats->vfgptc, 0);
   2890 	IXGBE_EVC_STORE(&stats->vfgotc, 0);
   2891 
   2892 	/* Mailbox Stats */
   2893 	IXGBE_EVC_STORE(&hw->mbx.stats.msgs_tx, 0);
   2894 	IXGBE_EVC_STORE(&hw->mbx.stats.msgs_rx, 0);
   2895 	IXGBE_EVC_STORE(&hw->mbx.stats.acks, 0);
   2896 	IXGBE_EVC_STORE(&hw->mbx.stats.reqs, 0);
   2897 	IXGBE_EVC_STORE(&hw->mbx.stats.rsts, 0);
   2898 
   2899 } /* ixv_clear_evcnt */
   2900 
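/*
 * Print one per-queue register across all queues on a single line;
 * e.g. PRINTQS(sc, VFRDT) prints every queue's VFRDT value.
 */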
   2901 #define PRINTQS(sc, regname)						\
   2902 	do {								\
   2903 		struct ixgbe_hw	*_hw = &(sc)->hw;			\
   2904 		int _i;							\
   2905 									\
   2906 		printf("%s: %s", device_xname((sc)->dev), #regname);	\
   2907 		for (_i = 0; _i < (sc)->num_queues; _i++) {		\
   2908 			printf((_i == 0) ? "\t" : " ");			\
   2909 			printf("%08x", IXGBE_READ_REG(_hw,		\
   2910 				IXGBE_##regname(_i)));			\
   2911 		}							\
   2912 		printf("\n");						\
   2913 	} while (0)
   2914 
   2915 /************************************************************************
   2916  * ixv_print_debug_info
   2917  *
    2918  *   Provides a way to examine important registers and statistics
    2919  *   maintained by the driver and hardware.
   2920  ************************************************************************/
   2921 static void
   2922 ixv_print_debug_info(struct ixgbe_softc *sc)
   2923 {
   2924 	device_t	dev = sc->dev;
   2925 	struct ixgbe_hw *hw = &sc->hw;
   2926 	int i;
   2927 
   2928 	device_printf(dev, "queue:");
   2929 	for (i = 0; i < sc->num_queues; i++) {
   2930 		printf((i == 0) ? "\t" : " ");
   2931 		printf("%8d", i);
   2932 	}
   2933 	printf("\n");
   2934 	PRINTQS(sc, VFRDBAL);
   2935 	PRINTQS(sc, VFRDBAH);
   2936 	PRINTQS(sc, VFRDLEN);
   2937 	PRINTQS(sc, VFSRRCTL);
   2938 	PRINTQS(sc, VFRDH);
   2939 	PRINTQS(sc, VFRDT);
   2940 	PRINTQS(sc, VFRXDCTL);
   2941 
   2942 	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIMS));
   2943 	device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIAM));
   2944 	device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIAC));
   2945 } /* ixv_print_debug_info */
   2946 
   2947 /************************************************************************
   2948  * ixv_sysctl_debug
   2949  ************************************************************************/
   2950 static int
   2951 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2952 {
   2953 	struct sysctlnode node = *rnode;
   2954 	struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
   2955 	int	       error, result = 0;
   2956 
   2957 	node.sysctl_data = &result;
   2958 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2959 
   2960 	if (error || newp == NULL)
   2961 		return error;
   2962 
   2963 	if (result == 1)
   2964 		ixv_print_debug_info(sc);
   2965 
   2966 	return 0;
   2967 } /* ixv_sysctl_debug */
   2968 
   2969 /************************************************************************
   2970  * ixv_sysctl_rx_copy_len
   2971  ************************************************************************/
   2972 static int
   2973 ixv_sysctl_rx_copy_len(SYSCTLFN_ARGS)
   2974 {
   2975 	struct sysctlnode node = *rnode;
   2976 	struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
   2977 	int error;
   2978 	int result = sc->rx_copy_len;
   2979 
   2980 	node.sysctl_data = &result;
   2981 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2982 
   2983 	if (error || newp == NULL)
   2984 		return error;
   2985 
   2986 	if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
   2987 		return EINVAL;
   2988 
   2989 	sc->rx_copy_len = result;
   2990 
   2991 	return 0;
   2992 } /* ixv_sysctl_rx_copy_len */
   2993 
   2994 /************************************************************************
   2995  * ixv_sysctl_tx_process_limit
   2996  ************************************************************************/
   2997 static int
   2998 ixv_sysctl_tx_process_limit(SYSCTLFN_ARGS)
   2999 {
   3000 	struct sysctlnode node = *rnode;
   3001 	struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
   3002 	int error;
   3003 	int result = sc->tx_process_limit;
   3004 
   3005 	node.sysctl_data = &result;
   3006 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3007 
   3008 	if (error || newp == NULL)
   3009 		return error;
   3010 
   3011 	if ((result <= 0) || (result > sc->num_tx_desc))
   3012 		return EINVAL;
   3013 
   3014 	sc->tx_process_limit = result;
   3015 
   3016 	return 0;
   3017 } /* ixv_sysctl_tx_process_limit */
   3018 
   3019 /************************************************************************
   3020  * ixv_sysctl_rx_process_limit
   3021  ************************************************************************/
   3022 static int
   3023 ixv_sysctl_rx_process_limit(SYSCTLFN_ARGS)
   3024 {
   3025 	struct sysctlnode node = *rnode;
   3026 	struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
   3027 	int error;
   3028 	int result = sc->rx_process_limit;
   3029 
   3030 	node.sysctl_data = &result;
   3031 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3032 
   3033 	if (error || newp == NULL)
   3034 		return error;
   3035 
   3036 	if ((result <= 0) || (result > sc->num_rx_desc))
   3037 		return EINVAL;
   3038 
   3039 	sc->rx_process_limit = result;
   3040 
   3041 	return 0;
   3042 } /* ixv_sysctl_rx_process_limit */
   3043 
   3044 /************************************************************************
   3045  * ixv_init_device_features
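          *
          *   Two-stage scheme: feat_cap collects everything this MAC
          *   could support, feat_en the subset actually enabled (by
          *   default or via sysctl/tunable).  Runtime code tests
          *   feat_en, e.g.:
          *
          *       if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX)
          *               (use the legacy if_start transmit path)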
   3046  ************************************************************************/
   3047 static void
   3048 ixv_init_device_features(struct ixgbe_softc *sc)
   3049 {
   3050 	sc->feat_cap = IXGBE_FEATURE_NETMAP
   3051 			  | IXGBE_FEATURE_VF
   3052 			  | IXGBE_FEATURE_RSS
   3053 			  | IXGBE_FEATURE_LEGACY_TX;
   3054 
    3055 	/* A tad short on feature flags for VFs at the moment. */
   3056 	switch (sc->hw.mac.type) {
   3057 	case ixgbe_mac_82599_vf:
   3058 		break;
   3059 	case ixgbe_mac_X540_vf:
   3060 		break;
   3061 	case ixgbe_mac_X550_vf:
   3062 	case ixgbe_mac_X550EM_x_vf:
   3063 	case ixgbe_mac_X550EM_a_vf:
   3064 		sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   3065 		break;
   3066 	default:
   3067 		break;
   3068 	}
   3069 
   3070 	/* Enabled by default... */
   3071 	/* Is a virtual function (VF) */
   3072 	if (sc->feat_cap & IXGBE_FEATURE_VF)
   3073 		sc->feat_en |= IXGBE_FEATURE_VF;
   3074 	/* Netmap */
   3075 	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
   3076 		sc->feat_en |= IXGBE_FEATURE_NETMAP;
   3077 	/* Receive-Side Scaling (RSS) */
   3078 	if (sc->feat_cap & IXGBE_FEATURE_RSS)
   3079 		sc->feat_en |= IXGBE_FEATURE_RSS;
   3080 	/* Needs advanced context descriptor regardless of offloads req'd */
   3081 	if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   3082 		sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   3083 
   3084 	/* Enabled via sysctl... */
   3085 	/* Legacy (single queue) transmit */
   3086 	if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   3087 	    ixv_enable_legacy_tx)
   3088 		sc->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   3089 } /* ixv_init_device_features */
   3090 
   3091 /************************************************************************
   3092  * ixv_shutdown - Shutdown entry point
   3093  ************************************************************************/
   3094 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3095 static int
   3096 ixv_shutdown(device_t dev)
   3097 {
   3098 	struct ixgbe_softc *sc = device_private(dev);
   3099 	IXGBE_CORE_LOCK(sc);
   3100 	ixv_stop_locked(sc);
   3101 	IXGBE_CORE_UNLOCK(sc);
   3102 
   3103 	return (0);
   3104 } /* ixv_shutdown */
   3105 #endif
   3106 
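          /************************************************************************
           * ixv_ifflags_cb - Callback for interface flag/capability changes
           *
           *   Called via ether_ioctl() when interface flags change; also
           *   picks up ethercom capability (ec_capenable) changes.
           *   Promiscuous mode and the VLAN ETHERCAP_* bits are applied
           *   in place; any other change returns ENETRESET so that
           *   ether_ioctl() reinitializes the interface.
           ************************************************************************/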
   3107 static int
   3108 ixv_ifflags_cb(struct ethercom *ec)
   3109 {
   3110 	struct ifnet *ifp = &ec->ec_if;
   3111 	struct ixgbe_softc *sc = ifp->if_softc;
   3112 	u_short saved_flags;
   3113 	u_short change;
   3114 	int rv = 0;
   3115 
   3116 	IXGBE_CORE_LOCK(sc);
   3117 
   3118 	saved_flags = sc->if_flags;
   3119 	change = ifp->if_flags ^ sc->if_flags;
   3120 	if (change != 0)
   3121 		sc->if_flags = ifp->if_flags;
   3122 
   3123 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3124 		rv = ENETRESET;
   3125 		goto out;
   3126 	} else if ((change & IFF_PROMISC) != 0) {
   3127 		rv = ixv_set_rxfilter(sc);
   3128 		if (rv != 0) {
   3129 			/* Restore previous */
   3130 			sc->if_flags = saved_flags;
   3131 			goto out;
   3132 		}
   3133 	}
   3134 
    3135 	/* Check for changes to ec_capenable. */
   3136 	change = ec->ec_capenable ^ sc->ec_capenable;
   3137 	sc->ec_capenable = ec->ec_capenable;
   3138 	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
   3139 	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
   3140 		rv = ENETRESET;
   3141 		goto out;
   3142 	}
   3143 
   3144 	/*
   3145 	 * Special handling is not required for ETHERCAP_VLAN_MTU.
    3146 	 * PF's MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
   3147 	 */
   3148 
   3149 	/* Set up VLAN support and filter */
   3150 	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
   3151 		rv = ixv_setup_vlan_support(sc);
   3152 
   3153 out:
   3154 	IXGBE_CORE_UNLOCK(sc);
   3155 
   3156 	return rv;
   3157 }
   3158 
   3159 
   3160 /************************************************************************
   3161  * ixv_ioctl - Ioctl entry point
   3162  *
   3163  *   Called when the user wants to configure the interface.
   3164  *
   3165  *   return 0 on success, positive on failure
   3166  ************************************************************************/
   3167 static int
   3168 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   3169 {
   3170 	struct ixgbe_softc *sc = ifp->if_softc;
   3171 	struct ixgbe_hw *hw = &sc->hw;
   3172 	struct ifcapreq *ifcr = data;
   3173 	int		error;
   3174 	int l4csum_en;
   3175 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
   3176 	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3177 
   3178 	switch (command) {
   3179 	case SIOCSIFFLAGS:
   3180 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   3181 		break;
   3182 	case SIOCADDMULTI: {
   3183 		struct ether_multi *enm;
   3184 		struct ether_multistep step;
   3185 		struct ethercom *ec = &sc->osdep.ec;
   3186 		bool overflow = false;
   3187 		int mcnt = 0;
   3188 
    3189 		/*
    3190 		 * Check the number of multicast addresses. If it exceeds
    3191 		 * the limit, return ENOSPC.
    3192 		 * Update this code when we support mailbox API 1.3.
    3193 		 */
   3194 		ETHER_LOCK(ec);
   3195 		ETHER_FIRST_MULTI(step, ec, enm);
   3196 		while (enm != NULL) {
   3197 			mcnt++;
   3198 
    3199 			/*
    3200 			 * This check runs before the address is added, so
    3201 			 * at least one free slot must remain.
    3202 			 */
   3203 			if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
   3204 				overflow = true;
   3205 				break;
   3206 			}
   3207 			ETHER_NEXT_MULTI(step, enm);
   3208 		}
   3209 		ETHER_UNLOCK(ec);
   3210 		error = 0;
   3211 		if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
   3212 			error = hw->mac.ops.update_xcast_mode(hw,
   3213 			    IXGBEVF_XCAST_MODE_ALLMULTI);
   3214 			if (error == IXGBE_ERR_NOT_TRUSTED) {
   3215 				device_printf(sc->dev,
   3216 				    "this interface is not trusted\n");
   3217 				error = EPERM;
   3218 			} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
   3219 				device_printf(sc->dev,
   3220 				    "the PF doesn't support allmulti mode\n");
   3221 				error = EOPNOTSUPP;
   3222 			} else if (error) {
   3223 				device_printf(sc->dev,
   3224 				    "number of Ethernet multicast addresses "
   3225 				    "exceeds the limit (%d). error = %d\n",
   3226 				    IXGBE_MAX_VF_MC, error);
   3227 				error = ENOSPC;
   3228 			} else
   3229 				ec->ec_flags |= ETHER_F_ALLMULTI;
   3230 		}
   3231 		if (error)
   3232 			return error;
   3233 	}
   3234 		/*FALLTHROUGH*/
   3235 	case SIOCDELMULTI:
   3236 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   3237 		break;
   3238 	case SIOCSIFMEDIA:
   3239 	case SIOCGIFMEDIA:
   3240 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   3241 		break;
   3242 	case SIOCSIFCAP:
   3243 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   3244 		break;
   3245 	case SIOCSIFMTU:
   3246 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   3247 		break;
   3248 	case SIOCZIFDATA:
   3249 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   3250 		ixv_update_stats(sc);
   3251 		ixv_clear_evcnt(sc);
   3252 		break;
   3253 	default:
   3254 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   3255 		break;
   3256 	}
   3257 
   3258 	switch (command) {
   3259 	case SIOCSIFCAP:
   3260 		/* Layer-4 Rx checksum offload has to be turned on and
   3261 		 * off as a unit.
   3262 		 */
   3263 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   3264 		if (l4csum_en != l4csum && l4csum_en != 0)
   3265 			return EINVAL;
   3266 		/*FALLTHROUGH*/
   3267 	case SIOCADDMULTI:
   3268 	case SIOCDELMULTI:
   3269 	case SIOCSIFFLAGS:
   3270 	case SIOCSIFMTU:
   3271 	default:
   3272 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   3273 			return error;
   3274 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   3275 			;
   3276 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   3277 			IXGBE_CORE_LOCK(sc);
   3278 			ixv_init_locked(sc);
   3279 			IXGBE_CORE_UNLOCK(sc);
   3280 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   3281 			/*
   3282 			 * Multicast list has changed; set the hardware filter
   3283 			 * accordingly.
   3284 			 */
   3285 			IXGBE_CORE_LOCK(sc);
   3286 			ixv_disable_intr(sc);
   3287 			ixv_set_rxfilter(sc);
   3288 			ixv_enable_intr(sc);
   3289 			IXGBE_CORE_UNLOCK(sc);
   3290 		}
   3291 		return 0;
   3292 	}
   3293 } /* ixv_ioctl */
   3294 
   3295 /************************************************************************
   3296  * ixv_init
   3297  ************************************************************************/
   3298 static int
   3299 ixv_init(struct ifnet *ifp)
   3300 {
   3301 	struct ixgbe_softc *sc = ifp->if_softc;
   3302 
   3303 	IXGBE_CORE_LOCK(sc);
   3304 	ixv_init_locked(sc);
   3305 	IXGBE_CORE_UNLOCK(sc);
   3306 
   3307 	return 0;
   3308 } /* ixv_init */
   3309 
   3310 /************************************************************************
   3311  * ixv_handle_que
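          *
          *   Deferred per-queue service routine, run from softint or
          *   workqueue context: completes Tx descriptors, restarts any
          *   pending transmit work, then polls Rx.  While work remains
          *   it reschedules itself; otherwise the queue interrupt is
          *   re-enabled.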
   3312  ************************************************************************/
   3313 static void
   3314 ixv_handle_que(void *context)
   3315 {
   3316 	struct ix_queue *que = context;
   3317 	struct ixgbe_softc *sc = que->sc;
   3318 	struct tx_ring	*txr = que->txr;
   3319 	struct ifnet	*ifp = sc->ifp;
   3320 	bool		more;
   3321 
   3322 	IXGBE_EVC_ADD(&que->handleq, 1);
   3323 
   3324 	if (ifp->if_flags & IFF_RUNNING) {
   3325 		IXGBE_TX_LOCK(txr);
   3326 		more = ixgbe_txeof(txr);
   3327 		if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX))
   3328 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   3329 				ixgbe_mq_start_locked(ifp, txr);
   3330 		/* Only for queue 0 */
   3331 		/* NetBSD still needs this for CBQ */
   3332 		if ((&sc->queues[0] == que)
   3333 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   3334 			ixgbe_legacy_start_locked(ifp, txr);
   3335 		IXGBE_TX_UNLOCK(txr);
   3336 		more |= ixgbe_rxeof(que);
   3337 		if (more) {
   3338 			IXGBE_EVC_ADD(&que->req, 1);
   3339 			if (sc->txrx_use_workqueue) {
    3340 				/*
    3341 				 * An "enqueued" flag is not required here,
    3342 				 * just as in ixg(4). See ixgbe_msix_que().
    3343 				 */
   3344 				workqueue_enqueue(sc->que_wq,
   3345 				    &que->wq_cookie, curcpu());
   3346 			} else
    3347 				softint_schedule(que->que_si);
   3348 			return;
   3349 		}
   3350 	}
   3351 
   3352 	/* Re-enable this interrupt */
   3353 	ixv_enable_queue(sc, que->msix);
   3354 
   3355 	return;
   3356 } /* ixv_handle_que */
   3357 
   3358 /************************************************************************
   3359  * ixv_handle_que_work
   3360  ************************************************************************/
   3361 static void
   3362 ixv_handle_que_work(struct work *wk, void *context)
   3363 {
   3364 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   3365 
    3366 	/*
    3367 	 * An "enqueued" flag is not required here, just as in ixg(4).
    3368 	 * See ixgbe_msix_que().
    3369 	 */
   3370 	ixv_handle_que(que);
   3371 }
   3372 
   3373 /************************************************************************
   3374  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
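          *
          *   Vector layout: vectors 0 .. num_queues-1 service the Tx/Rx
          *   queue pairs (affinity spread round-robin across CPUs); the
          *   final vector handles mailbox/link events.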
   3375  ************************************************************************/
   3376 static int
   3377 ixv_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa)
   3378 {
   3379 	device_t	dev = sc->dev;
   3380 	struct ix_queue *que = sc->queues;
   3381 	struct tx_ring	*txr = sc->tx_rings;
   3382 	int		error, msix_ctrl, rid, vector = 0;
   3383 	pci_chipset_tag_t pc;
   3384 	pcitag_t	tag;
   3385 	char		intrbuf[PCI_INTRSTR_LEN];
   3386 	char		wqname[MAXCOMLEN];
   3387 	char		intr_xname[32];
   3388 	const char	*intrstr = NULL;
   3389 	kcpuset_t	*affinity;
   3390 	int		cpu_id = 0;
   3391 
   3392 	pc = sc->osdep.pc;
   3393 	tag = sc->osdep.tag;
   3394 
   3395 	sc->osdep.nintrs = sc->num_queues + 1;
   3396 	if (pci_msix_alloc_exact(pa, &sc->osdep.intrs,
   3397 	    sc->osdep.nintrs) != 0) {
   3398 		aprint_error_dev(dev,
   3399 		    "failed to allocate MSI-X interrupt\n");
   3400 		return (ENXIO);
   3401 	}
   3402 
   3403 	kcpuset_create(&affinity, false);
   3404 	for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
   3405 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   3406 		    device_xname(dev), i);
   3407 		intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf,
   3408 		    sizeof(intrbuf));
   3409 #ifdef IXGBE_MPSAFE
   3410 		pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE,
   3411 		    true);
   3412 #endif
   3413 		/* Set the handler function */
   3414 		que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
   3415 		    sc->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   3416 		    intr_xname);
   3417 		if (que->res == NULL) {
   3418 			pci_intr_release(pc, sc->osdep.intrs,
   3419 			    sc->osdep.nintrs);
   3420 			aprint_error_dev(dev,
   3421 			    "Failed to register QUE handler\n");
   3422 			kcpuset_destroy(affinity);
   3423 			return (ENXIO);
   3424 		}
   3425 		que->msix = vector;
    3426 		sc->active_queues |= (u64)1 << que->msix;
   3427 
   3428 		cpu_id = i;
   3429 		/* Round-robin affinity */
   3430 		kcpuset_zero(affinity);
   3431 		kcpuset_set(affinity, cpu_id % ncpu);
   3432 		error = interrupt_distribute(sc->osdep.ihs[i], affinity, NULL);
   3433 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   3434 		    intrstr);
   3435 		if (error == 0)
   3436 			aprint_normal(", bound queue %d to cpu %d\n",
   3437 			    i, cpu_id % ncpu);
   3438 		else
   3439 			aprint_normal("\n");
   3440 
    3441 #ifndef IXGBE_LEGACY_TX
    3442 		txr->txr_si
    3443 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
    3444 			ixgbe_deferred_mq_start, txr);
         		if (txr->txr_si == NULL)
         			aprint_error_dev(dev, "could not establish "
         			    "deferred Tx software interrupt\n");
    3445 #endif
   3446 		que->que_si
   3447 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
   3448 			ixv_handle_que, que);
   3449 		if (que->que_si == NULL) {
   3450 			aprint_error_dev(dev,
   3451 			    "could not establish software interrupt\n");
   3452 		}
   3453 	}
   3454 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   3455 	error = workqueue_create(&sc->txr_wq, wqname,
   3456 	    ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3457 	    IXGBE_WORKQUEUE_FLAGS);
   3458 	if (error) {
   3459 		aprint_error_dev(dev,
   3460 		    "couldn't create workqueue for deferred Tx\n");
   3461 	}
   3462 	sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   3463 
   3464 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   3465 	error = workqueue_create(&sc->que_wq, wqname,
   3466 	    ixv_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3467 	    IXGBE_WORKQUEUE_FLAGS);
   3468 	if (error) {
   3469 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   3470 	}
   3471 
   3472 	/* and Mailbox */
   3473 	cpu_id++;
   3474 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   3475 	sc->vector = vector;
   3476 	intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf,
   3477 	    sizeof(intrbuf));
   3478 #ifdef IXGBE_MPSAFE
   3479 	pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE,
   3480 	    true);
   3481 #endif
   3482 	/* Set the mbx handler function */
   3483 	sc->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   3484 	    sc->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, sc, intr_xname);
   3485 	if (sc->osdep.ihs[vector] == NULL) {
   3486 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   3487 		kcpuset_destroy(affinity);
   3488 		return (ENXIO);
   3489 	}
   3490 	/* Round-robin affinity */
   3491 	kcpuset_zero(affinity);
   3492 	kcpuset_set(affinity, cpu_id % ncpu);
   3493 	error = interrupt_distribute(sc->osdep.ihs[vector], affinity,
   3494 	    NULL);
   3495 
   3496 	aprint_normal_dev(dev,
   3497 	    "for link, interrupting at %s", intrstr);
   3498 	if (error == 0)
   3499 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   3500 	else
   3501 		aprint_normal("\n");
   3502 
   3503 	/* Tasklets for Mailbox */
   3504 	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
   3505 	error = workqueue_create(&sc->admin_wq, wqname,
   3506 	    ixv_handle_admin, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3507 	    IXGBE_TASKLET_WQ_FLAGS);
   3508 	if (error) {
   3509 		aprint_error_dev(dev,
   3510 		    "could not create admin workqueue (%d)\n", error);
   3511 		goto err_out;
   3512 	}
   3513 
   3514 	/*
    3515 	 * Due to a broken design, QEMU will fail to properly
   3516 	 * enable the guest for MSI-X unless the vectors in
   3517 	 * the table are all set up, so we must rewrite the
   3518 	 * ENABLE in the MSI-X control register again at this
   3519 	 * point to cause it to successfully initialize us.
   3520 	 */
   3521 	if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
   3522 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   3523 		rid += PCI_MSIX_CTL;
   3524 		msix_ctrl = pci_conf_read(pc, tag, rid);
   3525 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   3526 		pci_conf_write(pc, tag, rid, msix_ctrl);
   3527 	}
   3528 
   3529 	kcpuset_destroy(affinity);
   3530 	return (0);
   3531 err_out:
   3532 	kcpuset_destroy(affinity);
   3533 	ixv_free_deferred_handlers(sc);
   3534 	ixv_free_pci_resources(sc);
   3535 	return (error);
   3536 } /* ixv_allocate_msix */
   3537 
   3538 /************************************************************************
   3539  * ixv_configure_interrupts - Setup MSI-X resources
   3540  *
   3541  *   Note: The VF device MUST use MSI-X, there is no fallback.
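          *
          *   Worked example: with ixv_num_queues left at 0, 8 CPUs and
          *   3 MSI-X messages available, the auto configuration picks
          *   min(ncpu, msgs - 1) = 2 queues and requests 2 + 1 = 3
          *   vectors (one per queue pair plus one for the mailbox).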
   3542  ************************************************************************/
   3543 static int
   3544 ixv_configure_interrupts(struct ixgbe_softc *sc)
   3545 {
   3546 	device_t dev = sc->dev;
   3547 	int want, queues, msgs;
   3548 
   3549 	/* Must have at least 2 MSI-X vectors */
   3550 	msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag);
   3551 	if (msgs < 2) {
    3552 		aprint_error_dev(dev, "need at least 2 MSI-X vectors\n");
   3553 		return (ENXIO);
   3554 	}
   3555 	msgs = MIN(msgs, IXG_MAX_NINTR);
   3556 
   3557 	/* Figure out a reasonable auto config value */
   3558 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   3559 
    3560 	if (ixv_num_queues != 0)
    3561 		queues = ixv_num_queues;
    3562 	else if (queues > IXGBE_VF_MAX_TX_QUEUES)
    3563 		queues = IXGBE_VF_MAX_TX_QUEUES;
   3564 
   3565 	/*
   3566 	 * Want vectors for the queues,
   3567 	 * plus an additional for mailbox.
   3568 	 */
   3569 	want = queues + 1;
   3570 	if (msgs >= want)
   3571 		msgs = want;
   3572 	else {
   3573 		aprint_error_dev(dev,
    3574 		    "MSI-X configuration problem: %d vectors available "
    3575 		    "but %d wanted!\n", msgs, want);
    3576 		return (ENXIO);
   3577 	}
   3578 
   3579 	aprint_normal_dev(dev,
   3580 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   3581 	sc->num_queues = queues;
   3582 
   3583 	return (0);
   3584 } /* ixv_configure_interrupts */
   3585 
   3586 
   3587 /************************************************************************
   3588  * ixv_handle_admin - Tasklet handler for MSI-X MBX interrupts
   3589  *
   3590  *   Done outside of interrupt context since the driver might sleep
   3591  ************************************************************************/
   3592 static void
   3593 ixv_handle_admin(struct work *wk, void *context)
   3594 {
   3595 	struct ixgbe_softc *sc = context;
   3596 	struct ixgbe_hw	*hw = &sc->hw;
   3597 
   3598 	IXGBE_CORE_LOCK(sc);
   3599 
   3600 	IXGBE_EVC_ADD(&sc->link_workev, 1);
    3601 	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
    3602 	    FALSE);
   3603 	ixv_update_link_status(sc);
   3604 
   3605 	sc->task_requests = 0;
   3606 	atomic_store_relaxed(&sc->admin_pending, 0);
   3607 
   3608 	/* Re-enable interrupts */
   3609 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->vector));
   3610 
   3611 	IXGBE_CORE_UNLOCK(sc);
   3612 } /* ixv_handle_admin */
   3613 
   3614 /************************************************************************
   3615  * ixv_check_link - Used in the local timer to poll for link changes
   3616  ************************************************************************/
   3617 static s32
   3618 ixv_check_link(struct ixgbe_softc *sc)
   3619 {
   3620 	s32 error;
   3621 
   3622 	KASSERT(mutex_owned(&sc->core_mtx));
   3623 
   3624 	sc->hw.mac.get_link_status = TRUE;
   3625 
   3626 	error = sc->hw.mac.ops.check_link(&sc->hw,
   3627 	    &sc->link_speed, &sc->link_up, FALSE);
   3628 	ixv_update_link_status(sc);
   3629 
   3630 	return error;
   3631 } /* ixv_check_link */
   3632