      1 /* $NetBSD: ixv.c,v 1.197 2024/07/10 03:23:02 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 #include <sys/cdefs.h>
     38 __KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.197 2024/07/10 03:23:02 msaitoh Exp $");
     39 
     40 #ifdef _KERNEL_OPT
     41 #include "opt_inet.h"
     42 #include "opt_inet6.h"
     43 #include "opt_net_mpsafe.h"
     44 #endif
     45 
     46 #include "ixgbe.h"
     47 
     48 /************************************************************************
     49  * Driver version
     50  ************************************************************************/
     51 static const char ixv_driver_version[] = "2.0.1-k";
     52 /* XXX NetBSD: + 1.5.17 */
     53 
     54 /************************************************************************
     55  * PCI Device ID Table
     56  *
      57  *   Used by probe to select the devices to attach to.
      58  *   The last field stores an index into ixv_strings.
      59  *   The last entry must be all zeros.
     60  *
     61  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     62  ************************************************************************/
     63 static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
     64 {
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     68 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     69 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     70 	/* required last entry */
     71 	{0, 0, 0, 0, 0}
     72 };
     73 
     74 /************************************************************************
     75  * Table of branding strings
     76  ************************************************************************/
     77 static const char *ixv_strings[] = {
     78 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     79 };
     80 
     81 /*********************************************************************
     82  *  Function prototypes
     83  *********************************************************************/
     84 static int	ixv_probe(device_t, cfdata_t, void *);
     85 static void	ixv_attach(device_t, device_t, void *);
     86 static int	ixv_detach(device_t, int);
     87 #if 0
     88 static int	ixv_shutdown(device_t);
     89 #endif
     90 static int	ixv_ifflags_cb(struct ethercom *);
     91 static int	ixv_ioctl(struct ifnet *, u_long, void *);
     92 static int	ixv_init(struct ifnet *);
     93 static void	ixv_init_locked(struct ixgbe_softc *);
     94 static void	ixv_ifstop(struct ifnet *, int);
     95 static void	ixv_stop_locked(void *);
     96 static void	ixv_init_device_features(struct ixgbe_softc *);
     97 static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
     98 static int	ixv_media_change(struct ifnet *);
     99 static int	ixv_allocate_pci_resources(struct ixgbe_softc *,
    100 		    const struct pci_attach_args *);
    101 static void	ixv_free_deferred_handlers(struct ixgbe_softc *);
    102 static int	ixv_allocate_msix(struct ixgbe_softc *,
    103 		    const struct pci_attach_args *);
    104 static int	ixv_configure_interrupts(struct ixgbe_softc *);
    105 static void	ixv_free_pci_resources(struct ixgbe_softc *);
    106 static void	ixv_local_timer(void *);
    107 static void	ixv_handle_timer(struct work *, void *);
    108 static int	ixv_setup_interface(device_t, struct ixgbe_softc *);
    109 static void	ixv_schedule_admin_tasklet(struct ixgbe_softc *);
    110 static int	ixv_negotiate_api(struct ixgbe_softc *);
    111 
    112 static void	ixv_initialize_transmit_units(struct ixgbe_softc *);
    113 static void	ixv_initialize_receive_units(struct ixgbe_softc *);
    114 static void	ixv_initialize_rss_mapping(struct ixgbe_softc *);
    115 static s32	ixv_check_link(struct ixgbe_softc *);
    116 
    117 static void	ixv_enable_intr(struct ixgbe_softc *);
    118 static void	ixv_disable_intr(struct ixgbe_softc *);
    119 static int	ixv_set_rxfilter(struct ixgbe_softc *);
    120 static void	ixv_update_link_status(struct ixgbe_softc *);
    121 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    122 static void	ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);
    123 static void	ixv_configure_ivars(struct ixgbe_softc *);
    124 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    125 static void	ixv_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t);
    126 
    127 static void	ixv_setup_vlan_tagging(struct ixgbe_softc *);
    128 static int	ixv_setup_vlan_support(struct ixgbe_softc *);
    129 static int	ixv_vlan_cb(struct ethercom *, uint16_t, bool);
    130 static int	ixv_register_vlan(struct ixgbe_softc *, u16);
    131 static int	ixv_unregister_vlan(struct ixgbe_softc *, u16);
    132 
    133 static void	ixv_add_device_sysctls(struct ixgbe_softc *);
    134 static void	ixv_init_stats(struct ixgbe_softc *);
    135 static void	ixv_update_stats(struct ixgbe_softc *);
    136 static void	ixv_add_stats_sysctls(struct ixgbe_softc *);
    137 static void	ixv_clear_evcnt(struct ixgbe_softc *);
    138 
    139 /* Sysctl handlers */
    140 static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    141 static int	ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    142 static int	ixv_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
    143 static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
    144 static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
    145 static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
    146 static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
    147 static int	ixv_sysctl_tx_process_limit(SYSCTLFN_PROTO);
    148 static int	ixv_sysctl_rx_process_limit(SYSCTLFN_PROTO);
    149 static int	ixv_sysctl_rx_copy_len(SYSCTLFN_PROTO);
    150 
    151 /* The MSI-X Interrupt handlers */
    152 static int	ixv_msix_que(void *);
    153 static int	ixv_msix_mbx(void *);
    154 
    155 /* Event handlers running on workqueue */
    156 static void	ixv_handle_que(void *);
    157 
    158 /* Deferred workqueue handlers */
    159 static void	ixv_handle_admin(struct work *, void *);
    160 static void	ixv_handle_que_work(struct work *, void *);
    161 
    162 const struct sysctlnode *ixv_sysctl_instance(struct ixgbe_softc *);
    163 static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    164 
    165 /************************************************************************
    166  * NetBSD Device Interface Entry Points
    167  ************************************************************************/
    168 CFATTACH_DECL3_NEW(ixv, sizeof(struct ixgbe_softc),
    169     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    170     DVF_DETACH_SHUTDOWN);
    171 
    172 #if 0
    173 static driver_t ixv_driver = {
    174 	"ixv", ixv_methods, sizeof(struct ixgbe_softc),
    175 };
    176 
    177 devclass_t ixv_devclass;
    178 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    179 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    180 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    181 #endif
    182 
    183 /*
    184  * TUNEABLE PARAMETERS:
    185  */
    186 
    187 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    188 static int ixv_num_queues = 0;
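         /*
          * TUNABLE_INT() is a FreeBSD loader-tunable hook; on NetBSD it
          * is stubbed out below, so the invocations that follow merely
          * document the knob names.
          */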
    189 #define	TUNABLE_INT(__x, __y)
    190 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    191 
     192 /*
     193  * AIM: Adaptive Interrupt Moderation.
     194  * When enabled, the interrupt rate is varied over time based on
     195  * the traffic seen on each interrupt vector.
     196  */
    198 static bool ixv_enable_aim = false;
    199 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    200 
    201 static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    202 TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
    203 
    204 /* How many packets rxeof tries to clean at a time */
    205 static int ixv_rx_process_limit = 256;
    206 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    207 
    208 /* How many packets txeof tries to clean at a time */
    209 static int ixv_tx_process_limit = 256;
    210 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    211 
     212 /* Whether deferred packet processing runs in a workqueue or a softint */
    213 static bool ixv_txrx_workqueue = false;
    214 
     215 /*
     216  * Number of TX descriptors per ring. This is set higher than the
     217  * RX default because it seems to be the better-performing choice.
     218  */
    220 static int ixv_txd = DEFAULT_TXD;
    221 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    222 
    223 /* Number of RX descriptors per ring */
    224 static int ixv_rxd = DEFAULT_RXD;
    225 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    226 
    227 /* Legacy Transmit (single queue) */
    228 static int ixv_enable_legacy_tx = 0;
    229 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    230 
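         /*
          * With options NET_MPSAFE, the callouts, softints and workqueues
          * are created MP-safe and run without holding the kernel lock;
          * otherwise they fall back to KERNEL_LOCKed operation.
          */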
    231 #ifdef NET_MPSAFE
    232 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    233 #define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
    234 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    235 #define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
    236 #else
    237 #define IXGBE_CALLOUT_FLAGS	0
    238 #define IXGBE_SOFTINT_FLAGS	0
    239 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    240 #define IXGBE_TASKLET_WQ_FLAGS	0
    241 #endif
    242 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    243 
    244 #if 0
    245 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    246 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    247 #endif
    248 
    249 /************************************************************************
    250  * ixv_probe - Device identification routine
    251  *
     252  *   Determines if the driver should attach to this adapter
     253  *   based on its PCI vendor/device ID.
     254  *
     255  *   return 1 on match, 0 otherwise
    256  ************************************************************************/
    257 static int
    258 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    259 {
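         	/*
         	 * The VF exposes no INTx interrupt; the device is usable only
         	 * with MSI-X, so refuse to match on platforms without
         	 * MSI/MSI-X support.
         	 */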
    260 #ifdef __HAVE_PCI_MSI_MSIX
    261 	const struct pci_attach_args *pa = aux;
    262 
    263 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    264 #else
    265 	return 0;
    266 #endif
    267 } /* ixv_probe */
    268 
    269 static const ixgbe_vendor_info_t *
    270 ixv_lookup(const struct pci_attach_args *pa)
    271 {
    272 	const ixgbe_vendor_info_t *ent;
    273 	pcireg_t subid;
    274 
    275 	INIT_DEBUGOUT("ixv_lookup: begin");
    276 
    277 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    278 		return NULL;
    279 
    280 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    281 
    282 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    283 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    284 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    285 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    286 		     (ent->subvendor_id == 0)) &&
    287 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    288 		     (ent->subdevice_id == 0))) {
    289 			return ent;
    290 		}
    291 	}
    292 
    293 	return NULL;
    294 }
    295 
    296 /************************************************************************
    297  * ixv_attach - Device initialization routine
    298  *
    299  *   Called when the driver is being loaded.
    300  *   Identifies the type of hardware, allocates all resources
    301  *   and initializes the hardware.
    302  *
    304  ************************************************************************/
    305 static void
    306 ixv_attach(device_t parent, device_t dev, void *aux)
    307 {
    308 	struct ixgbe_softc *sc;
    309 	struct ixgbe_hw *hw;
    310 	int		error = 0;
    311 	pcireg_t	id, subid;
    312 	const ixgbe_vendor_info_t *ent;
    313 	const struct pci_attach_args *pa = aux;
    314 	const char *apivstr;
    315 	const char *str;
    316 	char wqname[MAXCOMLEN];
    317 	char buf[256];
    318 
    319 	INIT_DEBUGOUT("ixv_attach: begin");
    320 
    321 	/*
     322 	 * Make sure BUSMASTER is set; on a VM under KVM it may not
     323 	 * be, and that will break things.
    324 	 */
    325 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    326 
    327 	/* Allocate, clear, and link in our adapter structure */
    328 	sc = device_private(dev);
    329 	sc->hw.back = sc;
    330 	sc->dev = dev;
    331 	hw = &sc->hw;
    332 
    333 	sc->init_locked = ixv_init_locked;
    334 	sc->stop_locked = ixv_stop_locked;
    335 
    336 	sc->osdep.pc = pa->pa_pc;
    337 	sc->osdep.tag = pa->pa_tag;
    338 	if (pci_dma64_available(pa))
    339 		sc->osdep.dmat = pa->pa_dmat64;
    340 	else
    341 		sc->osdep.dmat = pa->pa_dmat;
    342 	sc->osdep.attached = false;
    343 
    344 	ent = ixv_lookup(pa);
    345 
    346 	KASSERT(ent != NULL);
    347 
    348 	aprint_normal(": %s, Version - %s\n",
    349 	    ixv_strings[ent->index], ixv_driver_version);
    350 
    351 	/* Core Lock Init */
    352 	IXGBE_CORE_LOCK_INIT(sc, device_xname(dev));
    353 
    354 	/* Do base PCI setup - map BAR0 */
    355 	if (ixv_allocate_pci_resources(sc, pa)) {
    356 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    357 		error = ENXIO;
    358 		goto err_out;
    359 	}
    360 
    361 	/* SYSCTL APIs */
    362 	ixv_add_device_sysctls(sc);
    363 
    364 	/* Set up the timer callout and workqueue */
    365 	callout_init(&sc->timer, IXGBE_CALLOUT_FLAGS);
    366 	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
    367 	error = workqueue_create(&sc->timer_wq, wqname,
    368 	    ixv_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
    369 	    IXGBE_TASKLET_WQ_FLAGS);
    370 	if (error) {
    371 		aprint_error_dev(dev,
    372 		    "could not create timer workqueue (%d)\n", error);
    373 		goto err_out;
    374 	}
    375 
    376 	/* Save off the information about this board */
    377 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    378 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    379 	hw->vendor_id = PCI_VENDOR(id);
    380 	hw->device_id = PCI_PRODUCT(id);
    381 	hw->revision_id =
    382 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    383 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    384 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    385 
    386 	/* A subset of set_mac_type */
    387 	switch (hw->device_id) {
    388 	case IXGBE_DEV_ID_82599_VF:
    389 		hw->mac.type = ixgbe_mac_82599_vf;
    390 		str = "82599 VF";
    391 		break;
    392 	case IXGBE_DEV_ID_X540_VF:
    393 		hw->mac.type = ixgbe_mac_X540_vf;
    394 		str = "X540 VF";
    395 		break;
    396 	case IXGBE_DEV_ID_X550_VF:
    397 		hw->mac.type = ixgbe_mac_X550_vf;
    398 		str = "X550 VF";
    399 		break;
    400 	case IXGBE_DEV_ID_X550EM_X_VF:
    401 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    402 		str = "X550EM X VF";
    403 		break;
    404 	case IXGBE_DEV_ID_X550EM_A_VF:
    405 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    406 		str = "X550EM A VF";
    407 		break;
    408 	default:
    409 		/* Shouldn't get here since probe succeeded */
    410 		aprint_error_dev(dev, "Unknown device ID!\n");
    411 		error = ENXIO;
    412 		goto err_out;
    413 		break;
    414 	}
    415 	aprint_normal_dev(dev, "device %s\n", str);
    416 
    417 	ixv_init_device_features(sc);
    418 
    419 	/* Initialize the shared code */
    420 	error = ixgbe_init_ops_vf(hw);
    421 	if (error) {
    422 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    423 		error = EIO;
    424 		goto err_out;
    425 	}
    426 
    427 	/* Setup the mailbox */
    428 	ixgbe_init_mbx_params_vf(hw);
    429 
    430 	/* Set the right number of segments */
    431 	KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
    432 	sc->num_segs = IXGBE_SCATTER_DEFAULT;
    433 
    434 	/* Reset mbox api to 1.0 */
    435 	error = hw->mac.ops.reset_hw(hw);
    436 	if (error == IXGBE_ERR_RESET_FAILED)
    437 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    438 	else if (error)
    439 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    440 		    error);
    441 	if (error) {
    442 		error = EIO;
    443 		goto err_out;
    444 	}
    445 
    446 	error = hw->mac.ops.init_hw(hw);
    447 	if (error) {
    448 		aprint_error_dev(dev, "...init_hw() failed!\n");
    449 		error = EIO;
    450 		goto err_out;
    451 	}
    452 
    453 	/* Negotiate mailbox API version */
    454 	error = ixv_negotiate_api(sc);
    455 	if (error)
    456 		aprint_normal_dev(dev,
    457 		    "MBX API negotiation failed during attach!\n");
    458 	switch (hw->api_version) {
    459 	case ixgbe_mbox_api_10:
    460 		apivstr = "1.0";
    461 		break;
    462 	case ixgbe_mbox_api_20:
    463 		apivstr = "2.0";
    464 		break;
    465 	case ixgbe_mbox_api_11:
    466 		apivstr = "1.1";
    467 		break;
    468 	case ixgbe_mbox_api_12:
    469 		apivstr = "1.2";
    470 		break;
    471 	case ixgbe_mbox_api_13:
    472 		apivstr = "1.3";
    473 		break;
    474 	case ixgbe_mbox_api_14:
    475 		apivstr = "1.4";
    476 		break;
    477 	case ixgbe_mbox_api_15:
    478 		apivstr = "1.5";
    479 		break;
    480 	default:
    481 		apivstr = "unknown";
    482 		break;
    483 	}
    484 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    485 
    486 	/* If no mac address was assigned, make a random one */
    487 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    488 		u8 addr[ETHER_ADDR_LEN];
    489 		uint64_t rndval = cprng_strong64();
    490 
    491 		memcpy(addr, &rndval, sizeof(addr));
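         		/*
         		 * Clear the I/G (multicast) bit and set the U/L bit so
         		 * the result is a valid locally administered unicast
         		 * address.
         		 */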
    492 		addr[0] &= 0xFE;
    493 		addr[0] |= 0x02;
    494 		bcopy(addr, hw->mac.addr, sizeof(addr));
    495 	}
    496 
    497 	/* Register for VLAN events */
    498 	ether_set_vlan_cb(&sc->osdep.ec, ixv_vlan_cb);
    499 
    500 	/* Do descriptor calc and sanity checks */
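         	/*
         	 * The descriptor area must be a multiple of DBA_ALIGN bytes,
         	 * so the ring size must be a multiple of
         	 * DBA_ALIGN / sizeof(descriptor) (e.g. 8 when DBA_ALIGN is 128
         	 * and descriptors are 16 bytes) and lie within the MIN/MAX
         	 * bounds.
         	 */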
    501 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    502 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    503 		aprint_error_dev(dev, "Invalid TX ring size (%d). "
    504 		    "It must be between %d and %d, "
    505 		    "inclusive, and must be a multiple of %zu. "
    506 		    "Using default value of %d instead.\n",
    507 		    ixv_txd, MIN_TXD, MAX_TXD,
    508 		    DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc),
    509 		    DEFAULT_TXD);
    510 		sc->num_tx_desc = DEFAULT_TXD;
    511 	} else
    512 		sc->num_tx_desc = ixv_txd;
    513 
    514 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    515 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    516 		aprint_error_dev(dev, "Invalid RX ring size (%d). "
    517 		    "It must be between %d and %d, "
    518 		    "inclusive, and must be a multiple of %zu. "
    519 		    "Using default value of %d instead.\n",
    520 		    ixv_rxd, MIN_RXD, MAX_RXD,
    521 		    DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc),
    522 		    DEFAULT_RXD);
    523 		sc->num_rx_desc = DEFAULT_RXD;
    524 	} else
    525 		sc->num_rx_desc = ixv_rxd;
    526 
    527 	/* Sysctls for limiting the amount of work done in the taskqueues */
    528 	sc->rx_process_limit
    529 	    = (ixv_rx_process_limit <= sc->num_rx_desc)
    530 	    ? ixv_rx_process_limit : sc->num_rx_desc;
    531 	sc->tx_process_limit
    532 	    = (ixv_tx_process_limit <= sc->num_tx_desc)
    533 	    ? ixv_tx_process_limit : sc->num_tx_desc;
    534 
    535 	/* Set default high limit of copying mbuf in rxeof */
    536 	sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;
    537 
    538 	/* Setup MSI-X */
    539 	error = ixv_configure_interrupts(sc);
    540 	if (error)
    541 		goto err_out;
    542 
    543 	/* Allocate our TX/RX Queues */
    544 	if (ixgbe_allocate_queues(sc)) {
    545 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    546 		error = ENOMEM;
    547 		goto err_out;
    548 	}
    549 
    550 	/* hw.ix defaults init */
    551 	sc->enable_aim = ixv_enable_aim;
    552 	sc->max_interrupt_rate = ixv_max_interrupt_rate;
    553 
    554 	sc->txrx_use_workqueue = ixv_txrx_workqueue;
    555 
    556 	error = ixv_allocate_msix(sc, pa);
    557 	if (error) {
    558 		aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
    559 		goto err_late;
    560 	}
    561 
    562 	/* Setup OS specific network interface */
    563 	error = ixv_setup_interface(dev, sc);
    564 	if (error != 0) {
    565 		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
    566 		goto err_late;
    567 	}
    568 
    569 	/* Allocate multicast array memory */
    570 	sc->mta = malloc(sizeof(*sc->mta) *
    571 	    IXGBE_MAX_VF_MC, M_DEVBUF, M_WAITOK);
    572 
    573 	/* Check if VF was disabled by PF */
    574 	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
    575 	if (error) {
    576 		/* PF is not capable of controlling VF state. Enable the link. */
    577 		sc->link_enabled = TRUE;
    578 	}
    579 
    580 	/* Do the stats setup */
    581 	ixv_init_stats(sc);
    582 	ixv_add_stats_sysctls(sc);
    583 
    584 	if (sc->feat_en & IXGBE_FEATURE_NETMAP)
    585 		ixgbe_netmap_attach(sc);
    586 
    587 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_cap);
    588 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    589 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_en);
    590 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    591 
    592 	INIT_DEBUGOUT("ixv_attach: end");
    593 	sc->osdep.attached = true;
    594 
    595 	return;
    596 
    597 err_late:
    598 	ixgbe_free_queues(sc);
    599 err_out:
    600 	ixv_free_pci_resources(sc);
    601 	IXGBE_CORE_LOCK_DESTROY(sc);
    602 
    603 	return;
    604 } /* ixv_attach */
    605 
    606 /************************************************************************
    607  * ixv_detach - Device removal routine
    608  *
    609  *   Called when the driver is being removed.
    610  *   Stops the adapter and deallocates all the resources
    611  *   that were allocated for driver operation.
    612  *
    613  *   return 0 on success, positive on failure
    614  ************************************************************************/
    615 static int
    616 ixv_detach(device_t dev, int flags)
    617 {
    618 	struct ixgbe_softc *sc = device_private(dev);
    619 	struct ixgbe_hw *hw = &sc->hw;
    620 	struct tx_ring *txr = sc->tx_rings;
    621 	struct rx_ring *rxr = sc->rx_rings;
    622 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
    623 
    624 	INIT_DEBUGOUT("ixv_detach: begin");
    625 	if (sc->osdep.attached == false)
    626 		return 0;
    627 
    628 	/* Stop the interface. Callouts are stopped in it. */
    629 	ixv_ifstop(sc->ifp, 1);
    630 
    631 	if (VLAN_ATTACHED(&sc->osdep.ec) &&
    632 	    (flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == 0) {
    633 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    634 		return EBUSY;
    635 	}
    636 
    637 	ether_ifdetach(sc->ifp);
    638 	callout_halt(&sc->timer, NULL);
    639 	ixv_free_deferred_handlers(sc);
    640 
    641 	if (sc->feat_en & IXGBE_FEATURE_NETMAP)
    642 		netmap_detach(sc->ifp);
    643 
    644 	ixv_free_pci_resources(sc);
    645 #if 0 /* XXX the NetBSD port is probably missing something here */
    646 	bus_generic_detach(dev);
    647 #endif
    648 	if_detach(sc->ifp);
    649 	ifmedia_fini(&sc->media);
    650 	if_percpuq_destroy(sc->ipq);
    651 
    652 	sysctl_teardown(&sc->sysctllog);
    653 	evcnt_detach(&sc->efbig_tx_dma_setup);
    654 	evcnt_detach(&sc->mbuf_defrag_failed);
    655 	evcnt_detach(&sc->efbig2_tx_dma_setup);
    656 	evcnt_detach(&sc->einval_tx_dma_setup);
    657 	evcnt_detach(&sc->other_tx_dma_setup);
    658 	evcnt_detach(&sc->eagain_tx_dma_setup);
    659 	evcnt_detach(&sc->enomem_tx_dma_setup);
    660 	evcnt_detach(&sc->watchdog_events);
    661 	evcnt_detach(&sc->tso_err);
    662 	evcnt_detach(&sc->admin_irqev);
    663 	evcnt_detach(&sc->link_workev);
    664 
    665 	txr = sc->tx_rings;
    666 	for (int i = 0; i < sc->num_queues; i++, rxr++, txr++) {
    667 		evcnt_detach(&sc->queues[i].irqs);
    668 		evcnt_detach(&sc->queues[i].handleq);
    669 		evcnt_detach(&sc->queues[i].req);
    670 		evcnt_detach(&txr->total_packets);
    671 #ifndef IXGBE_LEGACY_TX
    672 		evcnt_detach(&txr->pcq_drops);
    673 #endif
    674 		evcnt_detach(&txr->no_desc_avail);
    675 		evcnt_detach(&txr->tso_tx);
    676 
    677 		evcnt_detach(&rxr->rx_packets);
    678 		evcnt_detach(&rxr->rx_bytes);
    679 		evcnt_detach(&rxr->rx_copies);
    680 		evcnt_detach(&rxr->no_mbuf);
    681 		evcnt_detach(&rxr->rx_discarded);
    682 	}
    683 	evcnt_detach(&stats->ipcs);
    684 	evcnt_detach(&stats->l4cs);
    685 	evcnt_detach(&stats->ipcs_bad);
    686 	evcnt_detach(&stats->l4cs_bad);
    687 
    688 	/* Packet Reception Stats */
    689 	evcnt_detach(&stats->vfgorc);
    690 	evcnt_detach(&stats->vfgprc);
    691 	evcnt_detach(&stats->vfmprc);
    692 
    693 	/* Packet Transmission Stats */
    694 	evcnt_detach(&stats->vfgotc);
    695 	evcnt_detach(&stats->vfgptc);
    696 
    697 	/* Mailbox Stats */
    698 	evcnt_detach(&hw->mbx.stats.msgs_tx);
    699 	evcnt_detach(&hw->mbx.stats.msgs_rx);
    700 	evcnt_detach(&hw->mbx.stats.acks);
    701 	evcnt_detach(&hw->mbx.stats.reqs);
    702 	evcnt_detach(&hw->mbx.stats.rsts);
    703 
    704 	ixgbe_free_queues(sc);
    705 
    706 	IXGBE_CORE_LOCK_DESTROY(sc);
    707 
    708 	return (0);
    709 } /* ixv_detach */
    710 
    711 /************************************************************************
    712  * ixv_init_locked - Init entry point
    713  *
     714  *   Used in two ways: by the stack, as the init entry point of
     715  *   the network interface, and by the driver, as a hw/sw
     716  *   initialization routine to bring the device to a consistent
     717  *   state.
     718  *
    720  ************************************************************************/
    721 static void
    722 ixv_init_locked(struct ixgbe_softc *sc)
    723 {
    724 	struct ifnet	*ifp = sc->ifp;
    725 	device_t	dev = sc->dev;
    726 	struct ixgbe_hw *hw = &sc->hw;
    727 	struct ix_queue	*que;
    728 	int		error = 0;
    729 	uint32_t mask;
    730 	int i;
    731 
    732 	INIT_DEBUGOUT("ixv_init_locked: begin");
    733 	KASSERT(mutex_owned(&sc->core_mtx));
    734 	hw->adapter_stopped = FALSE;
    735 	hw->mac.ops.stop_adapter(hw);
    736 	callout_stop(&sc->timer);
    737 	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
    738 		que->disabled_count = 0;
    739 
    740 	sc->max_frame_size =
    741 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
    742 
     743 	/* Reprogram RAR[0] in case the user changed it */
    744 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    745 
     746 	/* Get the latest MAC address; the user may have set an LAA */
    747 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
    748 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    749 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    750 
    751 	/* Prepare transmit descriptors and buffers */
    752 	if (ixgbe_setup_transmit_structures(sc)) {
    753 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    754 		ixv_stop_locked(sc);
    755 		return;
    756 	}
    757 
    758 	/* Reset VF and renegotiate mailbox API version */
    759 	hw->mac.ops.reset_hw(hw);
    760 	hw->mac.ops.start_hw(hw);
    761 	error = ixv_negotiate_api(sc);
    762 	if (error)
    763 		device_printf(dev,
    764 		    "Mailbox API negotiation failed in init_locked!\n");
    765 
    766 	ixv_initialize_transmit_units(sc);
    767 
    768 	/* Setup Multicast table */
    769 	ixv_set_rxfilter(sc);
    770 
    771 	/* Use fixed buffer size, even for jumbo frames */
    772 	sc->rx_mbuf_sz = MCLBYTES;
    773 
    774 	/* Prepare receive descriptors and buffers */
    775 	error = ixgbe_setup_receive_structures(sc);
    776 	if (error) {
    777 		device_printf(dev,
    778 		    "Could not setup receive structures (err = %d)\n", error);
    779 		ixv_stop_locked(sc);
    780 		return;
    781 	}
    782 
    783 	/* Configure RX settings */
    784 	ixv_initialize_receive_units(sc);
    785 
    786 	/* Initialize variable holding task enqueue requests interrupts */
    787 	sc->task_requests = 0;
    788 
    789 	/* Set up VLAN offload and filter */
    790 	ixv_setup_vlan_support(sc);
    791 
    792 	/* Set up MSI-X routing */
    793 	ixv_configure_ivars(sc);
    794 
    795 	/* Set up auto-mask */
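         	/*
         	 * With EIAM auto-mask, hardware clears a vector's VTEIMS bit
         	 * when its interrupt fires and the handler re-arms it when
         	 * done (e.g. one queue on vector 0 plus the mailbox on
         	 * vector 1 yields mask == 0x3).
         	 */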
    796 	mask = (1 << sc->vector);
    797 	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
    798 		mask |= (1 << que->msix);
    799 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
    800 
    801 	/* Set moderation on the Link interrupt */
    802 	ixv_eitr_write(sc, sc->vector, IXGBE_LINK_ITR);
    803 
    804 	/* Stats init */
    805 	ixv_init_stats(sc);
    806 
    807 	/* Config/Enable Link */
    808 	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
    809 	if (error) {
    810 		/* PF is not capable of controlling VF state. Enable the link. */
    811 		sc->link_enabled = TRUE;
    812 	} else if (sc->link_enabled == FALSE)
    813 		device_printf(dev, "VF is disabled by PF\n");
    814 
    815 	hw->mac.get_link_status = TRUE;
    816 	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
    817 	    FALSE);
    818 
    819 	/* Start watchdog */
    820 	callout_reset(&sc->timer, hz, ixv_local_timer, sc);
    821 	atomic_store_relaxed(&sc->timer_pending, 0);
    822 
    823 	/* OK to schedule workqueues. */
    824 	sc->schedule_wqs_ok = true;
    825 
    826 	/* Update saved flags. See ixgbe_ifflags_cb() */
    827 	sc->if_flags = ifp->if_flags;
    828 	sc->ec_capenable = sc->osdep.ec.ec_capenable;
    829 
    830 	/* Inform the stack we're ready */
    831 	ifp->if_flags |= IFF_RUNNING;
    832 
    833 	/* And now turn on interrupts */
    834 	ixv_enable_intr(sc);
    835 
    836 	return;
    837 } /* ixv_init_locked */
    838 
    839 /************************************************************************
    840  * ixv_enable_queue
    841  ************************************************************************/
    842 static inline void
    843 ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
    844 {
    845 	struct ixgbe_hw *hw = &sc->hw;
    846 	struct ix_queue *que = &sc->queues[vector];
    847 	u32		queue = 1UL << vector;
    848 	u32		mask;
    849 
    850 	mutex_enter(&que->dc_mtx);
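         	/*
         	 * disabled_count is a nesting count; the interrupt is
         	 * re-enabled in hardware only when the outermost disable is
         	 * undone.
         	 */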
    851 	if (que->disabled_count > 0 && --que->disabled_count > 0)
    852 		goto out;
    853 
    854 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    855 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    856 out:
    857 	mutex_exit(&que->dc_mtx);
    858 } /* ixv_enable_queue */
    859 
    860 /************************************************************************
    861  * ixv_disable_queue
    862  ************************************************************************/
    863 static inline void
    864 ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
    865 {
    866 	struct ixgbe_hw *hw = &sc->hw;
    867 	struct ix_queue *que = &sc->queues[vector];
    868 	u32		queue = 1UL << vector;
    869 	u32		mask;
    870 
    871 	mutex_enter(&que->dc_mtx);
    872 	if (que->disabled_count++ > 0)
    873 		goto  out;
    874 
    875 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    876 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    877 out:
    878 	mutex_exit(&que->dc_mtx);
    879 } /* ixv_disable_queue */
    880 
    881 #if 0
    882 static inline void
    883 ixv_rearm_queues(struct ixgbe_softc *sc, u64 queues)
    884 {
    885 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    886 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEICS, mask);
    887 } /* ixv_rearm_queues */
    888 #endif
    889 
    891 /************************************************************************
    892  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    893  ************************************************************************/
    894 static int
    895 ixv_msix_que(void *arg)
    896 {
    897 	struct ix_queue	*que = arg;
    898 	struct ixgbe_softc *sc = que->sc;
    899 	struct tx_ring	*txr = que->txr;
    900 	struct rx_ring	*rxr = que->rxr;
    901 	bool		more;
    902 	u32		newitr = 0;
    903 
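         	/*
         	 * Mask this queue's interrupt while it is being serviced; it
         	 * is re-enabled below, or from the deferred handler when more
         	 * work remains.
         	 */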
    904 	ixv_disable_queue(sc, que->msix);
    905 	IXGBE_EVC_ADD(&que->irqs, 1);
    906 
    907 #ifdef __NetBSD__
    908 	/* Don't run ixgbe_rxeof in interrupt context */
    909 	more = true;
    910 #else
    911 	more = ixgbe_rxeof(que);
    912 #endif
    913 
    914 	IXGBE_TX_LOCK(txr);
    915 	ixgbe_txeof(txr);
    916 	IXGBE_TX_UNLOCK(txr);
    917 
    918 	/* Do AIM now? */
    919 
    920 	if (sc->enable_aim == false)
    921 		goto no_calc;
    922 	/*
    923 	 * Do Adaptive Interrupt Moderation:
    924 	 *  - Write out last calculated setting
    925 	 *  - Calculate based on average size over
    926 	 *    the last interval.
    927 	 */
    928 	if (que->eitr_setting)
    929 		ixv_eitr_write(sc, que->msix, que->eitr_setting);
    930 
    931 	que->eitr_setting = 0;
    932 
    933 	/* Idle, do nothing */
    934 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    935 		goto no_calc;
    936 
    937 	if ((txr->bytes) && (txr->packets))
    938 		newitr = txr->bytes/txr->packets;
    939 	if ((rxr->bytes) && (rxr->packets))
    940 		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
    941 	newitr += 24; /* account for hardware frame, crc */
    942 
    943 	/* set an upper boundary */
    944 	newitr = uimin(newitr, 3000);
    945 
    946 	/* Be nice to the mid range */
    947 	if ((newitr > 300) && (newitr < 1200))
    948 		newitr = (newitr / 3);
    949 	else
    950 		newitr = (newitr / 2);
    951 
    952 	/*
     953 	 * When RSC is used, the ITR interval must be larger than
     954 	 * RSC_DELAY; we currently use 2us for RSC_DELAY. On 100M (and
     955 	 * presumably 10M, not documented) the minimum interval is always
     956 	 * greater than 2us, but on 1G and higher it is not.
    957 	 */
    958 	if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
    959 	    && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
    960 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
    961 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
    962 	}
    963 
    964 	/* save for next interrupt */
    965 	que->eitr_setting = newitr;
    966 
    967 	/* Reset state */
    968 	txr->bytes = 0;
    969 	txr->packets = 0;
    970 	rxr->bytes = 0;
    971 	rxr->packets = 0;
    972 
    973 no_calc:
    974 	if (more)
    975 		softint_schedule(que->que_si);
    976 	else /* Re-enable this interrupt */
    977 		ixv_enable_queue(sc, que->msix);
    978 
    979 	return 1;
    980 } /* ixv_msix_que */
    981 
    982 /************************************************************************
    983  * ixv_msix_mbx
    984  ************************************************************************/
    985 static int
    986 ixv_msix_mbx(void *arg)
    987 {
    988 	struct ixgbe_softc *sc = arg;
    989 	struct ixgbe_hw *hw = &sc->hw;
    990 
    991 	IXGBE_EVC_ADD(&sc->admin_irqev, 1);
    992 	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
    993 
    994 	/* Link status change */
    995 	hw->mac.get_link_status = TRUE;
    996 	atomic_or_32(&sc->task_requests, IXGBE_REQUEST_TASK_MBX);
    997 	ixv_schedule_admin_tasklet(sc);
    998 
    999 	return 1;
   1000 } /* ixv_msix_mbx */
   1001 
   1002 static void
   1003 ixv_eitr_write(struct ixgbe_softc *sc, uint32_t index, uint32_t itr)
   1004 {
   1005 
   1006 	/*
    1007 	 * Every VF-capable device is newer than the 82598, so this
    1008 	 * function is simple.
   1009 	 */
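         	/* CNT_WDIS keeps the write from resetting the internal ITR counter. */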
   1010 	itr |= IXGBE_EITR_CNT_WDIS;
   1011 
   1012 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(index), itr);
   1013 }
   1014 
   1015 
   1016 /************************************************************************
   1017  * ixv_media_status - Media Ioctl callback
   1018  *
   1019  *   Called whenever the user queries the status of
   1020  *   the interface using ifconfig.
   1021  ************************************************************************/
   1022 static void
   1023 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   1024 {
   1025 	struct ixgbe_softc *sc = ifp->if_softc;
   1026 
   1027 	INIT_DEBUGOUT("ixv_media_status: begin");
   1028 	ixv_update_link_status(sc);
   1029 
   1030 	ifmr->ifm_status = IFM_AVALID;
   1031 	ifmr->ifm_active = IFM_ETHER;
   1032 
   1033 	if (sc->link_active != LINK_STATE_UP) {
   1034 		ifmr->ifm_active |= IFM_NONE;
   1035 		return;
   1036 	}
   1037 
   1038 	ifmr->ifm_status |= IFM_ACTIVE;
   1039 
   1040 	switch (sc->link_speed) {
    1041 	case IXGBE_LINK_SPEED_10GB_FULL:
    1042 		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    1043 		break;
    1044 	case IXGBE_LINK_SPEED_5GB_FULL:
    1045 		ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
    1046 		break;
    1047 	case IXGBE_LINK_SPEED_2_5GB_FULL:
    1048 		ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
    1049 		break;
    1050 	case IXGBE_LINK_SPEED_1GB_FULL:
    1051 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    1052 		break;
    1053 	case IXGBE_LINK_SPEED_100_FULL:
    1054 		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    1055 		break;
    1056 	case IXGBE_LINK_SPEED_10_FULL:
    1057 		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    1058 		break;
   1059 	}
   1060 
   1061 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   1062 } /* ixv_media_status */
   1063 
   1064 /************************************************************************
   1065  * ixv_media_change - Media Ioctl callback
   1066  *
   1067  *   Called when the user changes speed/duplex using
    1068  *   the media/mediaopt options of ifconfig.
   1069  ************************************************************************/
   1070 static int
   1071 ixv_media_change(struct ifnet *ifp)
   1072 {
   1073 	struct ixgbe_softc *sc = ifp->if_softc;
   1074 	struct ifmedia *ifm = &sc->media;
   1075 
   1076 	INIT_DEBUGOUT("ixv_media_change: begin");
   1077 
   1078 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1079 		return (EINVAL);
   1080 
   1081 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1082 	case IFM_AUTO:
   1083 		break;
   1084 	default:
   1085 		device_printf(sc->dev, "Only auto media type\n");
   1086 		return (EINVAL);
   1087 	}
   1088 
   1089 	return (0);
   1090 } /* ixv_media_change */
   1091 
   1092 static void
   1093 ixv_schedule_admin_tasklet(struct ixgbe_softc *sc)
   1094 {
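         	/*
         	 * admin_pending ensures that the admin work is enqueued at
         	 * most once; the flag is cleared again once the work has been
         	 * processed or the interface is stopped.
         	 */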
   1095 	if (sc->schedule_wqs_ok) {
   1096 		if (atomic_cas_uint(&sc->admin_pending, 0, 1) == 0)
   1097 			workqueue_enqueue(sc->admin_wq,
   1098 			    &sc->admin_wc, NULL);
   1099 	}
   1100 }
   1101 
   1102 /************************************************************************
   1103  * ixv_negotiate_api
   1104  *
   1105  *   Negotiate the Mailbox API with the PF;
   1106  *   start with the most featured API first.
   1107  ************************************************************************/
   1108 static int
   1109 ixv_negotiate_api(struct ixgbe_softc *sc)
   1110 {
   1111 	struct ixgbe_hw *hw = &sc->hw;
   1112 	int		mbx_api[] = { ixgbe_mbox_api_15,
   1113 				      ixgbe_mbox_api_13,
   1114 				      ixgbe_mbox_api_12,
   1115 				      ixgbe_mbox_api_11,
   1116 				      ixgbe_mbox_api_10,
   1117 				      ixgbe_mbox_api_unknown };
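         	/*
         	 * The list is ordered newest first and terminated by
         	 * ixgbe_mbox_api_unknown; API 1.5 and later additionally
         	 * switch the mailbox to the extended parameters.
         	 */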
   1118 	int		i = 0;
   1119 
   1120 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1121 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) {
   1122 			if (hw->api_version >= ixgbe_mbox_api_15)
   1123 				ixgbe_upgrade_mbx_params_vf(hw);
   1124 			return (0);
   1125 		}
   1126 		i++;
   1127 	}
   1128 
   1129 	return (EINVAL);
   1130 } /* ixv_negotiate_api */
   1131 
   1132 
   1133 /************************************************************************
   1134  * ixv_set_rxfilter - Multicast Update
   1135  *
   1136  *   Called whenever multicast address list is updated.
   1137  ************************************************************************/
   1138 static int
   1139 ixv_set_rxfilter(struct ixgbe_softc *sc)
   1140 {
   1141 	struct ixgbe_mc_addr	*mta;
   1142 	struct ifnet		*ifp = sc->ifp;
   1143 	struct ixgbe_hw		*hw = &sc->hw;
   1144 	u8			*update_ptr;
   1145 	int			mcnt = 0;
   1146 	struct ethercom		*ec = &sc->osdep.ec;
   1147 	struct ether_multi	*enm;
   1148 	struct ether_multistep	step;
   1149 	bool			overflow = false;
   1150 	int			error, rc = 0;
   1151 
   1152 	KASSERT(mutex_owned(&sc->core_mtx));
   1153 	IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");
   1154 
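         	/*
         	 * Strategy: try PROMISC when requested (1), gather the
         	 * multicast list (2), fall back to ALLMULTI when the list
         	 * overflows IXGBE_MAX_VF_MC or contains ranges (3), and
         	 * otherwise program the exact list through the PF mailbox (4).
         	 */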
   1155 	mta = sc->mta;
   1156 	bzero(mta, sizeof(*mta) * IXGBE_MAX_VF_MC);
   1157 
   1158 	/* 1: For PROMISC */
   1159 	if (ifp->if_flags & IFF_PROMISC) {
   1160 		error = hw->mac.ops.update_xcast_mode(hw,
   1161 		    IXGBEVF_XCAST_MODE_PROMISC);
   1162 		if (error == IXGBE_ERR_NOT_TRUSTED) {
   1163 			device_printf(sc->dev,
   1164 			    "this interface is not trusted\n");
   1165 			error = EPERM;
   1166 		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
   1167 			device_printf(sc->dev,
   1168 			    "the PF doesn't support promisc mode\n");
   1169 			error = EOPNOTSUPP;
   1170 		} else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
   1171 			device_printf(sc->dev,
    1172 			    "the PF may not be in promisc mode\n");
   1173 			error = EINVAL;
   1174 		} else if (error) {
   1175 			device_printf(sc->dev,
   1176 			    "failed to set promisc mode. error = %d\n",
   1177 			    error);
   1178 			error = EIO;
   1179 		} else
   1180 			return 0;
   1181 		rc = error;
   1182 	}
   1183 
   1184 	/* 2: For ALLMULTI or normal */
   1185 	ETHER_LOCK(ec);
   1186 	ETHER_FIRST_MULTI(step, ec, enm);
   1187 	while (enm != NULL) {
   1188 		if ((mcnt >= IXGBE_MAX_VF_MC) ||
   1189 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1190 			ETHER_ADDR_LEN) != 0)) {
   1191 			overflow = true;
   1192 			break;
   1193 		}
   1194 		bcopy(enm->enm_addrlo,
   1195 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   1196 		mcnt++;
   1197 		ETHER_NEXT_MULTI(step, enm);
   1198 	}
   1199 	ETHER_UNLOCK(ec);
   1200 
   1201 	/* 3: For ALLMULTI */
   1202 	if (overflow) {
   1203 		error = hw->mac.ops.update_xcast_mode(hw,
   1204 		    IXGBEVF_XCAST_MODE_ALLMULTI);
   1205 		if (error == IXGBE_ERR_NOT_TRUSTED) {
   1206 			device_printf(sc->dev,
   1207 			    "this interface is not trusted\n");
   1208 			error = EPERM;
   1209 		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
   1210 			device_printf(sc->dev,
   1211 			    "the PF doesn't support allmulti mode\n");
   1212 			error = EOPNOTSUPP;
   1213 		} else if (error) {
   1214 			device_printf(sc->dev,
   1215 			    "number of Ethernet multicast addresses "
   1216 			    "exceeds the limit (%d). error = %d\n",
   1217 			    IXGBE_MAX_VF_MC, error);
   1218 			error = ENOSPC;
   1219 		} else {
   1220 			ETHER_LOCK(ec);
   1221 			ec->ec_flags |= ETHER_F_ALLMULTI;
   1222 			ETHER_UNLOCK(ec);
   1223 			return rc; /* Promisc might have failed */
   1224 		}
   1225 
   1226 		if (rc == 0)
   1227 			rc = error;
   1228 
   1229 		/* Continue to update the multicast table as many as we can */
   1230 	}
   1231 
   1232 	/* 4: For normal operation */
   1233 	error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
   1234 	if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
   1235 		/* Normal operation */
   1236 		ETHER_LOCK(ec);
   1237 		ec->ec_flags &= ~ETHER_F_ALLMULTI;
   1238 		ETHER_UNLOCK(ec);
   1239 		error = 0;
   1240 	} else if (error) {
   1241 		device_printf(sc->dev,
   1242 		    "failed to set Ethernet multicast address "
   1243 		    "operation to normal. error = %d\n", error);
   1244 	}
   1245 
   1246 	update_ptr = (u8 *)mta;
   1247 	error = sc->hw.mac.ops.update_mc_addr_list(&sc->hw,
   1248 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1249 	if (rc == 0)
   1250 		rc = error;
   1251 
   1252 	return rc;
   1253 } /* ixv_set_rxfilter */
   1254 
   1255 /************************************************************************
   1256  * ixv_mc_array_itr
   1257  *
   1258  *   An iterator function needed by the multicast shared code.
    1259  *   It feeds the addresses gathered by ixv_set_rxfilter() to the
    1260  *   shared code routine one at a time.
   1261  ************************************************************************/
   1262 static u8 *
   1263 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1264 {
   1265 	struct ixgbe_mc_addr *mta;
   1266 
   1267 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   1268 
   1269 	*vmdq = 0;
   1270 	*update_ptr = (u8*)(mta + 1);
   1271 
   1272 	return (mta->addr);
   1273 } /* ixv_mc_array_itr */
   1274 
   1275 /************************************************************************
   1276  * ixv_local_timer - Timer routine
   1277  *
    1278  *   Schedules ixv_handle_timer() on a workqueue; the handler
    1279  *   checks link status, updates statistics and runs the watchdog.
   1280  ************************************************************************/
   1281 static void
   1282 ixv_local_timer(void *arg)
   1283 {
   1284 	struct ixgbe_softc *sc = arg;
   1285 
   1286 	if (sc->schedule_wqs_ok) {
   1287 		if (atomic_cas_uint(&sc->timer_pending, 0, 1) == 0)
   1288 			workqueue_enqueue(sc->timer_wq,
   1289 			    &sc->timer_wc, NULL);
   1290 	}
   1291 }
   1292 
   1293 static void
   1294 ixv_handle_timer(struct work *wk, void *context)
   1295 {
   1296 	struct ixgbe_softc *sc = context;
   1297 	device_t	dev = sc->dev;
   1298 	struct ix_queue	*que = sc->queues;
   1299 	u64		queues = 0;
   1300 	u64		v0, v1, v2, v3, v4, v5, v6, v7;
   1301 	int		hung = 0;
   1302 	int		i;
   1303 
   1304 	IXGBE_CORE_LOCK(sc);
   1305 
   1306 	if (ixv_check_link(sc)) {
   1307 		ixv_init_locked(sc);
   1308 		IXGBE_CORE_UNLOCK(sc);
   1309 		return;
   1310 	}
   1311 
   1312 	/* Stats Update */
   1313 	ixv_update_stats(sc);
   1314 
   1315 	/* Update some event counters */
   1316 	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
   1317 	que = sc->queues;
   1318 	for (i = 0; i < sc->num_queues; i++, que++) {
   1319 		struct tx_ring	*txr = que->txr;
   1320 
   1321 		v0 += txr->q_efbig_tx_dma_setup;
   1322 		v1 += txr->q_mbuf_defrag_failed;
   1323 		v2 += txr->q_efbig2_tx_dma_setup;
   1324 		v3 += txr->q_einval_tx_dma_setup;
   1325 		v4 += txr->q_other_tx_dma_setup;
   1326 		v5 += txr->q_eagain_tx_dma_setup;
   1327 		v6 += txr->q_enomem_tx_dma_setup;
   1328 		v7 += txr->q_tso_err;
   1329 	}
   1330 	IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, v0);
   1331 	IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, v1);
   1332 	IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, v2);
   1333 	IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, v3);
   1334 	IXGBE_EVC_STORE(&sc->other_tx_dma_setup, v4);
   1335 	IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, v5);
   1336 	IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, v6);
   1337 	IXGBE_EVC_STORE(&sc->tso_err, v7);
   1338 
   1339 	/*
   1340 	 * Check the TX queues status
   1341 	 *	- mark hung queues so we don't schedule on them
   1342 	 *	- watchdog only if all queues show hung
   1343 	 */
   1344 	que = sc->queues;
   1345 	for (i = 0; i < sc->num_queues; i++, que++) {
   1346 		/* Keep track of queues with work for soft irq */
   1347 		if (que->txr->busy)
   1348 			queues |= ((u64)1 << que->me);
   1349 		/*
    1350 		 * Each time txeof runs without cleaning while there are
    1351 		 * uncleaned descriptors, it increments busy. If we reach
    1352 		 * the MAX we declare the queue hung.
   1353 		 */
   1354 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1355 			++hung;
   1356 			/* Mark the queue as inactive */
   1357 			sc->active_queues &= ~((u64)1 << que->me);
   1358 			continue;
   1359 		} else {
   1360 			/* Check if we've come back from hung */
   1361 			if ((sc->active_queues & ((u64)1 << que->me)) == 0)
   1362 				sc->active_queues |= ((u64)1 << que->me);
   1363 		}
   1364 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1365 			device_printf(dev,
    1366 			    "Warning: queue %d appears to be hung!\n", i);
   1367 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1368 			++hung;
   1369 		}
   1370 	}
   1371 
   1372 	/* Only truly watchdog if all queues show hung */
   1373 	if (hung == sc->num_queues)
   1374 		goto watchdog;
   1375 #if 0
   1376 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1377 		ixv_rearm_queues(sc, queues);
   1378 	}
   1379 #endif
   1380 
   1381 	atomic_store_relaxed(&sc->timer_pending, 0);
   1382 	IXGBE_CORE_UNLOCK(sc);
   1383 	callout_reset(&sc->timer, hz, ixv_local_timer, sc);
   1384 
   1385 	return;
   1386 
   1387 watchdog:
   1388 	device_printf(sc->dev, "Watchdog timeout -- resetting\n");
   1389 	sc->ifp->if_flags &= ~IFF_RUNNING;
   1390 	IXGBE_EVC_ADD(&sc->watchdog_events, 1);
   1391 	ixv_init_locked(sc);
   1392 	IXGBE_CORE_UNLOCK(sc);
   1393 } /* ixv_handle_timer */
   1394 
   1395 /************************************************************************
   1396  * ixv_update_link_status - Update OS on link state
   1397  *
   1398  * Note: Only updates the OS on the cached link state.
   1399  *	 The real check of the hardware only happens with
   1400  *	 a link interrupt.
   1401  ************************************************************************/
   1402 static void
   1403 ixv_update_link_status(struct ixgbe_softc *sc)
   1404 {
   1405 	struct ifnet *ifp = sc->ifp;
   1406 	device_t     dev = sc->dev;
   1407 
   1408 	KASSERT(mutex_owned(&sc->core_mtx));
   1409 
   1410 	if (sc->link_up && sc->link_enabled) {
   1411 		if (sc->link_active != LINK_STATE_UP) {
   1412 			if (bootverbose) {
   1413 				const char *bpsmsg;
   1414 
   1415 				switch (sc->link_speed) {
   1416 				case IXGBE_LINK_SPEED_10GB_FULL:
   1417 					bpsmsg = "10 Gbps";
   1418 					break;
   1419 				case IXGBE_LINK_SPEED_5GB_FULL:
   1420 					bpsmsg = "5 Gbps";
   1421 					break;
   1422 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1423 					bpsmsg = "2.5 Gbps";
   1424 					break;
   1425 				case IXGBE_LINK_SPEED_1GB_FULL:
   1426 					bpsmsg = "1 Gbps";
   1427 					break;
   1428 				case IXGBE_LINK_SPEED_100_FULL:
   1429 					bpsmsg = "100 Mbps";
   1430 					break;
   1431 				case IXGBE_LINK_SPEED_10_FULL:
   1432 					bpsmsg = "10 Mbps";
   1433 					break;
   1434 				default:
   1435 					bpsmsg = "unknown speed";
   1436 					break;
   1437 				}
    1438 				device_printf(dev, "Link is up %s %s\n",
    1439 				    bpsmsg, "Full Duplex");
   1440 			}
   1441 			sc->link_active = LINK_STATE_UP;
   1442 			if_link_state_change(ifp, LINK_STATE_UP);
   1443 		}
   1444 	} else {
   1445 		/*
    1446 		 * Report only when the link state changes to DOWN, i.e.
   1447 		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
   1448 		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
   1449 		 */
   1450 		if (sc->link_active != LINK_STATE_DOWN) {
   1451 			if (bootverbose)
   1452 				device_printf(dev, "Link is Down\n");
   1453 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1454 			sc->link_active = LINK_STATE_DOWN;
   1455 		}
   1456 	}
   1457 } /* ixv_update_link_status */
   1458 
   1459 
   1460 /************************************************************************
    1461  * ixv_ifstop - Stop the hardware
    1462  *
    1463  *   Disables all traffic on the adapter by issuing a
    1464  *   global reset on the MAC.
   1465  ************************************************************************/
   1466 static void
   1467 ixv_ifstop(struct ifnet *ifp, int disable)
   1468 {
   1469 	struct ixgbe_softc *sc = ifp->if_softc;
   1470 
   1471 	IXGBE_CORE_LOCK(sc);
   1472 	ixv_stop_locked(sc);
   1473 	IXGBE_CORE_UNLOCK(sc);
   1474 
   1475 	workqueue_wait(sc->admin_wq, &sc->admin_wc);
   1476 	atomic_store_relaxed(&sc->admin_pending, 0);
   1477 	workqueue_wait(sc->timer_wq, &sc->timer_wc);
   1478 	atomic_store_relaxed(&sc->timer_pending, 0);
   1479 }
   1480 
   1481 static void
   1482 ixv_stop_locked(void *arg)
   1483 {
   1484 	struct ifnet	*ifp;
   1485 	struct ixgbe_softc *sc = arg;
   1486 	struct ixgbe_hw *hw = &sc->hw;
   1487 
   1488 	ifp = sc->ifp;
   1489 
   1490 	KASSERT(mutex_owned(&sc->core_mtx));
   1491 
   1492 	INIT_DEBUGOUT("ixv_stop_locked: begin\n");
   1493 	ixv_disable_intr(sc);
   1494 
   1495 	/* Tell the stack that the interface is no longer active */
   1496 	ifp->if_flags &= ~IFF_RUNNING;
   1497 
   1498 	hw->mac.ops.reset_hw(hw);
   1499 	sc->hw.adapter_stopped = FALSE;
   1500 	hw->mac.ops.stop_adapter(hw);
   1501 	callout_stop(&sc->timer);
   1502 
   1503 	/* Don't schedule workqueues. */
   1504 	sc->schedule_wqs_ok = false;
   1505 
   1506 	/* reprogram the RAR[0] in case user changed it. */
   1507 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1508 
   1509 	return;
   1510 } /* ixv_stop_locked */
   1511 
   1512 
   1513 /************************************************************************
   1514  * ixv_allocate_pci_resources
   1515  ************************************************************************/
   1516 static int
   1517 ixv_allocate_pci_resources(struct ixgbe_softc *sc,
   1518     const struct pci_attach_args *pa)
   1519 {
   1520 	pcireg_t memtype, csr;
   1521 	device_t dev = sc->dev;
   1522 	bus_addr_t addr;
   1523 	int flags;
   1524 
   1525 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1526 	switch (memtype) {
   1527 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1528 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1529 		sc->osdep.mem_bus_space_tag = pa->pa_memt;
   1530 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1531 		      memtype, &addr, &sc->osdep.mem_size, &flags) != 0)
   1532 			goto map_err;
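         		/*
         		 * Device registers have read side effects, so never map
         		 * them prefetchable even if the BAR allows it.
         		 */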
   1533 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1534 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1535 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1536 		}
   1537 		if (bus_space_map(sc->osdep.mem_bus_space_tag, addr,
   1538 		     sc->osdep.mem_size, flags,
   1539 		     &sc->osdep.mem_bus_space_handle) != 0) {
   1540 map_err:
   1541 			sc->osdep.mem_size = 0;
   1542 			aprint_error_dev(dev, "unable to map BAR0\n");
   1543 			return ENXIO;
   1544 		}
   1545 		/*
    1546 		 * Enable address decoding for the memory range in case it's not
   1547 		 * set.
   1548 		 */
   1549 		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1550 		    PCI_COMMAND_STATUS_REG);
   1551 		csr |= PCI_COMMAND_MEM_ENABLE;
   1552 		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
   1553 		    csr);
   1554 		break;
   1555 	default:
   1556 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1557 		return ENXIO;
   1558 	}
   1559 
    1560 	/* Pick up the tunable number of queues */
   1561 	sc->num_queues = ixv_num_queues;
   1562 
   1563 	return (0);
   1564 } /* ixv_allocate_pci_resources */
   1565 
   1566 static void
   1567 ixv_free_deferred_handlers(struct ixgbe_softc *sc)
   1568 {
   1569 	struct ix_queue *que = sc->queues;
   1570 	struct tx_ring *txr = sc->tx_rings;
   1571 	int i;
   1572 
   1573 	for (i = 0; i < sc->num_queues; i++, que++, txr++) {
   1574 		if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   1575 			if (txr->txr_si != NULL)
   1576 				softint_disestablish(txr->txr_si);
   1577 		}
   1578 		if (que->que_si != NULL)
   1579 			softint_disestablish(que->que_si);
   1580 	}
   1581 	if (sc->txr_wq != NULL)
   1582 		workqueue_destroy(sc->txr_wq);
   1583 	if (sc->txr_wq_enqueued != NULL)
   1584 		percpu_free(sc->txr_wq_enqueued, sizeof(u_int));
   1585 	if (sc->que_wq != NULL)
   1586 		workqueue_destroy(sc->que_wq);
   1587 
    1588 	/* Destroy the Mailbox (link) and timer workqueues */
   1589 	if (sc->admin_wq != NULL) {
   1590 		workqueue_destroy(sc->admin_wq);
   1591 		sc->admin_wq = NULL;
   1592 	}
   1593 	if (sc->timer_wq != NULL) {
   1594 		workqueue_destroy(sc->timer_wq);
   1595 		sc->timer_wq = NULL;
   1596 	}
   1597 } /* ixv_free_deferred_handlers */
   1598 
   1599 /************************************************************************
   1600  * ixv_free_pci_resources
   1601  ************************************************************************/
   1602 static void
   1603 ixv_free_pci_resources(struct ixgbe_softc *sc)
   1604 {
   1605 	struct ix_queue *que = sc->queues;
   1606 	int		rid;
   1607 
   1608 	/*
    1609 	 * Release all MSI-X queue resources.
   1610 	 */
   1611 	for (int i = 0; i < sc->num_queues; i++, que++) {
   1612 		if (que->res != NULL)
   1613 			pci_intr_disestablish(sc->osdep.pc,
   1614 			    sc->osdep.ihs[i]);
   1615 	}
   1616 
    1618 	/* Disestablish the Mailbox interrupt last */
   1619 	rid = sc->vector;
   1620 
   1621 	if (sc->osdep.ihs[rid] != NULL) {
   1622 		pci_intr_disestablish(sc->osdep.pc,
   1623 		    sc->osdep.ihs[rid]);
   1624 		sc->osdep.ihs[rid] = NULL;
   1625 	}
   1626 
   1627 	pci_intr_release(sc->osdep.pc, sc->osdep.intrs,
   1628 	    sc->osdep.nintrs);
   1629 
   1630 	if (sc->osdep.mem_size != 0) {
   1631 		bus_space_unmap(sc->osdep.mem_bus_space_tag,
   1632 		    sc->osdep.mem_bus_space_handle,
   1633 		    sc->osdep.mem_size);
   1634 	}
   1635 
   1636 	return;
   1637 } /* ixv_free_pci_resources */
   1638 
   1639 /************************************************************************
   1640  * ixv_setup_interface
   1641  *
   1642  *   Setup networking device structure and register an interface.
   1643  ************************************************************************/
   1644 static int
   1645 ixv_setup_interface(device_t dev, struct ixgbe_softc *sc)
   1646 {
   1647 	struct ethercom *ec = &sc->osdep.ec;
   1648 	struct ifnet   *ifp;
   1649 
   1650 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1651 
   1652 	ifp = sc->ifp = &ec->ec_if;
   1653 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1654 	ifp->if_baudrate = IF_Gbps(10);
   1655 	ifp->if_init = ixv_init;
   1656 	ifp->if_stop = ixv_ifstop;
   1657 	ifp->if_softc = sc;
   1658 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1659 	ifp->if_extflags = IFEF_MPSAFE;
   1660 	ifp->if_ioctl = ixv_ioctl;
   1661 	if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1662 #if 0
   1663 		ixv_start_locked = ixgbe_legacy_start_locked;
   1664 #endif
   1665 	} else {
   1666 		ifp->if_transmit = ixgbe_mq_start;
   1667 #if 0
   1668 		ixv_start_locked = ixgbe_mq_start_locked;
   1669 #endif
   1670 	}
   1671 	ifp->if_start = ixgbe_legacy_start;
   1672 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 2);
   1673 	IFQ_SET_READY(&ifp->if_snd);
   1674 
   1675 	if_initialize(ifp);
   1676 	sc->ipq = if_percpuq_create(&sc->osdep.ec.ec_if);
   1677 	/*
    1678 	 * We use a per-TX-queue softint, so if_deferred_start_init()
    1679 	 * isn't used.
   1680 	 */
   1681 
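	/* IXGBE_MTU_HDR accounts for the Ethernet header and CRC. */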
   1682 	sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1683 
   1684 	/*
   1685 	 * Tell the upper layer(s) we support long frames.
   1686 	 */
   1687 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1688 
   1689 	/* Set capability flags */
   1690 	ifp->if_capabilities |= IFCAP_HWCSUM
   1691 			     |	IFCAP_TSOv4
   1692 			     |	IFCAP_TSOv6;
   1693 	ifp->if_capenable = 0;
   1694 
   1695 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
   1696 			    |  ETHERCAP_VLAN_HWTAGGING
   1697 			    |  ETHERCAP_VLAN_HWCSUM
   1698 			    |  ETHERCAP_JUMBO_MTU
   1699 			    |  ETHERCAP_VLAN_MTU;
   1700 
   1701 	/* Enable the above capabilities by default */
   1702 	ec->ec_capenable = ec->ec_capabilities;
   1703 
   1704 	ether_ifattach(ifp, sc->hw.mac.addr);
   1705 	aprint_normal_dev(dev, "Ethernet address %s\n",
   1706 	    ether_sprintf(sc->hw.mac.addr));
   1707 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1708 
   1709 	/* Don't enable LRO by default */
   1710 #if 0
   1711 	/* NetBSD doesn't support LRO yet */
   1712 	ifp->if_capabilities |= IFCAP_LRO;
   1713 #endif
   1714 
   1715 	/*
   1716 	 * Specify the media types supported by this adapter and register
   1717 	 * callbacks to update media and link information
   1718 	 */
   1719 	ec->ec_ifmedia = &sc->media;
   1720 	ifmedia_init_with_lock(&sc->media, IFM_IMASK, ixv_media_change,
   1721 	    ixv_media_status, &sc->core_mtx);
   1722 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1723 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
   1724 
   1725 	if_register(ifp);
   1726 
   1727 	return 0;
   1728 } /* ixv_setup_interface */
   1729 
   1730 
   1731 /************************************************************************
   1732  * ixv_initialize_transmit_units - Enable transmit unit.
   1733  ************************************************************************/
   1734 static void
   1735 ixv_initialize_transmit_units(struct ixgbe_softc *sc)
   1736 {
   1737 	struct tx_ring	*txr = sc->tx_rings;
   1738 	struct ixgbe_hw	*hw = &sc->hw;
   1739 	int i;
   1740 
   1741 	for (i = 0; i < sc->num_queues; i++, txr++) {
   1742 		u64 tdba = txr->txdma.dma_paddr;
   1743 		u32 txctrl, txdctl;
   1744 		int j = txr->me;
   1745 
   1746 		/* Set WTHRESH to 8, burst writeback */
   1747 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1748 		txdctl &= ~IXGBE_TXDCTL_WTHRESH_MASK;
   1749 		txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
   1750 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1751 
   1752 		/* Set the HW Tx Head and Tail indices */
   1753 		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
   1754 		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);
   1755 
   1756 		/* Set Tx Tail register */
   1757 		txr->tail = IXGBE_VFTDT(j);
   1758 
   1759 		txr->txr_no_space = false;
   1760 
   1761 		/* Set Ring parameters */
   1762 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
   1763 		    (tdba & 0x00000000ffffffffULL));
   1764 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
   1765 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
   1766 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1767 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
   1768 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1769 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
   1770 
   1771 		/* Now enable */
   1772 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1773 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1774 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1775 	}
   1776 
   1777 	return;
   1778 } /* ixv_initialize_transmit_units */
   1779 
   1780 
   1781 /************************************************************************
   1782  * ixv_initialize_rss_mapping
   1783  ************************************************************************/
   1784 static void
   1785 ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
   1786 {
   1787 	struct ixgbe_hw *hw = &sc->hw;
   1788 	u32		reta = 0, mrqc, rss_key[10];
   1789 	int		queue_id;
   1790 	int		i, j;
   1791 	u32		rss_hash_config;
   1792 
    1793 	/* Force use of the default RSS key. */
   1794 #ifdef __NetBSD__
   1795 	rss_getkey((uint8_t *) &rss_key);
   1796 #else
   1797 	if (sc->feat_en & IXGBE_FEATURE_RSS) {
   1798 		/* Fetch the configured RSS key */
   1799 		rss_getkey((uint8_t *)&rss_key);
   1800 	} else {
   1801 		/* set up random bits */
   1802 		cprng_fast(&rss_key, sizeof(rss_key));
   1803 	}
   1804 #endif
   1805 
   1806 	/* Now fill out hash function seeds */
   1807 	for (i = 0; i < 10; i++)
   1808 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1809 
   1810 	/* Set up the redirection table */
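	/*
	 * Each 32-bit VFRETA register packs four 8-bit queue indices,
	 * filled from the least significant byte up.  For example, with
	 * num_queues == 2 the entries cycle 0,1,0,1,... and every fourth
	 * iteration below writes VFRETA(i >> 2) = 0x01000100.
	 */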
   1811 	for (i = 0, j = 0; i < 64; i++, j++) {
   1812 		if (j == sc->num_queues)
   1813 			j = 0;
   1814 
   1815 		if (sc->feat_en & IXGBE_FEATURE_RSS) {
   1816 			/*
   1817 			 * Fetch the RSS bucket id for the given indirection
   1818 			 * entry. Cap it at the number of configured buckets
   1819 			 * (which is num_queues.)
   1820 			 */
   1821 			queue_id = rss_get_indirection_to_bucket(i);
   1822 			queue_id = queue_id % sc->num_queues;
   1823 		} else
   1824 			queue_id = j;
   1825 
   1826 		/*
   1827 		 * The low 8 bits are for hash value (n+0);
   1828 		 * The next 8 bits are for hash value (n+1), etc.
   1829 		 */
   1830 		reta >>= 8;
   1831 		reta |= ((uint32_t)queue_id) << 24;
   1832 		if ((i & 3) == 3) {
   1833 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1834 			reta = 0;
   1835 		}
   1836 	}
   1837 
   1838 	/* Perform hash on these packet types */
   1839 	if (sc->feat_en & IXGBE_FEATURE_RSS)
   1840 		rss_hash_config = rss_gethashconfig();
   1841 	else {
   1842 		/*
   1843 		 * Disable UDP - IP fragments aren't currently being handled
   1844 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1845 		 * traffic.
   1846 		 */
   1847 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1848 				| RSS_HASHTYPE_RSS_TCP_IPV4
   1849 				| RSS_HASHTYPE_RSS_IPV6
   1850 				| RSS_HASHTYPE_RSS_TCP_IPV6;
   1851 	}
   1852 
   1853 	mrqc = IXGBE_MRQC_RSSEN;
   1854 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1855 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1856 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1857 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1858 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1859 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1860 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1861 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1862 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1863 		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX "
   1864 		    "defined, but not supported\n", __func__);
   1865 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1866 		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX "
   1867 		    "defined, but not supported\n", __func__);
   1868 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1869 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1870 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1871 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1872 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1873 		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX "
   1874 		    "defined, but not supported\n", __func__);
   1875 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1876 } /* ixv_initialize_rss_mapping */
   1877 
   1878 
   1879 /************************************************************************
   1880  * ixv_initialize_receive_units - Setup receive registers and features.
   1881  ************************************************************************/
   1882 static void
   1883 ixv_initialize_receive_units(struct ixgbe_softc *sc)
   1884 {
   1885 	struct rx_ring	*rxr = sc->rx_rings;
   1886 	struct ixgbe_hw	*hw = &sc->hw;
   1887 	struct ifnet	*ifp = sc->ifp;
   1888 	u32		bufsz, psrtype;
   1889 
   1890 	if (ifp->if_mtu > ETHERMTU)
   1891 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1892 	else
   1893 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
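	/*
	 * SRRCTL.BSIZEPKT is expressed in 1KB units
	 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10), so the values above select
	 * 4KB and 2KB receive buffers, respectively.
	 */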
   1894 
   1895 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1896 		| IXGBE_PSRTYPE_UDPHDR
   1897 		| IXGBE_PSRTYPE_IPV4HDR
   1898 		| IXGBE_PSRTYPE_IPV6HDR
   1899 		| IXGBE_PSRTYPE_L2HDR;
   1900 
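	/*
	 * Bits 30:29 of PSRTYPE encode the number of RSS queues per pool
	 * as a power of two; setting bit 29 advertises two queues.
	 */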
   1901 	if (sc->num_queues > 1)
   1902 		psrtype |= 1 << 29;
   1903 
   1904 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1905 
   1906 	/* Tell PF our max_frame size */
   1907 	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
   1908 		device_printf(sc->dev, "There is a problem with the PF "
   1909 		    "setup.  It is likely the receive unit for this VF will "
   1910 		    "not function correctly.\n");
   1911 	}
   1912 
   1913 	for (int i = 0; i < sc->num_queues; i++, rxr++) {
   1914 		u64 rdba = rxr->rxdma.dma_paddr;
   1915 		u32 reg, rxdctl;
   1916 		int j = rxr->me;
   1917 
   1918 		/* Disable the queue */
   1919 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
   1920 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1921 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
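		/* Poll up to 10 * 1ms for the disable to take effect */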
   1922 		for (int k = 0; k < 10; k++) {
   1923 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
   1924 			    IXGBE_RXDCTL_ENABLE)
   1925 				msec_delay(1);
   1926 			else
   1927 				break;
   1928 		}
   1929 		IXGBE_WRITE_BARRIER(hw);
   1930 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1931 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
   1932 		    (rdba & 0x00000000ffffffffULL));
   1933 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
   1934 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
   1935 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1936 
   1937 		/* Reset the ring indices */
   1938 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1939 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1940 
   1941 		/* Set up the SRRCTL register */
   1942 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
   1943 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1944 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1945 		reg |= bufsz;
   1946 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1947 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
   1948 
   1949 		/* Capture Rx Tail index */
   1950 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1951 
   1952 		/* Do the queue enabling last */
   1953 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1954 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
   1955 		for (int k = 0; k < 10; k++) {
   1956 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
   1957 			    IXGBE_RXDCTL_ENABLE)
   1958 				break;
   1959 			msec_delay(1);
   1960 		}
   1961 		IXGBE_WRITE_BARRIER(hw);
   1962 
   1963 		/* Set the Tail Pointer */
   1964 #ifdef DEV_NETMAP
   1965 		/*
   1966 		 * In netmap mode, we must preserve the buffers made
   1967 		 * available to userspace before the if_init()
   1968 		 * (this is true by default on the TX side, because
   1969 		 * init makes all buffers available to userspace).
   1970 		 *
   1971 		 * netmap_reset() and the device specific routines
   1972 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1973 		 * buffers at the end of the NIC ring, so here we
   1974 		 * must set the RDT (tail) register to make sure
   1975 		 * they are not overwritten.
   1976 		 *
   1977 		 * In this driver the NIC ring starts at RDH = 0,
   1978 		 * RDT points to the last slot available for reception (?),
   1979 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1980 		 */
   1981 		if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
   1982 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1983 			struct netmap_adapter *na = NA(sc->ifp);
   1984 			struct netmap_kring *kring = na->rx_rings[i];
   1985 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1986 
   1987 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1988 		} else
   1989 #endif /* DEV_NETMAP */
   1990 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1991 			    sc->num_rx_desc - 1);
   1992 	}
   1993 
   1994 	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
   1995 		ixv_initialize_rss_mapping(sc);
   1996 } /* ixv_initialize_receive_units */
   1997 
   1998 /************************************************************************
   1999  * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2000  *
   2001  *   Retrieves the TDH value from the hardware
   2002  ************************************************************************/
   2003 static int
   2004 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2005 {
   2006 	struct sysctlnode node = *rnode;
   2007 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2008 	uint32_t val;
   2009 
   2010 	if (!txr)
   2011 		return (0);
   2012 
   2013 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_VFTDH(txr->me));
   2014 	node.sysctl_data = &val;
   2015 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2016 } /* ixv_sysctl_tdh_handler */
   2017 
   2018 /************************************************************************
    2019  * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2020  *
   2021  *   Retrieves the TDT value from the hardware
   2022  ************************************************************************/
   2023 static int
   2024 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2025 {
   2026 	struct sysctlnode node = *rnode;
   2027 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2028 	uint32_t val;
   2029 
   2030 	if (!txr)
   2031 		return (0);
   2032 
   2033 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_VFTDT(txr->me));
   2034 	node.sysctl_data = &val;
   2035 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2036 } /* ixv_sysctl_tdt_handler */
   2037 
   2038 /************************************************************************
   2039  * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
   2040  * handler function
   2041  *
   2042  *   Retrieves the next_to_check value
   2043  ************************************************************************/
   2044 static int
   2045 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2046 {
   2047 	struct sysctlnode node = *rnode;
   2048 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2049 	uint32_t val;
   2050 
   2051 	if (!rxr)
   2052 		return (0);
   2053 
   2054 	val = rxr->next_to_check;
   2055 	node.sysctl_data = &val;
   2056 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2057 } /* ixv_sysctl_next_to_check_handler */
   2058 
   2059 /************************************************************************
   2060  * ixv_sysctl_next_to_refresh_handler - Receive Descriptor next to refresh
   2061  * handler function
   2062  *
   2063  *   Retrieves the next_to_refresh value
   2064  ************************************************************************/
   2065 static int
   2066 ixv_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
   2067 {
   2068 	struct sysctlnode node = *rnode;
   2069 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2070 	struct ixgbe_softc *sc;
   2071 	uint32_t val;
   2072 
   2073 	if (!rxr)
   2074 		return (0);
   2075 
   2076 	sc = rxr->sc;
   2077 	if (ixgbe_fw_recovery_mode_swflag(sc))
   2078 		return (EPERM);
   2079 
   2080 	val = rxr->next_to_refresh;
   2081 	node.sysctl_data = &val;
   2082 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2083 } /* ixv_sysctl_next_to_refresh_handler */
   2084 
   2085 /************************************************************************
   2086  * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
   2087  *
   2088  *   Retrieves the RDH value from the hardware
   2089  ************************************************************************/
   2090 static int
   2091 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2092 {
   2093 	struct sysctlnode node = *rnode;
   2094 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2095 	uint32_t val;
   2096 
   2097 	if (!rxr)
   2098 		return (0);
   2099 
   2100 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_VFRDH(rxr->me));
   2101 	node.sysctl_data = &val;
   2102 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2103 } /* ixv_sysctl_rdh_handler */
   2104 
   2105 /************************************************************************
   2106  * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2107  *
   2108  *   Retrieves the RDT value from the hardware
   2109  ************************************************************************/
   2110 static int
   2111 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2112 {
   2113 	struct sysctlnode node = *rnode;
   2114 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2115 	uint32_t val;
   2116 
   2117 	if (!rxr)
   2118 		return (0);
   2119 
   2120 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_VFRDT(rxr->me));
   2121 	node.sysctl_data = &val;
   2122 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2123 } /* ixv_sysctl_rdt_handler */
   2124 
   2125 static void
   2126 ixv_setup_vlan_tagging(struct ixgbe_softc *sc)
   2127 {
   2128 	struct ethercom *ec = &sc->osdep.ec;
   2129 	struct ixgbe_hw *hw = &sc->hw;
   2130 	struct rx_ring	*rxr;
   2131 	u32		ctrl;
   2132 	int		i;
   2133 	bool		hwtagging;
   2134 
    2135 	/* Enable HW tagging only if a VLAN is attached */
   2136 	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
   2137 	    && VLAN_ATTACHED(ec);
   2138 
   2139 	/* Enable the queues */
   2140 	for (i = 0; i < sc->num_queues; i++) {
   2141 		rxr = &sc->rx_rings[i];
   2142 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   2143 		if (hwtagging)
   2144 			ctrl |= IXGBE_RXDCTL_VME;
   2145 		else
   2146 			ctrl &= ~IXGBE_RXDCTL_VME;
   2147 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   2148 		/*
   2149 		 * Let Rx path know that it needs to store VLAN tag
   2150 		 * as part of extra mbuf info.
   2151 		 */
   2152 		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
   2153 	}
   2154 } /* ixv_setup_vlan_tagging */
   2155 
   2156 /************************************************************************
   2157  * ixv_setup_vlan_support
   2158  ************************************************************************/
   2159 static int
   2160 ixv_setup_vlan_support(struct ixgbe_softc *sc)
   2161 {
   2162 	struct ethercom *ec = &sc->osdep.ec;
   2163 	struct ixgbe_hw *hw = &sc->hw;
   2164 	u32		vid, vfta, retry;
   2165 	struct vlanid_list *vlanidp;
   2166 	int rv, error = 0;
   2167 
   2168 	/*
    2169 	 * This function is called from both if_init() and the ifflags
    2170 	 * callback (ixv_ifflags_cb()) on NetBSD.
   2171 	 */
   2172 
   2173 	/*
   2174 	 * Part 1:
   2175 	 * Setup VLAN HW tagging
   2176 	 */
   2177 	ixv_setup_vlan_tagging(sc);
   2178 
   2179 	if (!VLAN_ATTACHED(ec))
   2180 		return 0;
   2181 
   2182 	/*
   2183 	 * Part 2:
   2184 	 * Setup VLAN HW filter
   2185 	 */
   2186 	/* Cleanup shadow_vfta */
   2187 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
   2188 		sc->shadow_vfta[i] = 0;
   2189 	/* Generate shadow_vfta from ec_vids */
   2190 	ETHER_LOCK(ec);
   2191 	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
   2192 		uint32_t idx;
   2193 
   2194 		idx = vlanidp->vid / 32;
   2195 		KASSERT(idx < IXGBE_VFTA_SIZE);
   2196 		sc->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
   2197 	}
   2198 	ETHER_UNLOCK(ec);
   2199 
   2200 	/*
    2201 	 * A soft reset zeroes out the VFTA, so
   2202 	 * we need to repopulate it now.
   2203 	 */
   2204 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   2205 		if (sc->shadow_vfta[i] == 0)
   2206 			continue;
   2207 		vfta = sc->shadow_vfta[i];
   2208 		/*
    2209 		 * Reconstruct the VLAN IDs from
    2210 		 * the bits set in each word
    2211 		 * of the array.
   2212 		 */
   2213 		for (int j = 0; j < 32; j++) {
   2214 			retry = 0;
   2215 			if ((vfta & ((u32)1 << j)) == 0)
   2216 				continue;
   2217 			vid = (i * 32) + j;
   2218 
   2219 			/* Call the shared code mailbox routine */
   2220 			while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
   2221 			    FALSE)) != 0) {
   2222 				if (++retry > 5) {
   2223 					device_printf(sc->dev,
   2224 					    "%s: max retry exceeded\n",
   2225 						__func__);
   2226 					break;
   2227 				}
   2228 			}
   2229 			if (rv != 0) {
   2230 				device_printf(sc->dev,
   2231 				    "failed to set vlan %d\n", vid);
   2232 				error = EACCES;
   2233 			}
   2234 		}
   2235 	}
   2236 	return error;
   2237 } /* ixv_setup_vlan_support */
   2238 
   2239 static int
   2240 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
   2241 {
   2242 	struct ifnet *ifp = &ec->ec_if;
   2243 	struct ixgbe_softc *sc = ifp->if_softc;
   2244 	int rv;
   2245 
   2246 	if (set)
   2247 		rv = ixv_register_vlan(sc, vid);
   2248 	else
   2249 		rv = ixv_unregister_vlan(sc, vid);
   2250 
   2251 	if (rv != 0)
   2252 		return rv;
   2253 
   2254 	/*
    2255 	 * Control VLAN HW tagging when ec_nvlans changes from 1 to 0
   2256 	 * or 0 to 1.
   2257 	 */
   2258 	if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
   2259 		ixv_setup_vlan_tagging(sc);
   2260 
   2261 	return rv;
   2262 }
   2263 
   2264 /************************************************************************
   2265  * ixv_register_vlan
   2266  *
   2267  *   Run via a vlan config EVENT, it enables us to use the
   2268  *   HW Filter table since we can get the vlan id. This just
   2269  *   creates the entry in the soft version of the VFTA, init
   2270  *   will repopulate the real table.
   2271  ************************************************************************/
   2272 static int
   2273 ixv_register_vlan(struct ixgbe_softc *sc, u16 vtag)
   2274 {
   2275 	struct ixgbe_hw *hw = &sc->hw;
   2276 	u16		index, bit;
   2277 	int error;
   2278 
   2279 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   2280 		return EINVAL;
   2281 	IXGBE_CORE_LOCK(sc);
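	/*
	 * The VFTA is an array of 32-bit words: a VLAN ID selects word
	 * (vtag >> 5) and bit (vtag & 0x1F).  For example, VLAN 100 sets
	 * bit 4 of word 3, since 100 == 3 * 32 + 4.
	 */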
   2282 	index = (vtag >> 5) & 0x7F;
   2283 	bit = vtag & 0x1F;
   2284 	sc->shadow_vfta[index] |= ((u32)1 << bit);
   2285 	error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
   2286 	IXGBE_CORE_UNLOCK(sc);
   2287 
   2288 	if (error != 0) {
   2289 		device_printf(sc->dev, "failed to register vlan %hu\n", vtag);
   2290 		error = EACCES;
   2291 	}
   2292 	return error;
   2293 } /* ixv_register_vlan */
   2294 
   2295 /************************************************************************
   2296  * ixv_unregister_vlan
   2297  *
   2298  *   Run via a vlan unconfig EVENT, remove our entry
   2299  *   in the soft vfta.
   2300  ************************************************************************/
   2301 static int
   2302 ixv_unregister_vlan(struct ixgbe_softc *sc, u16 vtag)
   2303 {
   2304 	struct ixgbe_hw *hw = &sc->hw;
   2305 	u16		index, bit;
   2306 	int		error;
   2307 
   2308 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   2309 		return EINVAL;
   2310 
   2311 	IXGBE_CORE_LOCK(sc);
   2312 	index = (vtag >> 5) & 0x7F;
   2313 	bit = vtag & 0x1F;
   2314 	sc->shadow_vfta[index] &= ~((u32)1 << bit);
   2315 	error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
   2316 	IXGBE_CORE_UNLOCK(sc);
   2317 
   2318 	if (error != 0) {
   2319 		device_printf(sc->dev, "failed to unregister vlan %hu\n",
   2320 		    vtag);
   2321 		error = EIO;
   2322 	}
   2323 	return error;
   2324 } /* ixv_unregister_vlan */
   2325 
   2326 /************************************************************************
   2327  * ixv_enable_intr
   2328  ************************************************************************/
   2329 static void
   2330 ixv_enable_intr(struct ixgbe_softc *sc)
   2331 {
   2332 	struct ixgbe_hw *hw = &sc->hw;
   2333 	struct ix_queue *que = sc->queues;
   2334 	u32		mask;
   2335 	int i;
   2336 
   2337 	/* For VTEIAC */
   2338 	mask = (1 << sc->vector);
   2339 	for (i = 0; i < sc->num_queues; i++, que++)
   2340 		mask |= (1 << que->msix);
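	/*
	 * For example, with two queues on MSI-X vectors 0 and 1 and the
	 * mailbox on vector 2, mask ends up as 0x7.
	 */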
   2341 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   2342 
   2343 	/* For VTEIMS */
   2344 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->vector));
   2345 	que = sc->queues;
   2346 	for (i = 0; i < sc->num_queues; i++, que++)
   2347 		ixv_enable_queue(sc, que->msix);
   2348 
   2349 	IXGBE_WRITE_FLUSH(hw);
   2350 } /* ixv_enable_intr */
   2351 
   2352 /************************************************************************
   2353  * ixv_disable_intr
   2354  ************************************************************************/
   2355 static void
   2356 ixv_disable_intr(struct ixgbe_softc *sc)
   2357 {
   2358 	struct ix_queue	*que = sc->queues;
   2359 
   2360 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
   2361 
   2362 	/* disable interrupts other than queues */
    2363 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, (1 << sc->vector));
   2364 
   2365 	for (int i = 0; i < sc->num_queues; i++, que++)
   2366 		ixv_disable_queue(sc, que->msix);
   2367 
   2368 	IXGBE_WRITE_FLUSH(&sc->hw);
   2369 } /* ixv_disable_intr */
   2370 
   2371 /************************************************************************
   2372  * ixv_set_ivar
   2373  *
   2374  *   Setup the correct IVAR register for a particular MSI-X interrupt
   2375  *    - entry is the register array entry
   2376  *    - vector is the MSI-X vector for this queue
   2377  *    - type is RX/TX/MISC
   2378  ************************************************************************/
   2379 static void
   2380 ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
   2381 {
   2382 	struct ixgbe_hw *hw = &sc->hw;
   2383 	u32		ivar, index;
   2384 
   2385 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2386 
   2387 	if (type == -1) { /* MISC IVAR */
   2388 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2389 		ivar &= ~0xFF;
   2390 		ivar |= vector;
   2391 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2392 	} else {	  /* RX/TX IVARS */
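		/*
		 * Each VTIVAR register holds four 8-bit entries (RX and TX
		 * for two queues).  For example, entry 3 with type 1 (TX)
		 * lands in VTIVAR(1) at bit offset 16 * 1 + 8 * 1 = 24.
		 */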
   2393 		index = (16 * (entry & 1)) + (8 * type);
   2394 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2395 		ivar &= ~(0xffUL << index);
   2396 		ivar |= ((u32)vector << index);
   2397 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2398 	}
   2399 } /* ixv_set_ivar */
   2400 
   2401 /************************************************************************
   2402  * ixv_configure_ivars
   2403  ************************************************************************/
   2404 static void
   2405 ixv_configure_ivars(struct ixgbe_softc *sc)
   2406 {
   2407 	struct ix_queue *que = sc->queues;
   2408 
   2409 	/* XXX We should sync EITR value calculation with ixgbe.c? */
   2410 
   2411 	for (int i = 0; i < sc->num_queues; i++, que++) {
   2412 		/* First the RX queue entry */
   2413 		ixv_set_ivar(sc, i, que->msix, 0);
   2414 		/* ... and the TX */
   2415 		ixv_set_ivar(sc, i, que->msix, 1);
   2416 		/* Set an initial value in EITR */
   2417 		ixv_eitr_write(sc, que->msix, IXGBE_EITR_DEFAULT);
   2418 	}
   2419 
   2420 	/* For the mailbox interrupt */
   2421 	ixv_set_ivar(sc, 1, sc->vector, -1);
   2422 } /* ixv_configure_ivars */
   2423 
   2424 
   2425 /************************************************************************
   2426  * ixv_init_stats
   2427  *
   2428  *   The VF stats registers never have a truly virgin
    2429  *   starting point, so this routine saves initial values to
   2430  *   last_<REGNAME>.
   2431  ************************************************************************/
   2432 static void
   2433 ixv_init_stats(struct ixgbe_softc *sc)
   2434 {
   2435 	struct ixgbe_hw *hw = &sc->hw;
   2436 
   2437 	sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2438 	sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2439 	sc->stats.vf.last_vfgorc |=
   2440 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2441 
   2442 	sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2443 	sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2444 	sc->stats.vf.last_vfgotc |=
   2445 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2446 
   2447 	sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2448 } /* ixv_init_stats */
   2449 
   2450 #define UPDATE_STAT_32(reg, last, count)		\
   2451 {							\
   2452 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   2453 	IXGBE_EVC_ADD(&count, current - (last));	\
   2454 	(last) = current;				\
   2455 }
   2456 
   2457 #define UPDATE_STAT_36(lsb, msb, last, count)				\
    2458 {								\
   2459 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
   2460 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
   2461 	u64 current = ((cur_msb << 32) | cur_lsb);			\
   2462 	if (current < (last))						\
   2463 		IXGBE_EVC_ADD(&count, current + __BIT(36) - (last));	\
   2464 	else								\
   2465 		IXGBE_EVC_ADD(&count, current - (last));		\
   2466 	(last) = current;						\
   2467 }
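/*
 * UPDATE_STAT_36 compensates for the 36-bit counter wrap: e.g. with
 * last == 0xfffffff00 and a current reading of 0x100, the delta is
 * 0x100 + 2^36 - 0xfffffff00 = 0x200.
 */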
   2468 
   2469 /************************************************************************
   2470  * ixv_update_stats - Update the board statistics counters.
   2471  ************************************************************************/
   2472 void
   2473 ixv_update_stats(struct ixgbe_softc *sc)
   2474 {
   2475 	struct ixgbe_hw *hw = &sc->hw;
   2476 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
   2477 
   2478 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
   2479 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
   2480 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
   2481 	    stats->vfgorc);
   2482 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
   2483 	    stats->vfgotc);
   2484 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2485 
    2486 	/* The VF hardware doesn't maintain error counters */
   2487 
   2488 } /* ixv_update_stats */
   2489 
   2490 /************************************************************************
   2491  * ixv_sysctl_interrupt_rate_handler
   2492  ************************************************************************/
   2493 static int
   2494 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2495 {
   2496 	struct sysctlnode node = *rnode;
   2497 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
    2498 	struct ixgbe_softc *sc;
    2499 	uint32_t reg, usec, rate;
    2500 	int error;
    2501 
    2502 	if (que == NULL)
    2503 		return 0;
	sc = que->sc;
    2504 	reg = IXGBE_READ_REG(&sc->hw, IXGBE_VTEITR(que->msix));
   2505 	usec = ((reg & 0x0FF8) >> 3);
   2506 	if (usec > 0)
   2507 		rate = 500000 / usec;
   2508 	else
   2509 		rate = 0;
   2510 	node.sysctl_data = &rate;
   2511 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2512 	if (error || newp == NULL)
   2513 		return error;
   2514 	reg &= ~0xfff; /* default, no limitation */
   2515 	if (rate > 0 && rate < 500000) {
   2516 		if (rate < 1000)
   2517 			rate = 1000;
   2518 		reg |= ((4000000 / rate) & 0xff8);
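		/*
		 * 4000000 / rate yields the ITR interval already aligned to
		 * bits [11:3]: e.g. rate == 10000 gives 400 (0x190), which
		 * reads back above as usec = 0x190 >> 3 = 50 and
		 * rate = 500000 / 50 = 10000.
		 */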
   2519 		/*
   2520 		 * When RSC is used, ITR interval must be larger than
   2521 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   2522 		 * The minimum value is always greater than 2us on 100M
   2523 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   2524 		 */
   2525 		if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2526 		    && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2527 			if ((sc->num_queues > 1)
   2528 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   2529 				return EINVAL;
   2530 		}
   2531 		sc->max_interrupt_rate = rate;
   2532 	} else
   2533 		sc->max_interrupt_rate = 0;
   2534 	ixv_eitr_write(sc, que->msix, reg);
   2535 
   2536 	return (0);
   2537 } /* ixv_sysctl_interrupt_rate_handler */
   2538 
   2539 const struct sysctlnode *
   2540 ixv_sysctl_instance(struct ixgbe_softc *sc)
   2541 {
   2542 	const char *dvname;
   2543 	struct sysctllog **log;
   2544 	int rc;
   2545 	const struct sysctlnode *rnode;
   2546 
   2547 	log = &sc->sysctllog;
   2548 	dvname = device_xname(sc->dev);
   2549 
   2550 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2551 	    0, CTLTYPE_NODE, dvname,
   2552 	    SYSCTL_DESCR("ixv information and settings"),
   2553 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2554 		goto err;
   2555 
   2556 	return rnode;
   2557 err:
   2558 	device_printf(sc->dev,
   2559 	    "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2560 	return NULL;
   2561 }
   2562 
   2563 static void
   2564 ixv_add_device_sysctls(struct ixgbe_softc *sc)
   2565 {
   2566 	struct sysctllog **log;
   2567 	const struct sysctlnode *rnode, *cnode;
   2568 	device_t dev;
   2569 
   2570 	dev = sc->dev;
   2571 	log = &sc->sysctllog;
   2572 
   2573 	if ((rnode = ixv_sysctl_instance(sc)) == NULL) {
   2574 		aprint_error_dev(dev, "could not create sysctl root\n");
   2575 		return;
   2576 	}
   2577 
   2578 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2579 	    CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
   2580 	    SYSCTL_DESCR("Debug Info"),
   2581 	    ixv_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
   2582 		aprint_error_dev(dev, "could not create sysctl\n");
   2583 
   2584 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2585 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2586 	    "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
   2587 	    ixv_sysctl_rx_copy_len, 0,
   2588 	    (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
   2589 		aprint_error_dev(dev, "could not create sysctl\n");
   2590 
   2591 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2592 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2593 	    "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"),
   2594 	    NULL, 0, &sc->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2595 		aprint_error_dev(dev, "could not create sysctl\n");
   2596 
   2597 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2598 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2599 	    "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"),
   2600 	    NULL, 0, &sc->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2601 		aprint_error_dev(dev, "could not create sysctl\n");
   2602 
   2603 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2604 	    CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit",
   2605 	    SYSCTL_DESCR("max number of RX packets to process"),
   2606 	    ixv_sysctl_rx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
   2607 	    CTL_EOL) != 0)
   2608 		aprint_error_dev(dev, "could not create sysctl\n");
   2609 
   2610 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2611 	    CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit",
   2612 	    SYSCTL_DESCR("max number of TX packets to process"),
   2613 	    ixv_sysctl_tx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
   2614 	    CTL_EOL) != 0)
   2615 		aprint_error_dev(dev, "could not create sysctl\n");
   2616 
   2617 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2618 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "enable_aim",
   2619 	    SYSCTL_DESCR("Interrupt Moderation"),
   2620 	    NULL, 0, &sc->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2621 		aprint_error_dev(dev, "could not create sysctl\n");
   2622 
   2623 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2624 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
   2625 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   2626 	    NULL, 0, &sc->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL)
   2627 	    != 0)
   2628 		aprint_error_dev(dev, "could not create sysctl\n");
   2629 }
   2630 
   2631 /************************************************************************
   2632  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2633  ************************************************************************/
   2634 static void
   2635 ixv_add_stats_sysctls(struct ixgbe_softc *sc)
   2636 {
   2637 	device_t		dev = sc->dev;
   2638 	struct tx_ring		*txr = sc->tx_rings;
   2639 	struct rx_ring		*rxr = sc->rx_rings;
   2640 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
   2641 	struct ixgbe_hw *hw = &sc->hw;
   2642 	const struct sysctlnode *rnode, *cnode;
   2643 	struct sysctllog **log = &sc->sysctllog;
   2644 	const char *xname = device_xname(dev);
   2645 
   2646 	/* Driver Statistics */
   2647 	evcnt_attach_dynamic(&sc->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2648 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2649 	evcnt_attach_dynamic(&sc->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2650 	    NULL, xname, "m_defrag() failed");
   2651 	evcnt_attach_dynamic(&sc->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2652 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2653 	evcnt_attach_dynamic(&sc->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2654 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2655 	evcnt_attach_dynamic(&sc->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2656 	    NULL, xname, "Driver tx dma hard fail other");
   2657 	evcnt_attach_dynamic(&sc->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2658 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2659 	evcnt_attach_dynamic(&sc->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2660 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2661 	evcnt_attach_dynamic(&sc->watchdog_events, EVCNT_TYPE_MISC,
   2662 	    NULL, xname, "Watchdog timeouts");
   2663 	evcnt_attach_dynamic(&sc->tso_err, EVCNT_TYPE_MISC,
   2664 	    NULL, xname, "TSO errors");
   2665 	evcnt_attach_dynamic(&sc->admin_irqev, EVCNT_TYPE_INTR,
   2666 	    NULL, xname, "Admin MSI-X IRQ Handled");
   2667 	evcnt_attach_dynamic(&sc->link_workev, EVCNT_TYPE_INTR,
   2668 	    NULL, xname, "Admin event");
   2669 
   2670 	for (int i = 0; i < sc->num_queues; i++, rxr++, txr++) {
   2671 #ifdef LRO
   2672 		struct lro_ctrl *lro = &rxr->lro;
   2673 #endif
   2674 
   2675 		snprintf(sc->queues[i].evnamebuf,
   2676 		    sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i);
   2677 		snprintf(sc->queues[i].namebuf,
   2678 		    sizeof(sc->queues[i].namebuf), "q%d", i);
   2679 
   2680 		if ((rnode = ixv_sysctl_instance(sc)) == NULL) {
   2681 			aprint_error_dev(dev,
   2682 			    "could not create sysctl root\n");
   2683 			break;
   2684 		}
   2685 
   2686 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2687 		    0, CTLTYPE_NODE,
   2688 		    sc->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2689 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2690 			break;
   2691 
   2692 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2693 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2694 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2695 		    ixv_sysctl_interrupt_rate_handler, 0,
   2696 		    (void *)&sc->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2697 			break;
   2698 
   2699 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2700 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2701 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2702 		    ixv_sysctl_tdh_handler, 0, (void *)txr,
   2703 		    0, CTL_CREATE, CTL_EOL) != 0)
   2704 			break;
   2705 
   2706 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2707 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2708 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2709 		    ixv_sysctl_tdt_handler, 0, (void *)txr,
   2710 		    0, CTL_CREATE, CTL_EOL) != 0)
   2711 			break;
   2712 
   2713 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2714 		    CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
   2715 		    SYSCTL_DESCR("Receive Descriptor next to check"),
   2716 		    ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   2717 		    CTL_CREATE, CTL_EOL) != 0)
   2718 			break;
   2719 
   2720 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2721 		    CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
   2722 		    SYSCTL_DESCR("Receive Descriptor next to refresh"),
   2723 		    ixv_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
   2724 		    CTL_CREATE, CTL_EOL) != 0)
   2725 			break;
   2726 
   2727 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2728 		    CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
   2729 		    SYSCTL_DESCR("Receive Descriptor Head"),
   2730 		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2731 		    CTL_CREATE, CTL_EOL) != 0)
   2732 			break;
   2733 
   2734 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2735 		    CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
   2736 		    SYSCTL_DESCR("Receive Descriptor Tail"),
   2737 		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2738 		    CTL_CREATE, CTL_EOL) != 0)
   2739 			break;
   2740 
   2741 		evcnt_attach_dynamic(&sc->queues[i].irqs, EVCNT_TYPE_INTR,
   2742 		    NULL, sc->queues[i].evnamebuf, "IRQs on queue");
   2743 		evcnt_attach_dynamic(&sc->queues[i].handleq,
   2744 		    EVCNT_TYPE_MISC, NULL, sc->queues[i].evnamebuf,
   2745 		    "Handled queue in softint");
   2746 		evcnt_attach_dynamic(&sc->queues[i].req, EVCNT_TYPE_MISC,
   2747 		    NULL, sc->queues[i].evnamebuf, "Requeued in softint");
   2748 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2749 		    NULL, sc->queues[i].evnamebuf,
   2750 		    "Queue Packets Transmitted");
   2751 #ifndef IXGBE_LEGACY_TX
   2752 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2753 		    NULL, sc->queues[i].evnamebuf,
   2754 		    "Packets dropped in pcq");
   2755 #endif
   2756 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2757 		    NULL, sc->queues[i].evnamebuf,
   2758 		    "TX Queue No Descriptor Available");
   2759 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2760 		    NULL, sc->queues[i].evnamebuf, "TSO");
   2761 
   2762 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2763 		    NULL, sc->queues[i].evnamebuf,
   2764 		    "Queue Bytes Received");
   2765 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2766 		    NULL, sc->queues[i].evnamebuf,
   2767 		    "Queue Packets Received");
   2768 		evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
   2769 		    NULL, sc->queues[i].evnamebuf, "Rx no mbuf");
   2770 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2771 		    NULL, sc->queues[i].evnamebuf, "Rx discarded");
   2772 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2773 		    NULL, sc->queues[i].evnamebuf, "Copied RX Frames");
   2774 #ifdef LRO
   2775 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2776 				CTLFLAG_RD, &lro->lro_queued, 0,
   2777 				"LRO Queued");
   2778 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2779 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2780 				"LRO Flushed");
   2781 #endif /* LRO */
   2782 	}
   2783 
   2784 	/* MAC stats get their own sub node */
   2785 
   2786 	snprintf(stats->namebuf,
   2787 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2788 
   2789 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2790 	    stats->namebuf, "rx csum offload - IP");
   2791 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2792 	    stats->namebuf, "rx csum offload - L4");
   2793 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2794 	    stats->namebuf, "rx csum offload - IP bad");
   2795 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2796 	    stats->namebuf, "rx csum offload - L4 bad");
   2797 
   2798 	/* Packet Reception Stats */
   2799 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2800 	    xname, "Good Packets Received");
   2801 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2802 	    xname, "Good Octets Received");
   2803 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2804 	    xname, "Multicast Packets Received");
   2805 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2806 	    xname, "Good Packets Transmitted");
   2807 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2808 	    xname, "Good Octets Transmitted");
   2809 
   2810 	/* Mailbox Stats */
   2811 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2812 	    xname, "message TXs");
   2813 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2814 	    xname, "message RXs");
   2815 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2816 	    xname, "ACKs");
   2817 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2818 	    xname, "REQs");
   2819 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2820 	    xname, "RSTs");
   2821 
   2822 } /* ixv_add_stats_sysctls */
   2823 
   2824 static void
   2825 ixv_clear_evcnt(struct ixgbe_softc *sc)
   2826 {
   2827 	struct tx_ring		*txr = sc->tx_rings;
   2828 	struct rx_ring		*rxr = sc->rx_rings;
   2829 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
   2830 	struct ixgbe_hw *hw = &sc->hw;
   2831 	int i;
   2832 
   2833 	/* Driver Statistics */
   2834 	IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, 0);
   2835 	IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, 0);
   2836 	IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, 0);
   2837 	IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, 0);
   2838 	IXGBE_EVC_STORE(&sc->other_tx_dma_setup, 0);
   2839 	IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, 0);
   2840 	IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, 0);
   2841 	IXGBE_EVC_STORE(&sc->watchdog_events, 0);
   2842 	IXGBE_EVC_STORE(&sc->tso_err, 0);
   2843 	IXGBE_EVC_STORE(&sc->admin_irqev, 0);
   2844 	IXGBE_EVC_STORE(&sc->link_workev, 0);
   2845 
   2846 	for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
   2847 		IXGBE_EVC_STORE(&sc->queues[i].irqs, 0);
   2848 		IXGBE_EVC_STORE(&sc->queues[i].handleq, 0);
   2849 		IXGBE_EVC_STORE(&sc->queues[i].req, 0);
   2850 		IXGBE_EVC_STORE(&txr->total_packets, 0);
   2851 #ifndef IXGBE_LEGACY_TX
   2852 		IXGBE_EVC_STORE(&txr->pcq_drops, 0);
   2853 #endif
   2854 		IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
   2855 		IXGBE_EVC_STORE(&txr->tso_tx, 0);
   2856 		txr->q_efbig_tx_dma_setup = 0;
   2857 		txr->q_mbuf_defrag_failed = 0;
   2858 		txr->q_efbig2_tx_dma_setup = 0;
   2859 		txr->q_einval_tx_dma_setup = 0;
   2860 		txr->q_other_tx_dma_setup = 0;
   2861 		txr->q_eagain_tx_dma_setup = 0;
   2862 		txr->q_enomem_tx_dma_setup = 0;
   2863 		txr->q_tso_err = 0;
   2864 
   2865 		IXGBE_EVC_STORE(&rxr->rx_packets, 0);
   2866 		IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
   2867 		IXGBE_EVC_STORE(&rxr->rx_copies, 0);
   2868 		IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
   2869 		IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
   2870 	}
   2871 
    2872 	/* MAC stats */
   2873 
   2874 	IXGBE_EVC_STORE(&stats->ipcs, 0);
   2875 	IXGBE_EVC_STORE(&stats->l4cs, 0);
   2876 	IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
   2877 	IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
   2878 
   2879 	/*
   2880 	 * Packet Reception Stats.
   2881 	 * Call ixv_init_stats() to save last VF counters' values.
   2882 	 */
   2883 	ixv_init_stats(sc);
   2884 	IXGBE_EVC_STORE(&stats->vfgprc, 0);
   2885 	IXGBE_EVC_STORE(&stats->vfgorc, 0);
   2886 	IXGBE_EVC_STORE(&stats->vfmprc, 0);
   2887 	IXGBE_EVC_STORE(&stats->vfgptc, 0);
   2888 	IXGBE_EVC_STORE(&stats->vfgotc, 0);
   2889 
   2890 	/* Mailbox Stats */
   2891 	IXGBE_EVC_STORE(&hw->mbx.stats.msgs_tx, 0);
   2892 	IXGBE_EVC_STORE(&hw->mbx.stats.msgs_rx, 0);
   2893 	IXGBE_EVC_STORE(&hw->mbx.stats.acks, 0);
   2894 	IXGBE_EVC_STORE(&hw->mbx.stats.reqs, 0);
   2895 	IXGBE_EVC_STORE(&hw->mbx.stats.rsts, 0);
   2896 
   2897 } /* ixv_clear_evcnt */
   2898 
   2899 #define PRINTQS(sc, regname)						\
   2900 	do {								\
   2901 		struct ixgbe_hw	*_hw = &(sc)->hw;			\
   2902 		int _i;							\
   2903 									\
   2904 		printf("%s: %s", device_xname((sc)->dev), #regname);	\
   2905 		for (_i = 0; _i < (sc)->num_queues; _i++) {		\
   2906 			printf((_i == 0) ? "\t" : " ");			\
   2907 			printf("%08x", IXGBE_READ_REG(_hw,		\
   2908 				IXGBE_##regname(_i)));			\
   2909 		}							\
   2910 		printf("\n");						\
   2911 	} while (0)
   2912 
   2913 /************************************************************************
   2914  * ixv_print_debug_info
   2915  *
    2916  *   Provides a way to take a look at important registers
   2917  *   maintained by the driver and hardware.
   2918  ************************************************************************/
   2919 static void
   2920 ixv_print_debug_info(struct ixgbe_softc *sc)
   2921 {
   2922 	device_t	dev = sc->dev;
   2923 	struct ixgbe_hw *hw = &sc->hw;
   2924 	int i;
   2925 
   2926 	device_printf(dev, "queue:");
   2927 	for (i = 0; i < sc->num_queues; i++) {
   2928 		printf((i == 0) ? "\t" : " ");
   2929 		printf("%8d", i);
   2930 	}
   2931 	printf("\n");
   2932 	PRINTQS(sc, VFRDBAL);
   2933 	PRINTQS(sc, VFRDBAH);
   2934 	PRINTQS(sc, VFRDLEN);
   2935 	PRINTQS(sc, VFSRRCTL);
   2936 	PRINTQS(sc, VFRDH);
   2937 	PRINTQS(sc, VFRDT);
   2938 	PRINTQS(sc, VFRXDCTL);
   2939 
   2940 	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIMS));
   2941 	device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIAM));
   2942 	device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIAC));
   2943 } /* ixv_print_debug_info */
   2944 
   2945 /************************************************************************
   2946  * ixv_sysctl_debug
   2947  ************************************************************************/
   2948 static int
   2949 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2950 {
   2951 	struct sysctlnode node = *rnode;
   2952 	struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
   2953 	int	       error, result = 0;
   2954 
   2955 	node.sysctl_data = &result;
   2956 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2957 
   2958 	if (error || newp == NULL)
   2959 		return error;
   2960 
   2961 	if (result == 1)
   2962 		ixv_print_debug_info(sc);
   2963 
   2964 	return 0;
   2965 } /* ixv_sysctl_debug */
   2966 
   2967 /************************************************************************
   2968  * ixv_sysctl_rx_copy_len
   2969  ************************************************************************/
   2970 static int
   2971 ixv_sysctl_rx_copy_len(SYSCTLFN_ARGS)
   2972 {
   2973 	struct sysctlnode node = *rnode;
   2974 	struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
   2975 	int error;
   2976 	int result = sc->rx_copy_len;
   2977 
   2978 	node.sysctl_data = &result;
   2979 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2980 
   2981 	if (error || newp == NULL)
   2982 		return error;
   2983 
   2984 	if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
   2985 		return EINVAL;
   2986 
   2987 	sc->rx_copy_len = result;
   2988 
   2989 	return 0;
   2990 } /* ixv_sysctl_rx_copy_len */
   2991 
   2992 /************************************************************************
   2993  * ixv_sysctl_tx_process_limit
   2994  ************************************************************************/
   2995 static int
   2996 ixv_sysctl_tx_process_limit(SYSCTLFN_ARGS)
   2997 {
   2998 	struct sysctlnode node = *rnode;
   2999 	struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
   3000 	int error;
   3001 	int result = sc->tx_process_limit;
   3002 
   3003 	node.sysctl_data = &result;
   3004 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3005 
   3006 	if (error || newp == NULL)
   3007 		return error;
   3008 
   3009 	if ((result <= 0) || (result > sc->num_tx_desc))
   3010 		return EINVAL;
   3011 
   3012 	sc->tx_process_limit = result;
   3013 
   3014 	return 0;
   3015 } /* ixv_sysctl_tx_process_limit */
   3016 
   3017 /************************************************************************
   3018  * ixv_sysctl_rx_process_limit
   3019  ************************************************************************/
   3020 static int
   3021 ixv_sysctl_rx_process_limit(SYSCTLFN_ARGS)
   3022 {
   3023 	struct sysctlnode node = *rnode;
   3024 	struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
   3025 	int error;
   3026 	int result = sc->rx_process_limit;
   3027 
   3028 	node.sysctl_data = &result;
   3029 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3030 
   3031 	if (error || newp == NULL)
   3032 		return error;
   3033 
   3034 	if ((result <= 0) || (result > sc->num_rx_desc))
   3035 		return EINVAL;
   3036 
   3037 	sc->rx_process_limit = result;
   3038 
   3039 	return 0;
   3040 } /* ixv_sysctl_rx_process_limit */
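
        /*
         * The two process-limit sysctls above bound how many descriptors
         * ixgbe_txeof()/ixgbe_rxeof() handle in one pass of the queue
         * handler, so a busy ring cannot monopolize the CPU; leftover
         * work is picked up on the next pass (see the "more" handling in
         * ixv_handle_que()).  Valid values run from 1 through the ring
         * size.
         */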
   3041 
   3042 /************************************************************************
   3043  * ixv_init_device_features
   3044  ************************************************************************/
   3045 static void
   3046 ixv_init_device_features(struct ixgbe_softc *sc)
   3047 {
   3048 	sc->feat_cap = IXGBE_FEATURE_NETMAP
   3049 			  | IXGBE_FEATURE_VF
   3050 			  | IXGBE_FEATURE_RSS
   3051 			  | IXGBE_FEATURE_LEGACY_TX;
   3052 
   3053 	/* Feature flags are a bit sparse for VFs at the moment. */
   3054 	switch (sc->hw.mac.type) {
   3055 	case ixgbe_mac_82599_vf:
   3056 		break;
   3057 	case ixgbe_mac_X540_vf:
   3058 		break;
   3059 	case ixgbe_mac_X550_vf:
   3060 	case ixgbe_mac_X550EM_x_vf:
   3061 	case ixgbe_mac_X550EM_a_vf:
   3062 		sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   3063 		break;
   3064 	default:
   3065 		break;
   3066 	}
   3067 
   3068 	/* Enabled by default... */
   3069 	/* Is a virtual function (VF) */
   3070 	if (sc->feat_cap & IXGBE_FEATURE_VF)
   3071 		sc->feat_en |= IXGBE_FEATURE_VF;
   3072 	/* Netmap */
   3073 	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
   3074 		sc->feat_en |= IXGBE_FEATURE_NETMAP;
   3075 	/* Receive-Side Scaling (RSS) */
   3076 	if (sc->feat_cap & IXGBE_FEATURE_RSS)
   3077 		sc->feat_en |= IXGBE_FEATURE_RSS;
   3078 	/* Needs advanced context descriptor regardless of offloads req'd */
   3079 	if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   3080 		sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   3081 
   3082 	/* Enabled via sysctl... */
   3083 	/* Legacy (single queue) transmit */
   3084 	if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   3085 	    ixv_enable_legacy_tx)
   3086 		sc->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   3087 } /* ixv_init_device_features */
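
        /*
         * feat_cap records what this device could support and feat_en
         * what is actually enabled; the rest of the driver is expected
         * to test feat_en only.  A typical check (illustrative):
         *
         *	if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX)
         *		... use the single-queue transmit path ...
         */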
   3088 
   3089 /************************************************************************
   3090  * ixv_shutdown - Shutdown entry point
   3091  ************************************************************************/
   3092 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3093 static int
   3094 ixv_shutdown(device_t dev)
   3095 {
   3096 	struct ixgbe_softc *sc = device_private(dev);
   3097 	IXGBE_CORE_LOCK(sc);
   3098 	ixv_stop_locked(sc);
   3099 	IXGBE_CORE_UNLOCK(sc);
   3100 
   3101 	return (0);
   3102 } /* ixv_shutdown */
   3103 #endif
   3104 
   3105 static int
   3106 ixv_ifflags_cb(struct ethercom *ec)
   3107 {
   3108 	struct ifnet *ifp = &ec->ec_if;
   3109 	struct ixgbe_softc *sc = ifp->if_softc;
   3110 	u_short saved_flags;
   3111 	u_short change;
   3112 	int rv = 0;
   3113 
   3114 	IXGBE_CORE_LOCK(sc);
   3115 
   3116 	saved_flags = sc->if_flags;
   3117 	change = ifp->if_flags ^ sc->if_flags;
   3118 	if (change != 0)
   3119 		sc->if_flags = ifp->if_flags;
   3120 
   3121 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3122 		rv = ENETRESET;
   3123 		goto out;
   3124 	} else if ((change & IFF_PROMISC) != 0) {
   3125 		rv = ixv_set_rxfilter(sc);
   3126 		if (rv != 0) {
   3127 			/* Restore previous */
   3128 			sc->if_flags = saved_flags;
   3129 			goto out;
   3130 		}
   3131 	}
   3132 
   3133 	/* Check for ec_capenable. */
   3134 	change = ec->ec_capenable ^ sc->ec_capenable;
   3135 	sc->ec_capenable = ec->ec_capenable;
   3136 	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
   3137 	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
   3138 		rv = ENETRESET;
   3139 		goto out;
   3140 	}
   3141 
   3142 	/*
   3143 	 * No special handling is required for ETHERCAP_VLAN_MTU;
   3144 	 * the PF's MAXFRS (MHADD) does not include the 4-byte VLAN header.
   3145 	 */
   3146 
   3147 	/* Set up VLAN support and filter */
   3148 	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
   3149 		rv = ixv_setup_vlan_support(sc);
   3150 
   3151 out:
   3152 	IXGBE_CORE_UNLOCK(sc);
   3153 
   3154 	return rv;
   3155 }
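
        /*
         * Contract with the caller in the MI Ethernet code: returning
         * ENETRESET means the change cannot be applied on the fly and
         * the interface must be reinitialized, while 0 means the change
         * was handled in place (as above for IFF_PROMISC and the VLAN
         * ethercap bits).
         */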
   3156 
   3157 
   3158 /************************************************************************
   3159  * ixv_ioctl - Ioctl entry point
   3160  *
   3161  *   Called when the user wants to configure the interface.
   3162  *
   3163  *   return 0 on success, positive on failure
   3164  ************************************************************************/
   3165 static int
   3166 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   3167 {
   3168 	struct ixgbe_softc *sc = ifp->if_softc;
   3169 	struct ixgbe_hw *hw = &sc->hw;
   3170 	struct ifcapreq *ifcr = data;
   3171 	int		error;
   3172 	int l4csum_en;
   3173 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
   3174 	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3175 
   3176 	switch (command) {
   3177 	case SIOCSIFFLAGS:
   3178 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   3179 		break;
   3180 	case SIOCADDMULTI: {
   3181 		struct ether_multi *enm;
   3182 		struct ether_multistep step;
   3183 		struct ethercom *ec = &sc->osdep.ec;
   3184 		bool overflow = false;
   3185 		int mcnt = 0;
   3186 
   3187 		/*
   3188 		 * Check the number of multicast addresses. If it exceeds
   3189 		 * the limit, return ENOSPC.
   3190 		 * Update this code when we support API 1.3.
   3191 		 */
   3192 		ETHER_LOCK(ec);
   3193 		ETHER_FIRST_MULTI(step, ec, enm);
   3194 		while (enm != NULL) {
   3195 			mcnt++;
   3196 
   3197 			/*
   3198 			 * This check runs before the new address is added,
   3199 			 * so at least one free slot must remain.
   3200 			 */
   3201 			if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
   3202 				overflow = true;
   3203 				break;
   3204 			}
   3205 			ETHER_NEXT_MULTI(step, enm);
   3206 		}
   3207 		ETHER_UNLOCK(ec);
   3208 		error = 0;
   3209 		if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
   3210 			error = hw->mac.ops.update_xcast_mode(hw,
   3211 			    IXGBEVF_XCAST_MODE_ALLMULTI);
   3212 			if (error == IXGBE_ERR_NOT_TRUSTED) {
   3213 				device_printf(sc->dev,
   3214 				    "this interface is not trusted\n");
   3215 				error = EPERM;
   3216 			} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
   3217 				device_printf(sc->dev,
   3218 				    "the PF doesn't support allmulti mode\n");
   3219 				error = EOPNOTSUPP;
   3220 			} else if (error) {
   3221 				device_printf(sc->dev,
   3222 				    "number of Ethernet multicast addresses "
   3223 				    "exceeds the limit (%d). error = %d\n",
   3224 				    IXGBE_MAX_VF_MC, error);
   3225 				error = ENOSPC;
   3226 			} else
   3227 				ec->ec_flags |= ETHER_F_ALLMULTI;
   3228 		}
   3229 		if (error)
   3230 			return error;
   3231 	}
   3232 		/*FALLTHROUGH*/
   3233 	case SIOCDELMULTI:
   3234 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   3235 		break;
   3236 	case SIOCSIFMEDIA:
   3237 	case SIOCGIFMEDIA:
   3238 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   3239 		break;
   3240 	case SIOCSIFCAP:
   3241 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   3242 		break;
   3243 	case SIOCSIFMTU:
   3244 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   3245 		break;
   3246 	case SIOCZIFDATA:
   3247 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   3248 		ixv_update_stats(sc);
   3249 		ixv_clear_evcnt(sc);
   3250 		break;
   3251 	default:
   3252 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   3253 		break;
   3254 	}
   3255 
   3256 	switch (command) {
   3257 	case SIOCSIFCAP:
   3258 		/* Layer-4 Rx checksum offload has to be turned on and
   3259 		 * off as a unit.
   3260 		 */
   3261 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   3262 		if (l4csum_en != l4csum && l4csum_en != 0)
   3263 			return EINVAL;
   3264 		/*FALLTHROUGH*/
   3265 	case SIOCADDMULTI:
   3266 	case SIOCDELMULTI:
   3267 	case SIOCSIFFLAGS:
   3268 	case SIOCSIFMTU:
   3269 	default:
   3270 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   3271 			return error;
   3272 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   3273 			;
   3274 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   3275 			IXGBE_CORE_LOCK(sc);
   3276 			ixv_init_locked(sc);
   3277 			IXGBE_CORE_UNLOCK(sc);
   3278 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   3279 			/*
   3280 			 * Multicast list has changed; set the hardware filter
   3281 			 * accordingly.
   3282 			 */
   3283 			IXGBE_CORE_LOCK(sc);
   3284 			ixv_disable_intr(sc);
   3285 			ixv_set_rxfilter(sc);
   3286 			ixv_enable_intr(sc);
   3287 			IXGBE_CORE_UNLOCK(sc);
   3288 		}
   3289 		return 0;
   3290 	}
   3291 } /* ixv_ioctl */
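
        /*
         * Note the structure above: the first switch only logs,
         * pre-validates (multicast count, L4 checksum pairing) and
         * handles SIOCZIFDATA; the real state change is delegated to
         * ether_ioctl() in the second switch.  ENETRESET from
         * ether_ioctl() then triggers either a full reinit (SIOCSIFCAP,
         * SIOCSIFMTU) or just a receive filter update (SIOCADDMULTI,
         * SIOCDELMULTI) while the interface is running.
         */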
   3292 
   3293 /************************************************************************
   3294  * ixv_init
   3295  ************************************************************************/
   3296 static int
   3297 ixv_init(struct ifnet *ifp)
   3298 {
   3299 	struct ixgbe_softc *sc = ifp->if_softc;
   3300 
   3301 	IXGBE_CORE_LOCK(sc);
   3302 	ixv_init_locked(sc);
   3303 	IXGBE_CORE_UNLOCK(sc);
   3304 
   3305 	return 0;
   3306 } /* ixv_init */
   3307 
   3308 /************************************************************************
   3309  * ixv_handle_que
   3310  ************************************************************************/
   3311 static void
   3312 ixv_handle_que(void *context)
   3313 {
   3314 	struct ix_queue *que = context;
   3315 	struct ixgbe_softc *sc = que->sc;
   3316 	struct tx_ring	*txr = que->txr;
   3317 	struct ifnet	*ifp = sc->ifp;
   3318 	bool		more;
   3319 
   3320 	IXGBE_EVC_ADD(&que->handleq, 1);
   3321 
   3322 	if (ifp->if_flags & IFF_RUNNING) {
   3323 		IXGBE_TX_LOCK(txr);
   3324 		more = ixgbe_txeof(txr);
   3325 		if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX))
   3326 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   3327 				ixgbe_mq_start_locked(ifp, txr);
   3328 		/* Only for queue 0 */
   3329 		/* NetBSD still needs this for CBQ */
   3330 		if ((&sc->queues[0] == que)
   3331 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   3332 			ixgbe_legacy_start_locked(ifp, txr);
   3333 		IXGBE_TX_UNLOCK(txr);
   3334 		more |= ixgbe_rxeof(que);
   3335 		if (more) {
   3336 			IXGBE_EVC_ADD(&que->req, 1);
   3337 			if (sc->txrx_use_workqueue) {
   3338 				/*
   3339 				 * As in ixg(4), an "enqueued" flag is not
   3340 				 * required here. See ixgbe_msix_que().
   3341 				 */
   3342 				workqueue_enqueue(sc->que_wq,
   3343 				    &que->wq_cookie, curcpu());
   3344 			} else
   3345 				softint_schedule(que->que_si);
   3346 			return;
   3347 		}
   3348 	}
   3349 
   3350 	/* Re-enable this interrupt */
   3351 	ixv_enable_queue(sc, que->msix);
   3352 
   3353 	return;
   3354 } /* ixv_handle_que */
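
        /*
         * Two ways out of the handler above: if ixgbe_txeof() or
         * ixgbe_rxeof() report remaining work, the handler reschedules
         * itself (workqueue or softint) and leaves the queue interrupt
         * masked; only a clean pass re-enables the per-queue interrupt
         * via ixv_enable_queue(), so no new interrupt is taken while
         * work is still pending.
         */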
   3355 
   3356 /************************************************************************
   3357  * ixv_handle_que_work
   3358  ************************************************************************/
   3359 static void
   3360 ixv_handle_que_work(struct work *wk, void *context)
   3361 {
   3362 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   3363 
   3364 	/*
   3365 	 * As in ixg(4), an "enqueued" flag is not required here.
   3366 	 * See ixgbe_msix_que().
   3367 	 */
   3368 	ixv_handle_que(que);
   3369 }
   3370 
   3371 /************************************************************************
   3372  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   3373  ************************************************************************/
   3374 static int
   3375 ixv_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa)
   3376 {
   3377 	device_t	dev = sc->dev;
   3378 	struct ix_queue *que = sc->queues;
   3379 	struct tx_ring	*txr = sc->tx_rings;
   3380 	int		error, msix_ctrl, rid, vector = 0;
   3381 	pci_chipset_tag_t pc;
   3382 	pcitag_t	tag;
   3383 	char		intrbuf[PCI_INTRSTR_LEN];
   3384 	char		wqname[MAXCOMLEN];
   3385 	char		intr_xname[32];
   3386 	const char	*intrstr = NULL;
   3387 	kcpuset_t	*affinity;
   3388 	int		cpu_id = 0;
   3389 
   3390 	pc = sc->osdep.pc;
   3391 	tag = sc->osdep.tag;
   3392 
   3393 	sc->osdep.nintrs = sc->num_queues + 1;
   3394 	if (pci_msix_alloc_exact(pa, &sc->osdep.intrs,
   3395 	    sc->osdep.nintrs) != 0) {
   3396 		aprint_error_dev(dev,
   3397 		    "failed to allocate MSI-X interrupt\n");
   3398 		return (ENXIO);
   3399 	}
   3400 
   3401 	kcpuset_create(&affinity, false);
   3402 	for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
   3403 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   3404 		    device_xname(dev), i);
   3405 		intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf,
   3406 		    sizeof(intrbuf));
   3407 		pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE,
   3408 		    true);
   3409 
   3410 		/* Set the handler function */
   3411 		que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
   3412 		    sc->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   3413 		    intr_xname);
   3414 		if (que->res == NULL) {
   3415 			pci_intr_release(pc, sc->osdep.intrs,
   3416 			    sc->osdep.nintrs);
   3417 			aprint_error_dev(dev,
   3418 			    "Failed to register QUE handler\n");
   3419 			kcpuset_destroy(affinity);
   3420 			return (ENXIO);
   3421 		}
   3422 		que->msix = vector;
   3423 		sc->active_queues |= (u64)1 << que->msix;
   3424 
   3425 		cpu_id = i;
   3426 		/* Round-robin affinity */
   3427 		kcpuset_zero(affinity);
   3428 		kcpuset_set(affinity, cpu_id % ncpu);
   3429 		error = interrupt_distribute(sc->osdep.ihs[i], affinity, NULL);
   3430 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   3431 		    intrstr);
   3432 		if (error == 0)
   3433 			aprint_normal(", bound queue %d to cpu %d\n",
   3434 			    i, cpu_id % ncpu);
   3435 		else
   3436 			aprint_normal("\n");
   3437 
   3438 #ifndef IXGBE_LEGACY_TX
   3439 		txr->txr_si
   3440 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
   3441 			ixgbe_deferred_mq_start, txr);
   3442 #endif
   3443 		que->que_si
   3444 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
   3445 			ixv_handle_que, que);
   3446 		if (que->que_si == NULL) {
   3447 			aprint_error_dev(dev,
   3448 			    "could not establish software interrupt\n");
   3449 		}
   3450 	}
   3451 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   3452 	error = workqueue_create(&sc->txr_wq, wqname,
   3453 	    ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3454 	    IXGBE_WORKQUEUE_FLAGS);
   3455 	if (error) {
   3456 		aprint_error_dev(dev,
   3457 		    "couldn't create workqueue for deferred Tx\n");
   3458 	}
   3459 	sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   3460 
   3461 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   3462 	error = workqueue_create(&sc->que_wq, wqname,
   3463 	    ixv_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3464 	    IXGBE_WORKQUEUE_FLAGS);
   3465 	if (error) {
   3466 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   3467 	}
   3468 
   3469 	/* and Mailbox */
   3470 	cpu_id++;
   3471 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   3472 	sc->vector = vector;
   3473 	intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf,
   3474 	    sizeof(intrbuf));
   3475 	pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   3476 
   3477 	/* Set the mbx handler function */
   3478 	sc->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   3479 	    sc->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, sc, intr_xname);
   3480 	if (sc->osdep.ihs[vector] == NULL) {
   3481 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   3482 		kcpuset_destroy(affinity);
   3483 		return (ENXIO);
   3484 	}
   3485 	/* Round-robin affinity */
   3486 	kcpuset_zero(affinity);
   3487 	kcpuset_set(affinity, cpu_id % ncpu);
   3488 	error = interrupt_distribute(sc->osdep.ihs[vector], affinity,
   3489 	    NULL);
   3490 
   3491 	aprint_normal_dev(dev,
   3492 	    "for link, interrupting at %s", intrstr);
   3493 	if (error == 0)
   3494 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   3495 	else
   3496 		aprint_normal("\n");
   3497 
   3498 	/* Workqueue for mailbox (admin) events */
   3499 	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
   3500 	error = workqueue_create(&sc->admin_wq, wqname,
   3501 	    ixv_handle_admin, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3502 	    IXGBE_TASKLET_WQ_FLAGS);
   3503 	if (error) {
   3504 		aprint_error_dev(dev,
   3505 		    "could not create admin workqueue (%d)\n", error);
   3506 		goto err_out;
   3507 	}
   3508 
   3509 	/*
   3510 	 * Due to a broken design, QEMU will fail to properly
   3511 	 * enable the guest for MSI-X unless the vectors in
   3512 	 * the table are all set up, so we must rewrite the
   3513 	 * ENABLE bit in the MSI-X control register again at
   3514 	 * this point to cause it to successfully initialize us.
   3515 	 */
   3516 	if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
   3517 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   3518 		rid += PCI_MSIX_CTL;
   3519 		msix_ctrl = pci_conf_read(pc, tag, rid);
   3520 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   3521 		pci_conf_write(pc, tag, rid, msix_ctrl);
   3522 	}
   3523 
   3524 	kcpuset_destroy(affinity);
   3525 	return (0);
   3526 err_out:
   3527 	kcpuset_destroy(affinity);
   3528 	ixv_free_deferred_handlers(sc);
   3529 	ixv_free_pci_resources(sc);
   3530 	return (error);
   3531 } /* ixv_allocate_msix */
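
        /*
         * Resulting vector layout: vectors 0 .. num_queues - 1 service
         * the Tx/Rx queue pairs and vector num_queues (saved in
         * sc->vector) services the mailbox/link interrupt, matching the
         * num_queues + 1 vectors requested via pci_msix_alloc_exact().
         */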
   3532 
   3533 /************************************************************************
   3534  * ixv_configure_interrupts - Setup MSI-X resources
   3535  *
   3536  *   Note: The VF device MUST use MSI-X, there is no fallback.
   3537  ************************************************************************/
   3538 static int
   3539 ixv_configure_interrupts(struct ixgbe_softc *sc)
   3540 {
   3541 	device_t dev = sc->dev;
   3542 	int want, queues, msgs;
   3543 
   3544 	/* Must have at least 2 MSI-X vectors */
   3545 	msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag);
   3546 	if (msgs < 2) {
   3547 		aprint_error_dev(dev, "MSI-X config error\n");
   3548 		return (ENXIO);
   3549 	}
   3550 	msgs = MIN(msgs, IXG_MAX_NINTR);
   3551 
   3552 	/* Figure out a reasonable auto config value */
   3553 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   3554 
   3555 	if (ixv_num_queues != 0)
   3556 		queues = ixv_num_queues;
   3557 	else if (queues > IXGBE_VF_MAX_TX_QUEUES)
   3558 		queues = IXGBE_VF_MAX_TX_QUEUES;
   3559 
   3560 	/*
   3561 	 * Want vectors for the queues,
   3562 	 * plus an additional for mailbox.
   3563 	 */
   3564 	want = queues + 1;
   3565 	if (msgs >= want)
   3566 		msgs = want;
   3567 	else {
   3568 		aprint_error_dev(dev,
   3569 		    "MSI-X Configuration Problem, "
   3570 		    "%d vectors available but %d wanted!\n", msgs, want);
   3571 		return -1;
   3572 	}
   3573 
   3574 	aprint_normal_dev(dev,
   3575 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   3576 	sc->num_queues = queues;
   3577 
   3578 	return (0);
   3579 } /* ixv_configure_interrupts */
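
        /*
         * Worked example of the sizing above (hypothetical numbers): on
         * an 8-CPU machine whose VF exposes 3 MSI-X vectors, queues is
         * clamped to msgs - 1 = 2, want becomes 2 + 1 = 3, and all three
         * vectors are used: two for queue pairs, one for the mailbox.
         */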
   3580 
   3581 
   3582 /************************************************************************
   3583  * ixv_handle_admin - Tasklet handler for MSI-X MBX interrupts
   3584  *
   3585  *   Done outside of interrupt context since the driver might sleep
   3586  ************************************************************************/
   3587 static void
   3588 ixv_handle_admin(struct work *wk, void *context)
   3589 {
   3590 	struct ixgbe_softc *sc = context;
   3591 	struct ixgbe_hw	*hw = &sc->hw;
   3592 
   3593 	IXGBE_CORE_LOCK(sc);
   3594 
   3595 	IXGBE_EVC_ADD(&sc->link_workev, 1);
   3596 	hw->mac.ops.check_link(hw, &sc->link_speed,
   3597 	    &sc->link_up, FALSE);
   3598 	ixv_update_link_status(sc);
   3599 
   3600 	sc->task_requests = 0;
   3601 	atomic_store_relaxed(&sc->admin_pending, 0);
   3602 
   3603 	/* Re-enable interrupts */
   3604 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->vector));
   3605 
   3606 	IXGBE_CORE_UNLOCK(sc);
   3607 } /* ixv_handle_admin */
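
        /*
         * Only the mailbox/link vector bit (1 << sc->vector) is
         * re-enabled in VTEIMS here; the per-queue vectors are re-armed
         * separately through ixv_enable_queue() from the queue handler.
         */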
   3608 
   3609 /************************************************************************
   3610  * ixv_check_link - Used in the local timer to poll for link changes
   3611  ************************************************************************/
   3612 static s32
   3613 ixv_check_link(struct ixgbe_softc *sc)
   3614 {
   3615 	s32 error;
   3616 
   3617 	KASSERT(mutex_owned(&sc->core_mtx));
   3618 
   3619 	sc->hw.mac.get_link_status = TRUE;
   3620 
   3621 	error = sc->hw.mac.ops.check_link(&sc->hw,
   3622 	    &sc->link_speed, &sc->link_up, FALSE);
   3623 	ixv_update_link_status(sc);
   3624 
   3625 	return error;
   3626 } /* ixv_check_link */
   3627