ixv.c revision 1.22
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 282289 2015-04-30 22:53:27Z erj $*/
     34 /*$NetBSD: ixv.c,v 1.22 2016/12/01 06:56:28 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixgbe.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.2.5";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
     50  *  Used by probe to select devices to load on
     51  *  Last field stores an index into ixv_strings
     52  *  Last entry must be all 0s
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     63 	/* required last entry */
     64 	{0, 0, 0, 0, 0}
     65 };
     66 
     67 /*********************************************************************
     68  *  Table of branding strings
     69  *********************************************************************/
     70 
     71 static const char    *ixv_strings[] = {
     72 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     73 };
     74 
     75 /*********************************************************************
     76  *  Function prototypes
     77  *********************************************************************/
     78 static int      ixv_probe(device_t, cfdata_t, void *);
     79 static void	ixv_attach(device_t, device_t, void *);
     80 static int      ixv_detach(device_t, int);
     81 #if 0
     82 static int      ixv_shutdown(device_t);
     83 #endif
     84 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     85 static int	ixv_init(struct ifnet *);
     86 static void	ixv_init_locked(struct adapter *);
     87 static void     ixv_stop(void *);
     88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     89 static int      ixv_media_change(struct ifnet *);
     90 static void     ixv_identify_hardware(struct adapter *);
     91 static int      ixv_allocate_pci_resources(struct adapter *,
     92 		    const struct pci_attach_args *);
     93 static int      ixv_allocate_msix(struct adapter *,
     94 		    const struct pci_attach_args *);
     95 static int	ixv_setup_msix(struct adapter *);
     96 static void	ixv_free_pci_resources(struct adapter *);
     97 static void     ixv_local_timer(void *);
     98 static void     ixv_local_timer_locked(void *);
     99 static void     ixv_setup_interface(device_t, struct adapter *);
    100 static void     ixv_config_link(struct adapter *);
    101 
    102 static void     ixv_initialize_transmit_units(struct adapter *);
    103 static void     ixv_initialize_receive_units(struct adapter *);
    104 
    105 static void     ixv_enable_intr(struct adapter *);
    106 static void     ixv_disable_intr(struct adapter *);
    107 static void     ixv_set_multi(struct adapter *);
    108 static void     ixv_update_link_status(struct adapter *);
    109 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    110 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    111 static void	ixv_configure_ivars(struct adapter *);
    112 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    113 
    114 static void	ixv_setup_vlan_support(struct adapter *);
    115 #if 0
    116 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    117 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    118 #endif
    119 
    120 static void	ixv_save_stats(struct adapter *);
    121 static void	ixv_init_stats(struct adapter *);
    122 static void	ixv_update_stats(struct adapter *);
    123 static void	ixv_add_stats_sysctls(struct adapter *);
    124 
    125 /* The MSI/X Interrupt handlers */
    126 static int	ixv_msix_que(void *);
    127 static int	ixv_msix_mbx(void *);
    128 
    129 /* Deferred interrupt tasklets */
    130 static void	ixv_handle_que(void *);
    131 static void	ixv_handle_mbx(void *);
    132 
    133 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    134 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    135 
    136 /*********************************************************************
    137  *  FreeBSD Device Interface Entry Points
    138  *********************************************************************/
    139 
    140 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    141     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    142     DVF_DETACH_SHUTDOWN);
    143 
     144 #if 0
    145 static device_method_t ixv_methods[] = {
    146 	/* Device interface */
    147 	DEVMETHOD(device_probe, ixv_probe),
    148 	DEVMETHOD(device_attach, ixv_attach),
    149 	DEVMETHOD(device_detach, ixv_detach),
    150 	DEVMETHOD(device_shutdown, ixv_shutdown),
    151 	DEVMETHOD_END
    152 };
    153 #endif
    154 
    155 #if 0
    156 static driver_t ixv_driver = {
    157 	"ixv", ixv_methods, sizeof(struct adapter),
    158 };
    159 
    160 devclass_t ixv_devclass;
    161 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    162 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    163 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    164 /* XXX depend on 'ix' ? */
    165 #endif
    166 
    167 /*
    168 ** TUNEABLE PARAMETERS:
    169 */
    170 
    171 /*
    172 ** AIM: Adaptive Interrupt Moderation
    173 ** which means that the interrupt rate
    174 ** is varied over time based on the
    175 ** traffic for that interrupt vector
    176 */
    177 static int ixv_enable_aim = FALSE;
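         /*
         ** TUNABLE_INT() is stubbed out to nothing below: NetBSD has no
         ** FreeBSD-style loader tunables, so these defaults can only be
         ** changed at compile time, or at run time where a sysctl node
         ** is created for them (e.g. enable_aim in ixv_sysctl_attach()).
         */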
    178 #define	TUNABLE_INT(__x, __y)
    179 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    180 
    181 /* How many packets rxeof tries to clean at a time */
    182 static int ixv_rx_process_limit = 256;
    183 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    184 
    185 /* How many packets txeof tries to clean at a time */
    186 static int ixv_tx_process_limit = 256;
    187 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    188 
    189 /*
    190 ** Number of TX descriptors per ring,
    191 ** setting higher than RX as this seems
    192 ** the better performing choice.
    193 */
    194 static int ixv_txd = DEFAULT_TXD;
    195 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    196 
    197 /* Number of RX descriptors per ring */
    198 static int ixv_rxd = DEFAULT_RXD;
    199 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    200 
    201 /*
     202 ** Shadow VFTA table; this is needed because
    203 ** the real filter table gets cleared during
    204 ** a soft reset and we need to repopulate it.
    205 */
    206 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    207 
    208 /*********************************************************************
    209  *  Device identification routine
    210  *
     211  *  ixv_probe determines whether the driver should be loaded on
     212  *  the adapter, based on the adapter's PCI vendor/device ID.
    213  *
    214  *  return 1 on success, 0 on failure
    215  *********************************************************************/
    216 
    217 static int
    218 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    219 {
    220 #ifdef __HAVE_PCI_MSI_MSIX
    221 	const struct pci_attach_args *pa = aux;
    222 
    223 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    224 #else
    225 	return 0;
    226 #endif
    227 }
    228 
    229 static ixgbe_vendor_info_t *
    230 ixv_lookup(const struct pci_attach_args *pa)
    231 {
    232 	pcireg_t subid;
    233 	ixgbe_vendor_info_t *ent;
    234 
    235 	INIT_DEBUGOUT("ixv_probe: begin");
    236 
    237 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    238 		return NULL;
    239 
    240 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    241 
    242 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    243 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    244 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    245 
    246 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    247 		     (ent->subvendor_id == 0)) &&
    248 
    249 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    250 		     (ent->subdevice_id == 0))) {
    251 			return ent;
    252 		}
    253 	}
    254 	return NULL;
    255 }
    256 
    257 
    258 static void
    259 ixv_sysctl_attach(struct adapter *adapter)
    260 {
    261 	struct sysctllog **log;
    262 	const struct sysctlnode *rnode, *cnode;
    263 	device_t dev;
    264 
    265 	dev = adapter->dev;
    266 	log = &adapter->sysctllog;
    267 
    268 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    269 		aprint_error_dev(dev, "could not create sysctl root\n");
    270 		return;
    271 	}
    272 
    273 	if (sysctl_createv(log, 0, &rnode, &cnode,
    274 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    275 	    "debug", SYSCTL_DESCR("Debug Info"),
    276 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    277 		aprint_error_dev(dev, "could not create sysctl\n");
    278 
    279 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    280 	 * XXX It's that way in the FreeBSD driver that this derives from.
    281 	 */
    282 	if (sysctl_createv(log, 0, &rnode, &cnode,
    283 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    284 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    285 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    286 		aprint_error_dev(dev, "could not create sysctl\n");
    287 }
    288 
    289 /*********************************************************************
    290  *  Device initialization routine
    291  *
    292  *  The attach entry point is called when the driver is being loaded.
    293  *  This routine identifies the type of hardware, allocates all resources
    294  *  and initializes the hardware.
    295  *
    296  *  return 0 on success, positive on failure
    297  *********************************************************************/
    298 
    299 static void
    300 ixv_attach(device_t parent, device_t dev, void *aux)
    301 {
    302 	struct adapter *adapter;
    303 	struct ixgbe_hw *hw;
    304 	int             error = 0;
    305 	ixgbe_vendor_info_t *ent;
    306 	const struct pci_attach_args *pa = aux;
    307 
    308 	INIT_DEBUGOUT("ixv_attach: begin");
    309 
    310 	/* Allocate, clear, and link in our adapter structure */
    311 	adapter = device_private(dev);
    312 	adapter->dev = adapter->osdep.dev = dev;
    313 	hw = &adapter->hw;
    314 	adapter->osdep.pc = pa->pa_pc;
    315 	adapter->osdep.tag = pa->pa_tag;
    316 	adapter->osdep.dmat = pa->pa_dmat;
    317 	adapter->osdep.attached = false;
    318 
    319 	ent = ixv_lookup(pa);
    320 
    321 	KASSERT(ent != NULL);
    322 
    323 	aprint_normal(": %s, Version - %s\n",
    324 	    ixv_strings[ent->index], ixv_driver_version);
    325 
    326 	/* Core Lock Init*/
    327 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    328 
    329 	/* SYSCTL APIs */
    330 	ixv_sysctl_attach(adapter);
    331 
    332 	/* Set up the timer callout */
    333 	callout_init(&adapter->timer, 0);
    334 
    335 	/* Determine hardware revision */
    336 	ixv_identify_hardware(adapter);
    337 
    338 	/* Do base PCI setup - map BAR0 */
    339 	if (ixv_allocate_pci_resources(adapter, pa)) {
    340 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    341 		error = ENXIO;
    342 		goto err_out;
    343 	}
    344 
    345 	/* Do descriptor calc and sanity checks */
    346 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    347 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    348 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    349 		adapter->num_tx_desc = DEFAULT_TXD;
    350 	} else
    351 		adapter->num_tx_desc = ixv_txd;
    352 
    353 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    354 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    355 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    356 		adapter->num_rx_desc = DEFAULT_RXD;
    357 	} else
    358 		adapter->num_rx_desc = ixv_rxd;
    359 
    360 	/* Allocate our TX/RX Queues */
    361 	if (ixgbe_allocate_queues(adapter)) {
    362 		error = ENOMEM;
    363 		goto err_out;
    364 	}
    365 
    366 	/*
     367 	** Initialize the shared code; at
     368 	** this point the MAC type is set.
    369 	*/
    370 	error = ixgbe_init_shared_code(hw);
    371 	if (error) {
    372 		aprint_error_dev(dev,"Shared Code Initialization Failure\n");
    373 		error = EIO;
    374 		goto err_late;
    375 	}
    376 
    377 	/* Setup the mailbox */
    378 	ixgbe_init_mbx_params_vf(hw);
    379 
    380 	ixgbe_reset_hw(hw);
    381 
    382 	error = ixgbe_init_hw(hw);
    383 	if (error) {
    384 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    385 		error = EIO;
    386 		goto err_late;
    387 	}
    388 
    389 	error = ixv_allocate_msix(adapter, pa);
    390 	if (error)
    391 		goto err_late;
    392 
    393 	/* If no mac address was assigned, make a random one */
    394 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    395 		u8 addr[ETHER_ADDR_LEN];
    396 		uint64_t rndval = cprng_fast64();
    397 
    398 		memcpy(addr, &rndval, sizeof(addr));
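         		/* Clear the multicast bit and set the locally administered bit */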
    399 		addr[0] &= 0xFE;
    400 		addr[0] |= 0x02;
    401 		bcopy(addr, hw->mac.addr, sizeof(addr));
    402 	}
    403 
    404 	/* Setup OS specific network interface */
    405 	ixv_setup_interface(dev, adapter);
    406 
    407 	/* Do the stats setup */
    408 	ixv_save_stats(adapter);
    409 	ixv_init_stats(adapter);
    410 	ixv_add_stats_sysctls(adapter);
    411 
    412 	/* Register for VLAN events */
    413 #if 0 /* XXX delete after write? */
    414 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    415 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    416 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    417 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    418 #endif
    419 
    420 	INIT_DEBUGOUT("ixv_attach: end");
    421 	adapter->osdep.attached = true;
    422 	return;
    423 
    424 err_late:
    425 	ixgbe_free_transmit_structures(adapter);
    426 	ixgbe_free_receive_structures(adapter);
    427 err_out:
    428 	ixv_free_pci_resources(adapter);
    429 	return;
    430 
    431 }
    432 
    433 /*********************************************************************
    434  *  Device removal routine
    435  *
    436  *  The detach entry point is called when the driver is being removed.
    437  *  This routine stops the adapter and deallocates all the resources
    438  *  that were allocated for driver operation.
    439  *
    440  *  return 0 on success, positive on failure
    441  *********************************************************************/
    442 
    443 static int
    444 ixv_detach(device_t dev, int flags)
    445 {
    446 	struct adapter *adapter = device_private(dev);
    447 	struct ix_queue *que = adapter->queues;
    448 
    449 	INIT_DEBUGOUT("ixv_detach: begin");
    450 	if (adapter->osdep.attached == false)
    451 		return 0;
    452 
    453 #if NVLAN > 0
    454 	/* Make sure VLANS are not using driver */
    455 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    456 		;	/* nothing to do: no VLANs */
    457 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    458 		vlan_ifdetach(adapter->ifp);
    459 	else {
    460 		aprint_error_dev(dev, "VLANs in use\n");
    461 		return EBUSY;
    462 	}
    463 #endif
    464 
    465 	IXGBE_CORE_LOCK(adapter);
    466 	ixv_stop(adapter);
    467 	IXGBE_CORE_UNLOCK(adapter);
    468 
    469 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    470 #ifndef IXGBE_LEGACY_TX
     471 		softint_disestablish(que->txr->txq_si);
    472 #endif
    473 		softint_disestablish(que->que_si);
    474 	}
    475 
    476 	/* Drain the Mailbox(link) queue */
    477 	softint_disestablish(adapter->link_si);
    478 
    479 	/* Unregister VLAN events */
    480 #if 0 /* XXX msaitoh delete after write? */
    481 	if (adapter->vlan_attach != NULL)
    482 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    483 	if (adapter->vlan_detach != NULL)
    484 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    485 #endif
    486 
    487 	ether_ifdetach(adapter->ifp);
    488 	callout_halt(&adapter->timer, NULL);
    489 	ixv_free_pci_resources(adapter);
    490 #if 0 /* XXX the NetBSD port is probably missing something here */
    491 	bus_generic_detach(dev);
    492 #endif
    493 	if_detach(adapter->ifp);
    494 
    495 	ixgbe_free_transmit_structures(adapter);
    496 	ixgbe_free_receive_structures(adapter);
    497 
    498 	IXGBE_CORE_LOCK_DESTROY(adapter);
    499 	return (0);
    500 }
    501 
    502 /*********************************************************************
    503  *
    504  *  Shutdown entry point
    505  *
    506  **********************************************************************/
    507 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    508 static int
    509 ixv_shutdown(device_t dev)
    510 {
    511 	struct adapter *adapter = device_private(dev);
    512 	IXGBE_CORE_LOCK(adapter);
    513 	ixv_stop(adapter);
    514 	IXGBE_CORE_UNLOCK(adapter);
    515 	return (0);
    516 }
    517 #endif
    518 
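         /*
         ** Interface flags callback, registered with ether_set_ifflags_cb()
         ** in ixv_setup_interface().  Returning ENETRESET asks the caller
         ** to reinitialize the interface so that non-trivial flag changes
         ** take effect.
         */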
    519 static int
    520 ixv_ifflags_cb(struct ethercom *ec)
    521 {
    522 	struct ifnet *ifp = &ec->ec_if;
    523 	struct adapter *adapter = ifp->if_softc;
    524 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    525 
    526 	IXGBE_CORE_LOCK(adapter);
    527 
    528 	if (change != 0)
    529 		adapter->if_flags = ifp->if_flags;
    530 
    531 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    532 		rc = ENETRESET;
    533 
    534 	IXGBE_CORE_UNLOCK(adapter);
    535 
    536 	return rc;
    537 }
    538 
    539 /*********************************************************************
    540  *  Ioctl entry point
    541  *
    542  *  ixv_ioctl is called when the user wants to configure the
    543  *  interface.
    544  *
    545  *  return 0 on success, positive on failure
    546  **********************************************************************/
    547 
    548 static int
    549 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    550 {
    551 	struct adapter	*adapter = ifp->if_softc;
    552 	struct ifcapreq *ifcr = data;
    553 	struct ifreq	*ifr = (struct ifreq *) data;
    554 	int             error = 0;
    555 	int l4csum_en;
    556 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    557 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    558 
    559 	switch (command) {
    560 	case SIOCSIFFLAGS:
    561 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    562 		break;
    563 	case SIOCADDMULTI:
    564 	case SIOCDELMULTI:
    565 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    566 		break;
    567 	case SIOCSIFMEDIA:
    568 	case SIOCGIFMEDIA:
    569 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    570 		break;
    571 	case SIOCSIFCAP:
    572 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    573 		break;
    574 	case SIOCSIFMTU:
    575 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    576 		break;
    577 	default:
    578 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
    579 		break;
    580 	}
    581 
    582 	switch (command) {
    583 	case SIOCSIFMEDIA:
    584 	case SIOCGIFMEDIA:
    585 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    586 	case SIOCSIFCAP:
    587 		/* Layer-4 Rx checksum offload has to be turned on and
    588 		 * off as a unit.
    589 		 */
    590 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    591 		if (l4csum_en != l4csum && l4csum_en != 0)
    592 			return EINVAL;
    593 		/*FALLTHROUGH*/
    594 	case SIOCADDMULTI:
    595 	case SIOCDELMULTI:
    596 	case SIOCSIFFLAGS:
    597 	case SIOCSIFMTU:
    598 	default:
    599 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    600 			return error;
    601 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    602 			;
    603 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    604 			IXGBE_CORE_LOCK(adapter);
    605 			ixv_init_locked(adapter);
    606 			IXGBE_CORE_UNLOCK(adapter);
    607 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    608 			/*
    609 			 * Multicast list has changed; set the hardware filter
    610 			 * accordingly.
    611 			 */
    612 			IXGBE_CORE_LOCK(adapter);
    613 			ixv_disable_intr(adapter);
    614 			ixv_set_multi(adapter);
    615 			ixv_enable_intr(adapter);
    616 			IXGBE_CORE_UNLOCK(adapter);
    617 		}
    618 		return 0;
    619 	}
    620 }
    621 
    622 /*********************************************************************
    623  *  Init entry point
    624  *
    625  *  This routine is used in two ways. It is used by the stack as
    626  *  init entry point in network interface structure. It is also used
    627  *  by the driver as a hw/sw initialization routine to get to a
    628  *  consistent state.
    629  *
    630  *  return 0 on success, positive on failure
    631  **********************************************************************/
    632 #define IXGBE_MHADD_MFS_SHIFT 16
    633 
    634 static void
    635 ixv_init_locked(struct adapter *adapter)
    636 {
    637 	struct ifnet	*ifp = adapter->ifp;
    638 	device_t 	dev = adapter->dev;
    639 	struct ixgbe_hw *hw = &adapter->hw;
    640 	u32		mhadd, gpie;
    641 
    642 	INIT_DEBUGOUT("ixv_init: begin");
    643 	KASSERT(mutex_owned(&adapter->core_mtx));
    644 	hw->adapter_stopped = FALSE;
    645 	ixgbe_stop_adapter(hw);
     646 	callout_stop(&adapter->timer);
     647 
     648 	/* reprogram the RAR[0] in case user changed it. */
     649 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
     650 
     651 	/* Get the latest mac address, User can use a LAA */
     652 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
     653 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
     654 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    655 	hw->addr_ctrl.rar_used_count = 1;
    656 
    657 	/* Prepare transmit descriptors and buffers */
    658 	if (ixgbe_setup_transmit_structures(adapter)) {
    659 		aprint_error_dev(dev,"Could not setup transmit structures\n");
    660 		ixv_stop(adapter);
    661 		return;
    662 	}
    663 
    664 	ixgbe_reset_hw(hw);
    665 	ixv_initialize_transmit_units(adapter);
    666 
    667 	/* Setup Multicast table */
    668 	ixv_set_multi(adapter);
    669 
    670 	/*
    671 	** Determine the correct mbuf pool
    672 	** for doing jumbo/headersplit
    673 	*/
    674 	if (ifp->if_mtu > ETHERMTU)
    675 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    676 	else
    677 		adapter->rx_mbuf_sz = MCLBYTES;
    678 
    679 	/* Prepare receive descriptors and buffers */
    680 	if (ixgbe_setup_receive_structures(adapter)) {
    681 		device_printf(dev,"Could not setup receive structures\n");
    682 		ixv_stop(adapter);
    683 		return;
    684 	}
    685 
    686 	/* Configure RX settings */
    687 	ixv_initialize_receive_units(adapter);
    688 
    689 	/* Enable Enhanced MSIX mode */
    690 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    691 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    692 	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
     693 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
    694 
    695 #if 0 /* XXX isn't it required? -- msaitoh  */
    696 	/* Set the various hardware offload abilities */
    697 	ifp->if_hwassist = 0;
    698 	if (ifp->if_capenable & IFCAP_TSO4)
    699 		ifp->if_hwassist |= CSUM_TSO;
    700 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    701 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    702 #if __FreeBSD_version >= 800000
    703 		ifp->if_hwassist |= CSUM_SCTP;
    704 #endif
    705 	}
    706 #endif
    707 
    708 	/* Set MTU size */
    709 	if (ifp->if_mtu > ETHERMTU) {
    710 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
    711 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
    712 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
    713 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    714 	}
    715 
    716 	/* Set up VLAN offload and filter */
    717 	ixv_setup_vlan_support(adapter);
    718 
    719 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    720 
    721 	/* Set up MSI/X routing */
    722 	ixv_configure_ivars(adapter);
    723 
    724 	/* Set up auto-mask */
    725 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    726 
     727 	/* Set moderation on the Link interrupt */
     728 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    729 
    730 	/* Stats init */
    731 	ixv_init_stats(adapter);
    732 
    733 	/* Config/Enable Link */
    734 	ixv_config_link(adapter);
    735 
    736 	/* And now turn on interrupts */
    737 	ixv_enable_intr(adapter);
    738 
    739 	/* Now inform the stack we're ready */
    740 	ifp->if_flags |= IFF_RUNNING;
    741 	ifp->if_flags &= ~IFF_OACTIVE;
    742 
    743 	return;
    744 }
    745 
    746 static int
    747 ixv_init(struct ifnet *ifp)
    748 {
    749 	struct adapter *adapter = ifp->if_softc;
    750 
    751 	IXGBE_CORE_LOCK(adapter);
    752 	ixv_init_locked(adapter);
    753 	IXGBE_CORE_UNLOCK(adapter);
    754 	return 0;
    755 }
    756 
    757 
    758 /*
    759 **
    760 ** MSIX Interrupt Handlers and Tasklets
    761 **
    762 */
    763 
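         /*
         ** Per-queue interrupt mask helpers: a queue's MSIX vector is
         ** unmasked by setting its bit in VTEIMS and masked again by
         ** setting the same bit in VTEIMC.
         */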
    764 static inline void
    765 ixv_enable_queue(struct adapter *adapter, u32 vector)
    766 {
    767 	struct ixgbe_hw *hw = &adapter->hw;
    768 	u32	queue = 1 << vector;
    769 	u32	mask;
    770 
    771 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    772 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    773 }
    774 
    775 static inline void
    776 ixv_disable_queue(struct adapter *adapter, u32 vector)
    777 {
    778 	struct ixgbe_hw *hw = &adapter->hw;
    779 	u64	queue = (u64)(1 << vector);
    780 	u32	mask;
    781 
    782 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    783 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    784 }
    785 
    786 static inline void
    787 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    788 {
    789 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    790 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    791 }
    792 
    793 
    794 static void
    795 ixv_handle_que(void *context)
    796 {
    797 	struct ix_queue *que = context;
    798 	struct adapter  *adapter = que->adapter;
    799 	struct tx_ring	*txr = que->txr;
    800 	struct ifnet    *ifp = adapter->ifp;
    801 	bool		more;
    802 
    803 	if (ifp->if_flags & IFF_RUNNING) {
    804 		more = ixgbe_rxeof(que);
    805 		IXGBE_TX_LOCK(txr);
    806 		ixgbe_txeof(txr);
    807 #if __FreeBSD_version >= 800000
    808 		if (!drbr_empty(ifp, txr->br))
    809 			ixgbe_mq_start_locked(ifp, txr);
    810 #else
    811 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
    812 			ixgbe_start_locked(txr, ifp);
    813 #endif
    814 		IXGBE_TX_UNLOCK(txr);
    815 		if (more) {
    816 			adapter->req.ev_count++;
    817 			softint_schedule(que->que_si);
    818 			return;
    819 		}
    820 	}
    821 
    822 	/* Reenable this interrupt */
    823 	ixv_enable_queue(adapter, que->msix);
    824 	return;
    825 }
    826 
    827 /*********************************************************************
    828  *
    829  *  MSI Queue Interrupt Service routine
    830  *
    831  **********************************************************************/
    832 int
    833 ixv_msix_que(void *arg)
    834 {
    835 	struct ix_queue	*que = arg;
    836 	struct adapter  *adapter = que->adapter;
    837 	struct ifnet    *ifp = adapter->ifp;
    838 	struct tx_ring	*txr = que->txr;
    839 	struct rx_ring	*rxr = que->rxr;
    840 	bool		more;
    841 	u32		newitr = 0;
    842 
    843 	ixv_disable_queue(adapter, que->msix);
    844 	++que->irqs.ev_count;
    845 
    846 	more = ixgbe_rxeof(que);
    847 
    848 	IXGBE_TX_LOCK(txr);
    849 	ixgbe_txeof(txr);
    850 	/*
    851 	** Make certain that if the stack
    852 	** has anything queued the task gets
    853 	** scheduled to handle it.
    854 	*/
    855 #ifdef IXGBE_LEGACY_TX
    856 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
    857 		ixgbe_start_locked(txr, ifp);
    858 #else
    859 	if (!drbr_empty(adapter->ifp, txr->br))
    860 		ixgbe_mq_start_locked(ifp, txr);
    861 #endif
    862 	IXGBE_TX_UNLOCK(txr);
    863 
    864 	/* Do AIM now? */
    865 
    866 	if (ixv_enable_aim == FALSE)
    867 		goto no_calc;
    868 	/*
    869 	** Do Adaptive Interrupt Moderation:
     870 	**  - Write out last calculated setting
     871 	**  - Calculate based on average size over
     872 	**    the last interval.
     873 	*/
     874 	if (que->eitr_setting)
     875 		IXGBE_WRITE_REG(&adapter->hw,
     876 		    IXGBE_VTEITR(que->msix),
     877 		    que->eitr_setting);
     878 
     879 	que->eitr_setting = 0;
     880 
     881 	/* Idle, do nothing */
     882 	if ((txr->bytes == 0) && (rxr->bytes == 0))
     883 		goto no_calc;
     884 
     885 	if ((txr->bytes) && (txr->packets))
     886 		newitr = txr->bytes/txr->packets;
    887 	if ((rxr->bytes) && (rxr->packets))
    888 		newitr = max(newitr,
    889 		    (rxr->bytes / rxr->packets));
    890 	newitr += 24; /* account for hardware frame, crc */
    891 
    892 	/* set an upper boundary */
    893 	newitr = min(newitr, 3000);
    894 
    895 	/* Be nice to the mid range */
    896 	if ((newitr > 300) && (newitr < 1200))
    897 		newitr = (newitr / 3);
    898 	else
    899 		newitr = (newitr / 2);
    900 
    901 	newitr |= newitr << 16;
    902 
     903 	/* save for next interrupt */
     904 	que->eitr_setting = newitr;
     905 
     906 	/* Reset state */
     907 	txr->bytes = 0;
     908 	txr->packets = 0;
     909 	rxr->bytes = 0;
     910 	rxr->packets = 0;
    911 
    912 no_calc:
    913 	if (more)
    914 		softint_schedule(que->que_si);
    915 	else /* Reenable this interrupt */
    916 		ixv_enable_queue(adapter, que->msix);
    917 	return 1;
    918 }
    919 
    920 static int
    921 ixv_msix_mbx(void *arg)
    922 {
    923 	struct adapter	*adapter = arg;
    924 	struct ixgbe_hw *hw = &adapter->hw;
    925 	u32		reg;
    926 
    927 	++adapter->link_irq.ev_count;
    928 
    929 	/* First get the cause */
    930 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    931 	/* Clear interrupt with write */
    932 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    933 
    934 	/* Link status change */
    935 	if (reg & IXGBE_EICR_LSC)
    936 		softint_schedule(adapter->link_si);
    937 
    938 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    939 	return 1;
    940 }
    941 
    942 /*********************************************************************
    943  *
    944  *  Media Ioctl callback
    945  *
    946  *  This routine is called whenever the user queries the status of
    947  *  the interface using ifconfig.
    948  *
    949  **********************************************************************/
    950 static void
    951 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
    952 {
    953 	struct adapter *adapter = ifp->if_softc;
    954 
    955 	INIT_DEBUGOUT("ixv_media_status: begin");
    956 	IXGBE_CORE_LOCK(adapter);
    957 	ixv_update_link_status(adapter);
    958 
    959 	ifmr->ifm_status = IFM_AVALID;
    960 	ifmr->ifm_active = IFM_ETHER;
    961 
    962 	if (!adapter->link_active) {
    963 		IXGBE_CORE_UNLOCK(adapter);
    964 		return;
    965 	}
    966 
    967 	ifmr->ifm_status |= IFM_ACTIVE;
    968 
    969 	switch (adapter->link_speed) {
    970 		case IXGBE_LINK_SPEED_1GB_FULL:
    971 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    972 			break;
    973 		case IXGBE_LINK_SPEED_10GB_FULL:
    974 			ifmr->ifm_active |= IFM_FDX;
    975 			break;
    976 	}
    977 
    978 	IXGBE_CORE_UNLOCK(adapter);
    979 
    980 	return;
    981 }
    982 
    983 /*********************************************************************
    984  *
    985  *  Media Ioctl callback
    986  *
    987  *  This routine is called when the user changes speed/duplex using
     988  *  the media/mediaopt options with ifconfig.
    989  *
    990  **********************************************************************/
    991 static int
    992 ixv_media_change(struct ifnet * ifp)
    993 {
    994 	struct adapter *adapter = ifp->if_softc;
    995 	struct ifmedia *ifm = &adapter->media;
    996 
    997 	INIT_DEBUGOUT("ixv_media_change: begin");
    998 
    999 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1000 		return (EINVAL);
   1001 
    1002 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    1003 	case IFM_AUTO:
    1004 		break;
    1005 	default:
    1006 		device_printf(adapter->dev, "Only auto media type\n");
    1007 		return (EINVAL);
    1008 	}
   1009 
   1010 	return (0);
   1011 }
   1012 
   1013 
   1014 /*********************************************************************
   1015  *  Multicast Update
   1016  *
   1017  *  This routine is called whenever multicast address list is updated.
   1018  *
   1019  **********************************************************************/
   1020 #define IXGBE_RAR_ENTRIES 16
   1021 
   1022 static void
   1023 ixv_set_multi(struct adapter *adapter)
   1024 {
   1025 	struct ether_multi *enm;
   1026 	struct ether_multistep step;
   1027 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1028 	u8	*update_ptr;
   1029 	int	mcnt = 0;
   1030 	struct ethercom *ec = &adapter->osdep.ec;
   1031 
   1032 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1033 
   1034 	ETHER_FIRST_MULTI(step, ec, enm);
   1035 	while (enm != NULL) {
   1036 		bcopy(enm->enm_addrlo,
   1037 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1038 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1039 		mcnt++;
   1040 		/* XXX This might be required --msaitoh */
   1041 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1042 			break;
   1043 		ETHER_NEXT_MULTI(step, enm);
   1044 	}
   1045 
   1046 	update_ptr = mta;
   1047 
   1048 	ixgbe_update_mc_addr_list(&adapter->hw,
   1049 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1050 
   1051 	return;
   1052 }
   1053 
   1054 /*
   1055  * This is an iterator function now needed by the multicast
   1056  * shared code. It simply feeds the shared code routine the
   1057  * addresses in the array of ixv_set_multi() one by one.
   1058  */
   1059 static u8 *
   1060 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1061 {
   1062 	u8 *addr = *update_ptr;
   1063 	u8 *newptr;
   1064 	*vmdq = 0;
   1065 
   1066 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1067 	*update_ptr = newptr;
   1068 	return addr;
   1069 }
   1070 
   1071 /*********************************************************************
   1072  *  Timer routine
   1073  *
    1074  *  This routine checks for link status, updates statistics,
   1075  *  and runs the watchdog check.
   1076  *
   1077  **********************************************************************/
   1078 
   1079 static void
   1080 ixv_local_timer(void *arg)
   1081 {
   1082 	struct adapter *adapter = arg;
   1083 
   1084 	IXGBE_CORE_LOCK(adapter);
   1085 	ixv_local_timer_locked(adapter);
   1086 	IXGBE_CORE_UNLOCK(adapter);
   1087 }
   1088 
   1089 static void
   1090 ixv_local_timer_locked(void *arg)
   1091 {
   1092 	struct adapter	*adapter = arg;
   1093 	device_t	dev = adapter->dev;
   1094 	struct ix_queue	*que = adapter->queues;
   1095 	u64		queues = 0;
   1096 	int		hung = 0;
   1097 
   1098 	KASSERT(mutex_owned(&adapter->core_mtx));
   1099 
   1100 	ixv_update_link_status(adapter);
   1101 
   1102 	/* Stats Update */
   1103 	ixv_update_stats(adapter);
   1104 
   1105 	/*
   1106 	** Check the TX queues status
   1107 	**      - mark hung queues so we don't schedule on them
   1108 	**      - watchdog only if all queues show hung
   1109 	*/
   1110 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1111 		/* Keep track of queues with work for soft irq */
   1112 		if (que->txr->busy)
   1113 			queues |= ((u64)1 << que->me);
   1114 		/*
    1115 		** Each time txeof runs without cleaning, while there
    1116 		** are still uncleaned descriptors, it increments busy.
    1117 		** Once busy reaches the MAX we declare the queue hung.
   1118 		*/
   1119 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1120 			++hung;
   1121 			/* Mark the queue as inactive */
   1122 			adapter->active_queues &= ~((u64)1 << que->me);
   1123 			continue;
   1124 		} else {
   1125 			/* Check if we've come back from hung */
   1126 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
    1127 				adapter->active_queues |= ((u64)1 << que->me);
   1128 		}
   1129 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1130 			device_printf(dev,"Warning queue %d "
   1131 			    "appears to be hung!\n", i);
   1132 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1133 			++hung;
   1134 		}
   1135 
   1136 	}
   1137 
    1138 	/* Only truly watchdog if all queues show hung */
   1139 	if (hung == adapter->num_queues)
   1140 		goto watchdog;
   1141 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1142 		ixv_rearm_queues(adapter, queues);
   1143 	}
   1144 
   1145 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1146 	return;
   1147 
   1148 watchdog:
   1149 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1150 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1151 	adapter->watchdog_events.ev_count++;
   1152 	ixv_init_locked(adapter);
   1153 }
   1154 
   1155 /*
   1156 ** Note: this routine updates the OS on the link state
   1157 **	the real check of the hardware only happens with
   1158 **	a link interrupt.
   1159 */
   1160 static void
   1161 ixv_update_link_status(struct adapter *adapter)
   1162 {
   1163 	struct ifnet	*ifp = adapter->ifp;
   1164 	device_t dev = adapter->dev;
   1165 
   1166 	if (adapter->link_up){
   1167 		if (adapter->link_active == FALSE) {
   1168 			if (bootverbose)
   1169 				device_printf(dev,"Link is up %d Gbps %s \n",
   1170 				    ((adapter->link_speed == 128)? 10:1),
   1171 				    "Full Duplex");
   1172 			adapter->link_active = TRUE;
   1173 			if_link_state_change(ifp, LINK_STATE_UP);
   1174 		}
   1175 	} else { /* Link down */
   1176 		if (adapter->link_active == TRUE) {
   1177 			if (bootverbose)
   1178 				device_printf(dev,"Link is Down\n");
   1179 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1180 			adapter->link_active = FALSE;
   1181 		}
   1182 	}
   1183 
   1184 	return;
   1185 }
   1186 
   1187 
   1188 static void
   1189 ixv_ifstop(struct ifnet *ifp, int disable)
   1190 {
   1191 	struct adapter *adapter = ifp->if_softc;
   1192 
   1193 	IXGBE_CORE_LOCK(adapter);
   1194 	ixv_stop(adapter);
   1195 	IXGBE_CORE_UNLOCK(adapter);
   1196 }
   1197 
   1198 /*********************************************************************
   1199  *
   1200  *  This routine disables all traffic on the adapter by issuing a
   1201  *  global reset on the MAC and deallocates TX/RX buffers.
   1202  *
   1203  **********************************************************************/
   1204 
   1205 static void
   1206 ixv_stop(void *arg)
   1207 {
   1208 	struct ifnet   *ifp;
   1209 	struct adapter *adapter = arg;
   1210 	struct ixgbe_hw *hw = &adapter->hw;
   1211 	ifp = adapter->ifp;
   1212 
   1213 	KASSERT(mutex_owned(&adapter->core_mtx));
   1214 
   1215 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1216 	ixv_disable_intr(adapter);
   1217 
   1218 	/* Tell the stack that the interface is no longer active */
   1219 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1220 
   1221 	ixgbe_reset_hw(hw);
   1222 	adapter->hw.adapter_stopped = FALSE;
   1223 	ixgbe_stop_adapter(hw);
   1224 	callout_stop(&adapter->timer);
   1225 
   1226 	/* reprogram the RAR[0] in case user changed it. */
   1227 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1228 
   1229 	return;
   1230 }
   1231 
   1232 
   1233 /*********************************************************************
   1234  *
   1235  *  Determine hardware revision.
   1236  *
   1237  **********************************************************************/
   1238 static void
   1239 ixv_identify_hardware(struct adapter *adapter)
   1240 {
   1241 	pcitag_t tag;
   1242 	pci_chipset_tag_t pc;
   1243 	pcireg_t subid, id;
   1244 	struct ixgbe_hw *hw = &adapter->hw;
   1245 
   1246 	pc = adapter->osdep.pc;
   1247 	tag = adapter->osdep.tag;
   1248 
   1249 	/*
    1250 	** Make sure BUSMASTER is set; on a VM under
    1251 	** KVM it may not be, which will break things.
   1252 	*/
   1253 	ixgbe_pci_enable_busmaster(pc, tag);
   1254 
   1255 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1256 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1257 
   1258 	/* Save off the information about this board */
   1259 	hw->vendor_id = PCI_VENDOR(id);
   1260 	hw->device_id = PCI_PRODUCT(id);
   1261 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1262 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1263 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1264 
   1265 	/* We need this to determine device-specific things */
   1266 	ixgbe_set_mac_type(hw);
   1267 
   1268 	/* Set the right number of segments */
   1269 	adapter->num_segs = IXGBE_82599_SCATTER;
   1270 
   1271 	return;
   1272 }
   1273 
   1274 /*********************************************************************
   1275  *
   1276  *  Setup MSIX Interrupt resources and handlers
   1277  *
   1278  **********************************************************************/
   1279 static int
   1280 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1281 {
   1282 	device_t	dev = adapter->dev;
   1283 	struct ix_queue *que = adapter->queues;
   1284 	struct		tx_ring *txr = adapter->tx_rings;
   1285 	int 		error, rid, vector = 0;
   1286 	pci_chipset_tag_t pc;
   1287 	pcitag_t	tag;
   1288 	char intrbuf[PCI_INTRSTR_LEN];
   1289 	const char	*intrstr = NULL;
   1290 	kcpuset_t	*affinity;
   1291 	int		cpu_id = 0;
   1292 
   1293 	pc = adapter->osdep.pc;
   1294 	tag = adapter->osdep.tag;
   1295 
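         	/* One vector for each queue plus one for the mailbox */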
   1296 	if (pci_msix_alloc_exact(pa,
   1297 		&adapter->osdep.intrs, IXG_MSIX_NINTR) != 0)
   1298 		return (ENXIO);
   1299 
   1300 	kcpuset_create(&affinity, false);
   1301 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   1302 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1303 		    sizeof(intrbuf));
   1304 #ifdef IXV_MPSAFE
    1305 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1306 		    true);
   1307 #endif
   1308 		/* Set the handler function */
   1309 		adapter->osdep.ihs[i] = pci_intr_establish(pc,
   1310 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que);
   1311 		if (adapter->osdep.ihs[i] == NULL) {
   1312 			que->res = NULL;
   1313 			aprint_error_dev(dev,
   1314 			    "Failed to register QUE handler");
   1315 			kcpuset_destroy(affinity);
   1316 			return (ENXIO);
   1317 		}
   1318 		que->msix = vector;
    1319 		adapter->active_queues |= (u64)(1 << que->msix);
   1320 
   1321 		cpu_id = i;
   1322 		/* Round-robin affinity */
   1323 		kcpuset_zero(affinity);
   1324 		kcpuset_set(affinity, cpu_id % ncpu);
   1325 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1326 		    NULL);
   1327 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1328 		    intrstr);
   1329 		if (error == 0)
   1330 			aprint_normal(", bound queue %d to cpu %d\n",
   1331 			    i, cpu_id);
   1332 		else
   1333 			aprint_normal("\n");
   1334 
   1335 #ifndef IXGBE_LEGACY_TX
   1336 		txr->txq_si = softint_establish(SOFTINT_NET,
   1337 		    ixgbe_deferred_mq_start, txr);
   1338 #endif
   1339 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1340 		    que);
   1341 		if (que->que_si == NULL) {
   1342 			aprint_error_dev(dev,
   1343 			    "could not establish software interrupt\n");
   1344 		}
   1345 	}
   1346 
   1347 	/* and Mailbox */
   1348 	cpu_id++;
   1349 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1350 	    sizeof(intrbuf));
    1351 #ifdef IXV_MPSAFE
   1352 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   1353 #endif
   1354 	/* Set the mbx handler function */
   1355 	adapter->osdep.ihs[vector] = pci_intr_establish(pc,
   1356 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter);
   1357 	if (adapter->osdep.ihs[vector] == NULL) {
   1358 		adapter->res = NULL;
   1359 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1360 		kcpuset_destroy(affinity);
   1361 		return (ENXIO);
   1362 	}
   1363 	/* Round-robin affinity */
   1364 	kcpuset_zero(affinity);
   1365 	kcpuset_set(affinity, cpu_id % ncpu);
   1366 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1367 
   1368 	aprint_normal_dev(dev,
   1369 	    "for link, interrupting at %s, ", intrstr);
   1370 	if (error == 0) {
   1371 		aprint_normal("affinity to cpu %d\n", cpu_id);
   1372 	}
   1373 	adapter->vector = vector;
   1374 	/* Tasklets for Mailbox */
   1375 	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1376 	    adapter);
   1377 	/*
   1378 	** Due to a broken design QEMU will fail to properly
   1379 	** enable the guest for MSIX unless the vectors in
   1380 	** the table are all set up, so we must rewrite the
   1381 	** ENABLE in the MSIX control register again at this
   1382 	** point to cause it to successfully initialize us.
   1383 	*/
   1384 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1385 		int msix_ctrl;
   1386 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1387 		rid += PCI_MSIX_CTL;
   1388 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1389 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1390 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1391 	}
   1392 
   1393 	return (0);
   1394 }
   1395 
   1396 /*
   1397  * Setup MSIX resources, note that the VF
   1398  * device MUST use MSIX, there is no fallback.
   1399  */
   1400 static int
   1401 ixv_setup_msix(struct adapter *adapter)
   1402 {
   1403 	device_t dev = adapter->dev;
   1404 	int want, msgs;
   1405 
   1406 	/*
   1407 	** Want two vectors: one for a queue,
   1408 	** plus an additional for mailbox.
   1409 	*/
   1410 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1411 	if (msgs < IXG_MSIX_NINTR) {
   1412 		aprint_error_dev(dev,"MSIX config error\n");
   1413 		return (ENXIO);
   1414 	}
   1415 	want = MIN(msgs, IXG_MSIX_NINTR);
   1416 
   1417 	adapter->msix_mem = (void *)1; /* XXX */
   1418 	aprint_normal_dev(dev,
   1419 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1420 	return (want);
   1421 }
   1422 
   1423 
   1424 static int
   1425 ixv_allocate_pci_resources(struct adapter *adapter,
   1426     const struct pci_attach_args *pa)
   1427 {
   1428 	pcireg_t	memtype;
   1429 	device_t        dev = adapter->dev;
   1430 	bus_addr_t addr;
   1431 	int flags;
   1432 
   1433 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1434 
   1435 	switch (memtype) {
   1436 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1437 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1438 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1439 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1440 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1441 			goto map_err;
   1442 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1443 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1444 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1445 		}
   1446 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1447 		     adapter->osdep.mem_size, flags,
   1448 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1449 map_err:
   1450 			adapter->osdep.mem_size = 0;
   1451 			aprint_error_dev(dev, "unable to map BAR0\n");
   1452 			return ENXIO;
   1453 		}
   1454 		break;
   1455 	default:
   1456 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1457 		return ENXIO;
   1458 	}
   1459 
   1460 	adapter->num_queues = 1;
   1461 	adapter->hw.back = &adapter->osdep;
   1462 
   1463 	/*
   1464 	** Now setup MSI/X, should
   1465 	** return us the number of
   1466 	** configured vectors.
   1467 	*/
   1468 	adapter->msix = ixv_setup_msix(adapter);
   1469 	if (adapter->msix == ENXIO)
   1470 		return (ENXIO);
   1471 	else
   1472 		return (0);
   1473 }
   1474 
   1475 static void
   1476 ixv_free_pci_resources(struct adapter * adapter)
   1477 {
   1478 	struct 		ix_queue *que = adapter->queues;
   1479 	int		rid;
   1480 
   1481 	/*
   1482 	**  Release all msix queue resources:
   1483 	*/
   1484 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1485 		rid = que->msix + 1;
   1486 		if (que->res != NULL)
   1487 			pci_intr_disestablish(adapter->osdep.pc,
   1488 			    adapter->osdep.ihs[i]);
   1489 	}
   1490 
   1491 
   1492 	/* Clean the Legacy or Link interrupt last */
   1493 	if (adapter->vector) /* we are doing MSIX */
   1494 		rid = adapter->vector + 1;
   1495 	else
   1496 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
   1497 
   1498 	if (adapter->osdep.ihs[rid] != NULL)
   1499 		pci_intr_disestablish(adapter->osdep.pc,
   1500 		    adapter->osdep.ihs[rid]);
   1501 	adapter->osdep.ihs[rid] = NULL;
   1502 
   1503 #if defined(NETBSD_MSI_OR_MSIX)
   1504 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1505 	    adapter->osdep.nintrs);
   1506 #endif
   1507 
   1508 	if (adapter->osdep.mem_size != 0) {
   1509 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1510 		    adapter->osdep.mem_bus_space_handle,
   1511 		    adapter->osdep.mem_size);
   1512 	}
   1513 
   1514 	return;
   1515 }
   1516 
   1517 /*********************************************************************
   1518  *
   1519  *  Setup networking device structure and register an interface.
   1520  *
   1521  **********************************************************************/
   1522 static void
   1523 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1524 {
   1525 	struct ethercom *ec = &adapter->osdep.ec;
   1526 	struct ifnet   *ifp;
   1527 
   1528 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1529 
   1530 	ifp = adapter->ifp = &ec->ec_if;
   1531 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1532 	ifp->if_baudrate = 1000000000;
   1533 	ifp->if_init = ixv_init;
   1534 	ifp->if_stop = ixv_ifstop;
   1535 	ifp->if_softc = adapter;
   1536 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1537 	ifp->if_ioctl = ixv_ioctl;
   1538 #if __FreeBSD_version >= 800000
   1539 	ifp->if_transmit = ixgbe_mq_start;
   1540 	ifp->if_qflush = ixgbe_qflush;
   1541 #else
   1542 	ifp->if_start = ixgbe_start;
   1543 #endif
   1544 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1545 
   1546 	if_attach(ifp);
   1547 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1548 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1549 
   1550 	adapter->max_frame_size =
   1551 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1552 
   1553 	/*
   1554 	 * Tell the upper layer(s) we support long frames.
   1555 	 */
   1556 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1557 
   1558 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1559 	ifp->if_capenable = 0;
   1560 
   1561 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1562 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1563 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1564 	    		| ETHERCAP_VLAN_MTU;
   1565 	ec->ec_capenable = ec->ec_capabilities;
   1566 
   1567 	/* Don't enable LRO by default */
   1568 	ifp->if_capabilities |= IFCAP_LRO;
   1569 #if 0
   1570 	ifp->if_capenable = ifp->if_capabilities;
   1571 #endif
   1572 
   1573 	/*
    1574 	** Don't turn this on by default: if vlans are
    1575 	** created on another pseudo device (e.g. lagg)
    1576 	** then vlan events are not passed through, breaking
    1577 	** operation, but with HW FILTER off it works. If
    1578 	** using vlans directly on this driver you can
    1579 	** enable this and get full hardware tag filtering.
   1580 	*/
   1581 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1582 
   1583 	/*
   1584 	 * Specify the media types supported by this adapter and register
   1585 	 * callbacks to update media and link information
   1586 	 */
   1587 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1588 		     ixv_media_status);
   1589 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
   1590 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1591 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1592 
   1593 	return;
   1594 }
   1595 
   1596 static void
   1597 ixv_config_link(struct adapter *adapter)
   1598 {
   1599 	struct ixgbe_hw *hw = &adapter->hw;
   1600 	u32	autoneg, err = 0;
   1601 
   1602 	if (hw->mac.ops.check_link)
   1603 		err = hw->mac.ops.check_link(hw, &autoneg,
   1604 		    &adapter->link_up, FALSE);
   1605 	if (err)
   1606 		goto out;
   1607 
   1608 	if (hw->mac.ops.setup_link)
    1609 		err = hw->mac.ops.setup_link(hw,
   1610 		    autoneg, adapter->link_up);
   1611 out:
   1612 	return;
   1613 }
   1614 
   1615 
   1616 /*********************************************************************
   1617  *
   1618  *  Enable transmit unit.
   1619  *
   1620  **********************************************************************/
   1621 static void
   1622 ixv_initialize_transmit_units(struct adapter *adapter)
   1623 {
   1624 	struct tx_ring	*txr = adapter->tx_rings;
   1625 	struct ixgbe_hw	*hw = &adapter->hw;
   1626 
   1627 
   1628 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1629 		u64	tdba = txr->txdma.dma_paddr;
   1630 		u32	txctrl, txdctl;
   1631 
   1632 		/* Set WTHRESH to 8, burst writeback */
   1633 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1634 		txdctl |= (8 << 16);
   1635 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1636 
   1637 		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
   1640 
   1641 		/* Set Tx Tail register */
   1642 		txr->tail = IXGBE_VFTDT(i);
   1643 
   1644 		/* Set the processing limit */
   1645 		txr->process_limit = ixv_tx_process_limit;
   1646 
   1647 		/* Set Ring parameters */
   1648 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1649 		       (tdba & 0x00000000ffffffffULL));
   1650 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1651 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1652 		    adapter->num_tx_desc *
   1653 		    sizeof(struct ixgbe_legacy_tx_desc));
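		/*
		 * Note: the legacy and advanced Tx descriptor formats are
		 * the same size (16 bytes), so the sizeof above yields the
		 * correct ring length in bytes for TDLEN.
		 */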
   1654 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1655 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1656 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1657 
   1658 		/* Now enable */
   1659 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1660 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1661 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1662 	}
   1663 
   1664 	return;
   1665 }
   1666 
   1667 
   1668 /*********************************************************************
   1669  *
   1670  *  Setup receive registers and features.
   1671  *
   1672  **********************************************************************/
   1673 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   1674 
   1675 static void
   1676 ixv_initialize_receive_units(struct adapter *adapter)
   1677 {
   1678 	int i;
   1679 	struct	rx_ring	*rxr = adapter->rx_rings;
   1680 	struct ixgbe_hw	*hw = &adapter->hw;
   1681 	struct ifnet   *ifp = adapter->ifp;
   1682 	u32		bufsz, fctrl, rxcsum, hlreg;
   1683 
   1684 
   1685 	/* Enable broadcasts */
   1686 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   1687 	fctrl |= IXGBE_FCTRL_BAM;
   1688 	fctrl |= IXGBE_FCTRL_DPF;
   1689 	fctrl |= IXGBE_FCTRL_PMCF;
   1690 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   1691 
   1692 	/* Set for Jumbo Frames? */
   1693 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   1694 	if (ifp->if_mtu > ETHERMTU) {
   1695 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   1696 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1697 	} else {
   1698 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   1699 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1700 	}
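	/*
	 * bufsz is the receive buffer size expressed in the 1 KB units
	 * that the SRRCTL BSIZEPKT field expects (4 for jumbo, 2 otherwise).
	 */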
   1701 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   1702 
   1703 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   1704 		u64 rdba = rxr->rxdma.dma_paddr;
   1705 		u32 reg, rxdctl;
   1706 
   1707 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1708 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1709 		    (rdba & 0x00000000ffffffffULL));
   1710 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   1711 		    (rdba >> 32));
   1712 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1713 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1714 
   1715 		/* Set up the SRRCTL register */
   1716 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1717 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1718 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1719 		reg |= bufsz;
   1720 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1721 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1722 
   1723 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   1724 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1725 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1726 		    adapter->num_rx_desc - 1);
   1727 		/* Set the processing limit */
   1728 		rxr->process_limit = ixv_rx_process_limit;
   1729 
   1730 		/* Set Rx Tail register */
   1731 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1732 
   1733 		/* Do the queue enabling last */
   1734 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1735 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   1736 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
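		/* Poll up to 10 ms for the hardware to latch the enable bit. */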
   1737 		for (int k = 0; k < 10; k++) {
   1738 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1739 			    IXGBE_RXDCTL_ENABLE)
   1740 				break;
   1741 			else
   1742 				msec_delay(1);
   1743 		}
   1744 		wmb();
   1745 	}
   1746 
   1747 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1748 
   1749 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1750 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1751 
   1752 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1753 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1754 
   1755 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1756 
   1757 	return;
   1758 }
   1759 
   1760 static void
   1761 ixv_setup_vlan_support(struct adapter *adapter)
   1762 {
   1763 	struct ixgbe_hw *hw = &adapter->hw;
   1764 	u32		ctrl, vid, vfta, retry;
   1765 
   1766 
	/*
	** We get here through init_locked, meaning
	** a soft reset; this has already cleared
	** the VFTA and other state, so if no
	** vlans have been registered do nothing.
	*/
   1773 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1774 		return;
   1775 
   1776 	/* Enable the queues */
   1777 	for (int i = 0; i < adapter->num_queues; i++) {
   1778 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1779 		ctrl |= IXGBE_RXDCTL_VME;
   1780 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1781 	}
   1782 
	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
   1787 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1788 		if (ixv_shadow_vfta[i] == 0)
   1789 			continue;
   1790 		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan ids
		** based on the bits set in each
		** of the array words.
		*/
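		/* e.g. word i = 2, bit j = 5 corresponds to VLAN ID 2 * 32 + 5 = 69 */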
		for (int j = 0; j < 32; j++) {
   1797 			retry = 0;
   1798 			if ((vfta & (1 << j)) == 0)
   1799 				continue;
   1800 			vid = (i * 32) + j;
   1801 			/* Call the shared code mailbox routine */
   1802 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   1803 				if (++retry > 5)
   1804 					break;
   1805 			}
   1806 		}
   1807 	}
   1808 }
   1809 
   1810 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
/*
** This routine is run via a vlan config EVENT;
** it enables us to use the HW Filter table since
** we can get the vlan id.  This just creates the
** entry in the soft version of the VFTA; init will
** repopulate the real table.
*/
   1818 static void
   1819 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1820 {
   1821 	struct adapter	*adapter = ifp->if_softc;
   1822 	u16		index, bit;
   1823 
   1824 	if (ifp->if_softc !=  arg)   /* Not our event */
   1825 		return;
   1826 
   1827 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1828 		return;
   1829 
   1830 	IXGBE_CORE_LOCK(adapter);
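	/*
	 * The shadow VFTA is an array of 32-bit words; bits 11:5 of the
	 * tag select the word and bits 4:0 select the bit within it.
	 */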
   1831 	index = (vtag >> 5) & 0x7F;
   1832 	bit = vtag & 0x1F;
   1833 	ixv_shadow_vfta[index] |= (1 << bit);
   1834 	/* Re-init to load the changes */
   1835 	ixv_init_locked(adapter);
   1836 	IXGBE_CORE_UNLOCK(adapter);
   1837 }
   1838 
/*
** This routine is run via a vlan
** unconfig EVENT; remove our entry
** from the soft vfta.
*/
   1844 static void
   1845 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1846 {
   1847 	struct adapter	*adapter = ifp->if_softc;
   1848 	u16		index, bit;
   1849 
   1850 	if (ifp->if_softc !=  arg)
   1851 		return;
   1852 
   1853 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   1854 		return;
   1855 
   1856 	IXGBE_CORE_LOCK(adapter);
   1857 	index = (vtag >> 5) & 0x7F;
   1858 	bit = vtag & 0x1F;
   1859 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1860 	/* Re-init to load the changes */
   1861 	ixv_init_locked(adapter);
   1862 	IXGBE_CORE_UNLOCK(adapter);
   1863 }
   1864 #endif
   1865 
   1866 static void
   1867 ixv_enable_intr(struct adapter *adapter)
   1868 {
   1869 	struct ixgbe_hw *hw = &adapter->hw;
   1870 	struct ix_queue *que = adapter->queues;
   1871 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1872 
   1873 
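	/*
	 * Unmask everything except the per-queue causes here (the queues
	 * are enabled individually below); the queue causes are then set
	 * to auto-clear in VTEIAC, while the mailbox and link causes must
	 * be cleared explicitly.
	 */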
   1874 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1875 
   1876 	mask = IXGBE_EIMS_ENABLE_MASK;
   1877 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1878 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1879 
	for (int i = 0; i < adapter->num_queues; i++, que++)
   1881 		ixv_enable_queue(adapter, que->msix);
   1882 
   1883 	IXGBE_WRITE_FLUSH(hw);
   1884 
   1885 	return;
   1886 }
   1887 
   1888 static void
   1889 ixv_disable_intr(struct adapter *adapter)
   1890 {
   1891 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1892 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1893 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1894 	return;
   1895 }
   1896 
   1897 /*
   1898 ** Setup the correct IVAR register for a particular MSIX interrupt
   1899 **  - entry is the register array entry
   1900 **  - vector is the MSIX vector for this queue
   1901 **  - type is RX/TX/MISC
   1902 */
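/*
** Each VTIVAR register holds four 8-bit entries laid out as:
**   bits  7:0  RX queue (2 * n)      bits 15:8  TX queue (2 * n)
**   bits 23:16 RX queue (2 * n + 1)  bits 31:24 TX queue (2 * n + 1)
** which is what the index arithmetic below computes.
*/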
   1903 static void
   1904 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1905 {
   1906 	struct ixgbe_hw *hw = &adapter->hw;
   1907 	u32 ivar, index;
   1908 
   1909 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1910 
   1911 	if (type == -1) { /* MISC IVAR */
   1912 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1913 		ivar &= ~0xFF;
   1914 		ivar |= vector;
   1915 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1916 	} else {	/* RX/TX IVARS */
   1917 		index = (16 * (entry & 1)) + (8 * type);
   1918 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1919 		ivar &= ~(0xFF << index);
   1920 		ivar |= (vector << index);
   1921 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1922 	}
   1923 }
   1924 
   1925 static void
   1926 ixv_configure_ivars(struct adapter *adapter)
   1927 {
   1928 	struct  ix_queue *que = adapter->queues;
   1929 
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1942 }
   1943 
   1944 
/*
** Handler for the MSIX MBX (mailbox) interrupt task
**  - runs outside hard interrupt context since the work might sleep
*/
   1949 static void
   1950 ixv_handle_mbx(void *context)
   1951 {
   1952 	struct adapter  *adapter = context;
   1953 
   1954 	ixgbe_check_link(&adapter->hw,
   1955 	    &adapter->link_speed, &adapter->link_up, 0);
   1956 	ixv_update_link_status(adapter);
   1957 }
   1958 
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
   1965 static void
   1966 ixv_save_stats(struct adapter *adapter)
   1967 {
   1968 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1969 
   1970 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1971 		stats->saved_reset_vfgprc +=
   1972 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1973 		stats->saved_reset_vfgptc +=
   1974 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1975 		stats->saved_reset_vfgorc +=
   1976 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1977 		stats->saved_reset_vfgotc +=
   1978 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1979 		stats->saved_reset_vfmprc +=
   1980 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1981 	}
   1982 }
   1983 
   1984 static void
   1985 ixv_init_stats(struct adapter *adapter)
   1986 {
   1987 	struct ixgbe_hw *hw = &adapter->hw;
   1988 
   1989 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1990 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1991 	adapter->stats.vf.last_vfgorc |=
   1992 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1993 
   1994 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1995 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1996 	adapter->stats.vf.last_vfgotc |=
   1997 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1998 
   1999 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2000 
   2001 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2002 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2003 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2004 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2005 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2006 }
   2007 
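/*
** The VF statistics registers are free-running 32-bit (or 36-bit, split
** across an LSB/MSB pair) counters that wrap.  These macros keep a 64-bit
** running count: when the current register value is smaller than the last
** value read, the counter wrapped, so the appropriate power of two is
** added before splicing in the new low-order bits.
** e.g. last = 0xFFFFFFF0 and a new read of 0x00000010 adds 2^32.
*/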
   2008 #define UPDATE_STAT_32(reg, last, count)		\
   2009 {							\
   2010 	u32 current = IXGBE_READ_REG(hw, reg);		\
   2011 	if (current < last)				\
   2012 		count.ev_count += 0x100000000LL;	\
   2013 	last = current;					\
   2014 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2015 	count.ev_count |= current;			\
   2016 }
   2017 
   2018 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   2019 {							\
   2020 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   2021 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   2022 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   2023 	if (current < last)				\
   2024 		count.ev_count += 0x1000000000LL;	\
   2025 	last = current;					\
   2026 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2027 	count.ev_count |= current;			\
   2028 }
   2029 
   2030 /*
   2031 ** ixv_update_stats - Update the board statistics counters.
   2032 */
   2033 void
   2034 ixv_update_stats(struct adapter *adapter)
   2035 {
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
   2048 }
   2049 
/*
 * Add statistics counters for the VF.  On NetBSD these are attached as
 * evcnt(9) event counters rather than sysctl nodes.
 */
   2053 static void
   2054 ixv_add_stats_sysctls(struct adapter *adapter)
   2055 {
   2056 	device_t dev = adapter->dev;
   2057 	struct ix_queue *que = &adapter->queues[0];
   2058 	struct tx_ring *txr = que->txr;
   2059 	struct rx_ring *rxr = que->rxr;
   2060 
   2061 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2062 
   2063 	const char *xname = device_xname(dev);
   2064 
   2065 	/* Driver Statistics */
   2066 	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
   2067 	    NULL, xname, "Driver dropped packets");
   2068 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2069 	    NULL, xname, "m_defrag() failed");
   2070 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2071 	    NULL, xname, "Watchdog timeouts");
   2072 
   2073 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2074 	    xname, "Good Packets Received");
   2075 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2076 	    xname, "Good Octets Received");
   2077 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2078 	    xname, "Multicast Packets Received");
   2079 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2080 	    xname, "Good Packets Transmitted");
   2081 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2082 	    xname, "Good Octets Transmitted");
   2083 	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
   2084 	    xname, "IRQs on queue");
   2085 	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
   2086 	    xname, "RX irqs on queue");
   2087 	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
   2088 	    xname, "RX packets");
   2089 	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
   2090 	    xname, "RX bytes");
   2091 	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
   2092 	    xname, "Discarded RX packets");
   2093 	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
   2094 	    xname, "TX Packets");
   2095 #if 0
   2096 	evcnt_attach_dynamic(&txr->bytes, EVCNT_TYPE_MISC, NULL,
   2097 	    xname, "TX Bytes");
   2098 #endif
   2099 	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
   2100 	    xname, "# of times not enough descriptors were available during TX");
   2101 	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
   2102 	    xname, "TX TSO");
   2103 }
   2104 
/**********************************************************************
 *
 *  This routine is called from the debug sysctl handler below.  It
 *  provides a way to take a look at important statistics maintained
 *  by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring *rxr;
	struct tx_ring *txr;
#ifdef LRO
	struct lro_ctrl *lro;
#endif /* LRO */

	device_printf(dev, "Error Byte Count = %u\n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
#ifdef LRO
		lro = &rxr->lro;
#endif /* LRO */
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (unsigned long)que->irqs.ev_count);
		device_printf(dev, "RX(%d) Packets Received: %llu\n",
		    rxr->me, (unsigned long long)rxr->rx_packets.ev_count);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
#ifdef LRO
		device_printf(dev, "RX(%d) LRO Queued= %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %d\n",
		    rxr->me, lro->lro_flushed);
#endif /* LRO */
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (unsigned long)txr->total_packets.ev_count);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (unsigned long)adapter->link_irq.ev_count);
	return;
}
   2155 
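/*
** Sysctl handler: writing 1 to the debug node dumps the statistics
** above via ixv_print_debug_info().
*/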
   2156 static int
   2157 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2158 {
   2159 	struct sysctlnode node;
   2160 	int error, result;
   2161 	struct adapter *adapter;
   2162 
   2163 	node = *rnode;
   2164 	adapter = (struct adapter *)node.sysctl_data;
   2165 	node.sysctl_data = &result;
   2166 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2167 
   2168 	if (error)
   2169 		return error;
   2170 
   2171 	if (result == 1)
   2172 		ixv_print_debug_info(adapter);
   2173 
   2174 	return 0;
   2175 }
   2176 
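/*
** Create (or report failure to create) the top-level sysctl node for
** this device under CTL_HW, named after the device instance.
*/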
   2177 const struct sysctlnode *
   2178 ixv_sysctl_instance(struct adapter *adapter)
   2179 {
   2180 	const char *dvname;
   2181 	struct sysctllog **log;
   2182 	int rc;
   2183 	const struct sysctlnode *rnode;
   2184 
   2185 	log = &adapter->sysctllog;
   2186 	dvname = device_xname(adapter->dev);
   2187 
   2188 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2189 	    0, CTLTYPE_NODE, dvname,
   2190 	    SYSCTL_DESCR("ixv information and settings"),
   2191 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2192 		goto err;
   2193 
   2194 	return rnode;
   2195 err:
   2196 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2197 	return NULL;
   2198 }
   2199