ixv.c revision 1.12
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2013, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/ixv.c 275358 2014-12-01 11:45:24Z hselasky $*/
     34 /*$NetBSD: ixv.c,v 1.12 2015/08/13 10:03:38 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixv.h"
     40 
     41 /*********************************************************************
     42  *  Driver version
     43  *********************************************************************/
     44 char ixv_driver_version[] = "1.1.4";
     45 
     46 /*********************************************************************
     47  *  PCI Device ID Table
     48  *
     49  *  Used by probe to select devices to load on
     50  *  Last field stores an index into ixv_strings
     51  *  Last entry must be all 0s
     52  *
     53  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     54  *********************************************************************/
     55 
     56 static ixv_vendor_info_t ixv_vendor_info_array[] =
     57 {
     58 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     60 	/* required last entry */
     61 	{0, 0, 0, 0, 0}
     62 };
     63 
     64 /*********************************************************************
     65  *  Table of branding strings
     66  *********************************************************************/
     67 
     68 static const char    *ixv_strings[] = {
     69 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     70 };
     71 
     72 /*********************************************************************
     73  *  Function prototypes
     74  *********************************************************************/
     75 static int      ixv_probe(device_t, cfdata_t, void *);
     76 static void      ixv_attach(device_t, device_t, void *);
     77 static int      ixv_detach(device_t, int);
     78 #if 0
     79 static int      ixv_shutdown(device_t);
     80 #endif
     81 #if __FreeBSD_version < 800000
     82 static void     ixv_start(struct ifnet *);
     83 static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
     84 #else
     85 static int	ixv_mq_start(struct ifnet *, struct mbuf *);
     86 static int	ixv_mq_start_locked(struct ifnet *,
     87 		    struct tx_ring *, struct mbuf *);
     88 static void	ixv_qflush(struct ifnet *);
     89 #endif
     90 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     91 static int	ixv_init(struct ifnet *);
     92 static void	ixv_init_locked(struct adapter *);
     93 static void     ixv_stop(void *);
     94 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     95 static int      ixv_media_change(struct ifnet *);
     96 static void     ixv_identify_hardware(struct adapter *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int	ixv_allocate_queues(struct adapter *);
    102 static int	ixv_setup_msix(struct adapter *);
    103 static void	ixv_free_pci_resources(struct adapter *);
    104 static void     ixv_local_timer(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static void     ixv_config_link(struct adapter *);
    107 
    108 static int      ixv_allocate_transmit_buffers(struct tx_ring *);
    109 static int	ixv_setup_transmit_structures(struct adapter *);
    110 static void	ixv_setup_transmit_ring(struct tx_ring *);
    111 static void     ixv_initialize_transmit_units(struct adapter *);
    112 static void     ixv_free_transmit_structures(struct adapter *);
    113 static void     ixv_free_transmit_buffers(struct tx_ring *);
    114 
    115 static int      ixv_allocate_receive_buffers(struct rx_ring *);
    116 static int      ixv_setup_receive_structures(struct adapter *);
    117 static int	ixv_setup_receive_ring(struct rx_ring *);
    118 static void     ixv_initialize_receive_units(struct adapter *);
    119 static void     ixv_free_receive_structures(struct adapter *);
    120 static void     ixv_free_receive_buffers(struct rx_ring *);
    121 
    122 static void     ixv_enable_intr(struct adapter *);
    123 static void     ixv_disable_intr(struct adapter *);
    124 static bool	ixv_txeof(struct tx_ring *);
    125 static bool	ixv_rxeof(struct ix_queue *, int);
    126 static void	ixv_rx_checksum(u32, struct mbuf *, u32,
    127 		    struct ixgbevf_hw_stats *);
    128 static void     ixv_set_multi(struct adapter *);
    129 static void     ixv_update_link_status(struct adapter *);
    130 static void	ixv_refresh_mbufs(struct rx_ring *, int);
    131 static int      ixv_xmit(struct tx_ring *, struct mbuf *);
    132 static int	ixv_sysctl_stats(SYSCTLFN_PROTO);
    133 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    134 static int	ixv_set_flowcntl(SYSCTLFN_PROTO);
    135 static int	ixv_dma_malloc(struct adapter *, bus_size_t,
    136 		    struct ixv_dma_alloc *, int);
    137 static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
    138 static void	ixv_add_rx_process_limit(struct adapter *, const char *,
    139 		    const char *, int *, int);
    140 static u32	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
    141 static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
    142 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    143 static void	ixv_configure_ivars(struct adapter *);
    144 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    145 
    146 static void	ixv_setup_vlan_support(struct adapter *);
    147 #if 0
    148 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    149 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    150 #endif
    151 
    152 static void	ixv_save_stats(struct adapter *);
    153 static void	ixv_init_stats(struct adapter *);
    154 static void	ixv_update_stats(struct adapter *);
    155 
    156 static __inline void ixv_rx_discard(struct rx_ring *, int);
    157 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
    158 		    struct mbuf *, u32);
    159 
    160 /* The MSI/X Interrupt handlers */
    161 static int	ixv_msix_que(void *);
    162 static int	ixv_msix_mbx(void *);
    163 
    164 /* Deferred interrupt tasklets */
    165 static void	ixv_handle_que(void *);
    166 static void	ixv_handle_mbx(void *);
    167 
    168 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    169 static ixv_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    170 
    171 /*********************************************************************
     172  *  Device Interface Entry Points
    173  *********************************************************************/
    174 
    175 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    176     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    177     DVF_DETACH_SHUTDOWN);
    178 
    179 # if 0
    180 static device_method_t ixv_methods[] = {
    181 	/* Device interface */
    182 	DEVMETHOD(device_probe, ixv_probe),
    183 	DEVMETHOD(device_attach, ixv_attach),
    184 	DEVMETHOD(device_detach, ixv_detach),
    185 	DEVMETHOD(device_shutdown, ixv_shutdown),
    186 	DEVMETHOD_END
    187 };
    188 #endif
    189 
    190 #if 0
    191 static driver_t ixv_driver = {
    192 	"ix", ixv_methods, sizeof(struct adapter),
    193 };
    194 
    195 extern devclass_t ixgbe_devclass;
    196 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
    197 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    198 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    199 #endif
    200 
    201 /*
    202 ** TUNEABLE PARAMETERS:
    203 */
    204 
    205 /*
    206 ** AIM: Adaptive Interrupt Moderation
    207 ** which means that the interrupt rate
    208 ** is varied over time based on the
    209 ** traffic for that interrupt vector
    210 */
    211 static int ixv_enable_aim = FALSE;
    212 #define	TUNABLE_INT(__x, __y)
    213 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
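/*
 * Note: FreeBSD's TUNABLE_INT(9) loader tunables do not exist on NetBSD;
 * the macro above is defined empty, so the TUNABLE_INT() invocations in
 * this file expand to nothing.  The defaults below can only be changed by
 * editing the initializers here or, for the values that get a sysctl node
 * at attach time, via sysctl(8).
 */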
    214 
    215 /* How many packets rxeof tries to clean at a time */
    216 static int ixv_rx_process_limit = 128;
    217 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    218 
    219 /* Flow control setting, default to full */
    220 static int ixv_flow_control = ixgbe_fc_full;
    221 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
    222 
    223 /*
    224  * Header split: this causes the hardware to DMA
     225  * the header into a separate mbuf from the payload;
     226  * it can be a performance win in some workloads, but
     227  * in others it actually hurts.  It is off by default.
    228  */
    229 static int ixv_header_split = FALSE;
    230 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
    231 
    232 /*
     233 ** Number of TX descriptors per ring;
     234 ** this is set higher than RX as that seems
     235 ** to be the better performing choice.
    236 */
    237 static int ixv_txd = DEFAULT_TXD;
    238 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    239 
    240 /* Number of RX descriptors per ring */
    241 static int ixv_rxd = DEFAULT_RXD;
    242 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    243 
    244 /*
     245 ** Shadow VFTA table; this is needed because
    246 ** the real filter table gets cleared during
    247 ** a soft reset and we need to repopulate it.
    248 */
    249 static u32 ixv_shadow_vfta[VFTA_SIZE];
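
/*
 * Illustrative sketch (not driver code): the VFTA is a bit map with one
 * bit per VLAN ID, stored as VFTA_SIZE 32-bit words, so assuming the usual
 * ixgbe layout a VLAN ID selects a word with (vid >> 5) and a bit with
 * (vid & 0x1f).  Repopulating the hardware from the shadow copy after a
 * reset would look roughly like:
 *
 *	u16 vid = 100;	// hypothetical VLAN ID
 *	ixv_shadow_vfta[vid >> 5] |= 1U << (vid & 0x1f);
 *	// ... later, write each shadow word back to the VFTA registers
 */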
    250 
     251 /* Keep a running tab on them for a sanity check */
    252 static int ixv_total_ports;
    253 
    254 /*********************************************************************
    255  *  Device identification routine
    256  *
     257  *  ixv_probe determines whether the driver should be loaded on
     258  *  an adapter, based on the PCI vendor/device ID of the adapter.
    259  *
    260  *  return 1 on success, 0 on failure
    261  *********************************************************************/
    262 
    263 static int
    264 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    265 {
    266 	const struct pci_attach_args *pa = aux;
    267 
    268 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    269 }
    270 
    271 static ixv_vendor_info_t *
    272 ixv_lookup(const struct pci_attach_args *pa)
    273 {
    274 	pcireg_t subid;
    275 	ixv_vendor_info_t *ent;
    276 
    277 	INIT_DEBUGOUT("ixv_probe: begin");
    278 
    279 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    280 		return NULL;
    281 
    282 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    283 
    284 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    285 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    286 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    287 
    288 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    289 		     (ent->subvendor_id == 0)) &&
    290 
    291 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    292 		     (ent->subdevice_id == 0))) {
    293 			++ixv_total_ports;
    294 			return ent;
    295 		}
    296 	}
    297 	return NULL;
    298 }
    299 
    300 
    301 static void
    302 ixv_sysctl_attach(struct adapter *adapter)
    303 {
    304 	struct sysctllog **log;
    305 	const struct sysctlnode *rnode, *cnode;
    306 	device_t dev;
    307 
    308 	dev = adapter->dev;
    309 	log = &adapter->sysctllog;
    310 
    311 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    312 		aprint_error_dev(dev, "could not create sysctl root\n");
    313 		return;
    314 	}
    315 
    316 	if (sysctl_createv(log, 0, &rnode, &cnode,
    317 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    318 	    "stats", SYSCTL_DESCR("Statistics"),
    319 	    ixv_sysctl_stats, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    320 		aprint_error_dev(dev, "could not create sysctl\n");
    321 
    322 	if (sysctl_createv(log, 0, &rnode, &cnode,
    323 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    324 	    "debug", SYSCTL_DESCR("Debug Info"),
    325 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    326 		aprint_error_dev(dev, "could not create sysctl\n");
    327 
    328 	if (sysctl_createv(log, 0, &rnode, &cnode,
    329 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    330 	    "flow_control", SYSCTL_DESCR("Flow Control"),
    331 	    ixv_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    332 		aprint_error_dev(dev, "could not create sysctl\n");
    333 
    334 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    335 	 * XXX It's that way in the FreeBSD driver that this derives from.
    336 	 */
    337 	if (sysctl_createv(log, 0, &rnode, &cnode,
    338 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    339 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    340 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    341 		aprint_error_dev(dev, "could not create sysctl\n");
    342 }
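
/*
 * Usage sketch: the nodes above hang off the per-device node returned by
 * ixv_sysctl_instance(), so assuming that node ends up as "hw.ixv0" for
 * the first device, they could be poked with something like
 *
 *	sysctl -w hw.ixv0.debug=1
 *	sysctl hw.ixv0.stats
 *
 * The exact path depends on how ixv_sysctl_instance() builds the tree.
 */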
    343 
    344 /*********************************************************************
    345  *  Device initialization routine
    346  *
    347  *  The attach entry point is called when the driver is being loaded.
    348  *  This routine identifies the type of hardware, allocates all resources
    349  *  and initializes the hardware.
    350  *
     351  *  This NetBSD attach function returns void; failures are reported with aprint_error_dev().
    352  *********************************************************************/
    353 
    354 static void
    355 ixv_attach(device_t parent, device_t dev, void *aux)
    356 {
    357 	struct adapter *adapter;
    358 	struct ixgbe_hw *hw;
    359 	int             error = 0;
    360 	ixv_vendor_info_t *ent;
    361 	const struct pci_attach_args *pa = aux;
    362 
    363 	INIT_DEBUGOUT("ixv_attach: begin");
    364 
    365 	/* Allocate, clear, and link in our adapter structure */
    366 	adapter = device_private(dev);
    367 	adapter->dev = adapter->osdep.dev = dev;
    368 	hw = &adapter->hw;
    369 
    370 	ent = ixv_lookup(pa);
    371 
    372 	KASSERT(ent != NULL);
    373 
    374 	aprint_normal(": %s, Version - %s\n",
    375 	    ixv_strings[ent->index], ixv_driver_version);
    376 
    377 	/* Core Lock Init*/
    378 	IXV_CORE_LOCK_INIT(adapter, device_xname(dev));
    379 
    380 	/* SYSCTL APIs */
    381 	ixv_sysctl_attach(adapter);
    382 
    383 	/* Set up the timer callout */
    384 	callout_init(&adapter->timer, 0);
    385 
    386 	/* Determine hardware revision */
    387 	ixv_identify_hardware(adapter);
    388 
    389 	/* Do base PCI setup - map BAR0 */
    390 	if (ixv_allocate_pci_resources(adapter, pa)) {
    391 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    392 		error = ENXIO;
    393 		goto err_out;
    394 	}
    395 
    396 	/* Do descriptor calc and sanity checks */
    397 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    398 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    399 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    400 		adapter->num_tx_desc = DEFAULT_TXD;
    401 	} else
    402 		adapter->num_tx_desc = ixv_txd;
    403 
    404 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    405 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    406 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    407 		adapter->num_rx_desc = DEFAULT_RXD;
    408 	} else
    409 		adapter->num_rx_desc = ixv_rxd;
    410 
    411 	/* Allocate our TX/RX Queues */
    412 	if (ixv_allocate_queues(adapter)) {
    413 		error = ENOMEM;
    414 		goto err_out;
    415 	}
    416 
    417 	/*
     418 	** Initialize the shared code: it is
     419 	** at this point that the mac type is set.
    420 	*/
    421 	error = ixgbe_init_shared_code(hw);
    422 	if (error) {
    423 		aprint_error_dev(dev,"Shared Code Initialization Failure\n");
    424 		error = EIO;
    425 		goto err_late;
    426 	}
    427 
    428 	/* Setup the mailbox */
    429 	ixgbe_init_mbx_params_vf(hw);
    430 
    431 	ixgbe_reset_hw(hw);
    432 
     433 	/* Set the default Hardware Flow Control settings */
    434 	hw->fc.requested_mode = ixgbe_fc_full;
    435 	hw->fc.pause_time = IXV_FC_PAUSE;
    436 	hw->fc.low_water[0] = IXV_FC_LO;
    437 	hw->fc.high_water[0] = IXV_FC_HI;
    438 	hw->fc.send_xon = TRUE;
    439 
    440 	error = ixgbe_init_hw(hw);
    441 	if (error) {
    442 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    443 		error = EIO;
    444 		goto err_late;
    445 	}
    446 
    447 	error = ixv_allocate_msix(adapter, pa);
    448 	if (error)
    449 		goto err_late;
    450 
    451 	/* Setup OS specific network interface */
    452 	ixv_setup_interface(dev, adapter);
    453 
    454 	/* Sysctl for limiting the amount of work done in the taskqueue */
    455 	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
    456 	    "max number of rx packets to process", &adapter->rx_process_limit,
    457 	    ixv_rx_process_limit);
    458 
    459 	/* Do the stats setup */
    460 	ixv_save_stats(adapter);
    461 	ixv_init_stats(adapter);
    462 
    463 	/* Register for VLAN events */
    464 #if 0 /* XXX delete after write? */
    465 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    466 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    467 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    468 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    469 #endif
    470 
    471 	INIT_DEBUGOUT("ixv_attach: end");
    472 	return;
    473 
    474 err_late:
    475 	ixv_free_transmit_structures(adapter);
    476 	ixv_free_receive_structures(adapter);
    477 err_out:
    478 	ixv_free_pci_resources(adapter);
    479 	return;
    480 
    481 }
    482 
    483 /*********************************************************************
    484  *  Device removal routine
    485  *
    486  *  The detach entry point is called when the driver is being removed.
    487  *  This routine stops the adapter and deallocates all the resources
    488  *  that were allocated for driver operation.
    489  *
    490  *  return 0 on success, positive on failure
    491  *********************************************************************/
    492 
    493 static int
    494 ixv_detach(device_t dev, int flags)
    495 {
    496 	struct adapter *adapter = device_private(dev);
    497 	struct ix_queue *que = adapter->queues;
    498 
    499 	INIT_DEBUGOUT("ixv_detach: begin");
    500 
    501 	/* Make sure VLANS are not using driver */
    502 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    503 		;	/* nothing to do: no VLANs */
    504 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    505 		vlan_ifdetach(adapter->ifp);
    506 	else {
    507 		aprint_error_dev(dev, "VLANs in use\n");
    508 		return EBUSY;
    509 	}
    510 
    511 	IXV_CORE_LOCK(adapter);
    512 	ixv_stop(adapter);
    513 	IXV_CORE_UNLOCK(adapter);
    514 
    515 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    516 		softint_disestablish(que->que_si);
    517 	}
    518 
    519 	/* Drain the Link queue */
    520 	softint_disestablish(adapter->mbx_si);
    521 
    522 	/* Unregister VLAN events */
    523 #if 0 /* XXX msaitoh delete after write? */
    524 	if (adapter->vlan_attach != NULL)
    525 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    526 	if (adapter->vlan_detach != NULL)
    527 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    528 #endif
    529 
    530 	ether_ifdetach(adapter->ifp);
    531 	callout_halt(&adapter->timer, NULL);
    532 	ixv_free_pci_resources(adapter);
    533 #if 0 /* XXX the NetBSD port is probably missing something here */
    534 	bus_generic_detach(dev);
    535 #endif
    536 	if_detach(adapter->ifp);
    537 
    538 	ixv_free_transmit_structures(adapter);
    539 	ixv_free_receive_structures(adapter);
    540 
    541 	IXV_CORE_LOCK_DESTROY(adapter);
    542 	return (0);
    543 }
    544 
    545 /*********************************************************************
    546  *
    547  *  Shutdown entry point
    548  *
    549  **********************************************************************/
    550 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    551 static int
    552 ixv_shutdown(device_t dev)
    553 {
    554 	struct adapter *adapter = device_private(dev);
    555 	IXV_CORE_LOCK(adapter);
    556 	ixv_stop(adapter);
    557 	IXV_CORE_UNLOCK(adapter);
    558 	return (0);
    559 }
    560 #endif
    561 
    562 #if __FreeBSD_version < 800000
    563 /*********************************************************************
    564  *  Transmit entry point
    565  *
    566  *  ixv_start is called by the stack to initiate a transmit.
    567  *  The driver will remain in this routine as long as there are
    568  *  packets to transmit and transmit resources are available.
     569  *  In case resources are not available, the stack is notified and
    570  *  the packet is requeued.
    571  **********************************************************************/
    572 static void
    573 ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
    574 {
    575 	int rc;
    576 	struct mbuf    *m_head;
    577 	struct adapter *adapter = txr->adapter;
    578 
    579 	IXV_TX_LOCK_ASSERT(txr);
    580 
    581 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
    582 	    IFF_RUNNING)
    583 		return;
    584 	if (!adapter->link_active)
    585 		return;
    586 
    587 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    588 
    589 		IFQ_POLL(&ifp->if_snd, m_head);
    590 		if (m_head == NULL)
    591 			break;
    592 
    593 		if ((rc = ixv_xmit(txr, m_head)) == EAGAIN) {
    594 			ifp->if_flags |= IFF_OACTIVE;
    595 			break;
    596 		}
    597 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
    598 		if (rc == EFBIG) {
    599 			struct mbuf *mtmp;
    600 
    601 			if ((mtmp = m_defrag(m_head, M_NOWAIT)) != NULL) {
    602 				m_head = mtmp;
    603 				rc = ixv_xmit(txr, m_head);
    604 				if (rc != 0)
    605 					adapter->efbig2_tx_dma_setup.ev_count++;
    606 			} else
    607 				adapter->m_defrag_failed.ev_count++;
    608 		}
    609 		if (rc != 0) {
    610 			m_freem(m_head);
    611 			continue;
    612 		}
    613 		/* Send a copy of the frame to the BPF listener */
    614 		bpf_mtap(ifp, m_head);
    615 
    616 		/* Set watchdog on */
    617 		txr->watchdog_check = TRUE;
    618 		getmicrotime(&txr->watchdog_time);
    619 	}
    620 	return;
    621 }
    622 
    623 /*
    624  * Legacy TX start - called by the stack, this
    625  * always uses the first tx ring, and should
    626  * not be used with multiqueue tx enabled.
    627  */
    628 static void
    629 ixv_start(struct ifnet *ifp)
    630 {
    631 	struct adapter *adapter = ifp->if_softc;
    632 	struct tx_ring	*txr = adapter->tx_rings;
    633 
    634 	if (ifp->if_flags & IFF_RUNNING) {
    635 		IXV_TX_LOCK(txr);
    636 		ixv_start_locked(txr, ifp);
    637 		IXV_TX_UNLOCK(txr);
    638 	}
    639 	return;
    640 }
    641 
    642 #else
    643 
    644 /*
    645 ** Multiqueue Transmit driver
    646 **
    647 */
    648 static int
    649 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
    650 {
    651 	struct adapter	*adapter = ifp->if_softc;
    652 	struct ix_queue	*que;
    653 	struct tx_ring	*txr;
    654 	int 		i = 0, err = 0;
    655 
    656 	/* Which queue to use */
    657 	if ((m->m_flags & M_FLOWID) != 0)
    658 		i = m->m_pkthdr.flowid % adapter->num_queues;
    659 
    660 	txr = &adapter->tx_rings[i];
    661 	que = &adapter->queues[i];
    662 
    663 	if (IXV_TX_TRYLOCK(txr)) {
    664 		err = ixv_mq_start_locked(ifp, txr, m);
    665 		IXV_TX_UNLOCK(txr);
    666 	} else {
    667 		err = drbr_enqueue(ifp, txr->br, m);
    668 		softint_schedule(que->que_si);
    669 	}
    670 
    671 	return (err);
    672 }
    673 
    674 static int
    675 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
    676 {
    677 	struct adapter  *adapter = txr->adapter;
    678         struct mbuf     *next;
    679         int             enqueued, err = 0;
    680 
    681 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
    682 	    IFF_RUNNING || adapter->link_active == 0) {
    683 		if (m != NULL)
    684 			err = drbr_enqueue(ifp, txr->br, m);
    685 		return (err);
    686 	}
    687 
    688 	/* Do a clean if descriptors are low */
    689 	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
    690 		ixv_txeof(txr);
    691 
    692 	enqueued = 0;
    693 	if (m != NULL) {
     694 		err = drbr_enqueue(ifp, txr->br, m);
    695 		if (err) {
    696 			return (err);
    697 		}
    698 	}
    699 	/* Process the queue */
    700 	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
    701 		if ((err = ixv_xmit(txr, next)) != 0) {
    702 			if (next != NULL) {
    703 				drbr_advance(ifp, txr->br);
    704 			} else {
    705 				drbr_putback(ifp, txr->br, next);
    706 			}
    707 			break;
    708 		}
    709 		drbr_advance(ifp, txr->br);
    710 		enqueued++;
    711 		ifp->if_obytes += next->m_pkthdr.len;
    712 		if (next->m_flags & M_MCAST)
    713 			ifp->if_omcasts++;
    714 		/* Send a copy of the frame to the BPF listener */
    715 		ETHER_BPF_MTAP(ifp, next);
    716 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    717 			break;
    718 		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
    719 			ifp->if_flags |= IFF_OACTIVE;
    720 			break;
    721 		}
    722 	}
    723 
    724 	if (enqueued > 0) {
    725 		/* Set watchdog on */
    726 		txr->watchdog_check = TRUE;
    727 		getmicrotime(&txr->watchdog_time);
    728 	}
    729 
    730 	return (err);
    731 }
    732 
    733 /*
    734 ** Flush all ring buffers
    735 */
    736 static void
    737 ixv_qflush(struct ifnet *ifp)
    738 {
    739 	struct adapter  *adapter = ifp->if_softc;
    740 	struct tx_ring  *txr = adapter->tx_rings;
    741 	struct mbuf     *m;
    742 
    743 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    744 		IXV_TX_LOCK(txr);
    745 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
    746 			m_freem(m);
    747 		IXV_TX_UNLOCK(txr);
    748 	}
    749 	if_qflush(ifp);
    750 }
    751 
    752 #endif
    753 
    754 static int
    755 ixv_ifflags_cb(struct ethercom *ec)
    756 {
    757 	struct ifnet *ifp = &ec->ec_if;
    758 	struct adapter *adapter = ifp->if_softc;
    759 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    760 
    761 	IXV_CORE_LOCK(adapter);
    762 
    763 	if (change != 0)
    764 		adapter->if_flags = ifp->if_flags;
    765 
    766 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    767 		rc = ENETRESET;
    768 
    769 	IXV_CORE_UNLOCK(adapter);
    770 
    771 	return rc;
    772 }
    773 
    774 /*********************************************************************
    775  *  Ioctl entry point
    776  *
    777  *  ixv_ioctl is called when the user wants to configure the
    778  *  interface.
    779  *
    780  *  return 0 on success, positive on failure
    781  **********************************************************************/
    782 
    783 static int
    784 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    785 {
    786 	struct adapter	*adapter = ifp->if_softc;
    787 	struct ifcapreq *ifcr = data;
    788 	struct ifreq	*ifr = (struct ifreq *) data;
    789 	int             error = 0;
    790 	int l4csum_en;
    791 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    792 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    793 
    794 	switch (command) {
    795 	case SIOCSIFFLAGS:
    796 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    797 		break;
    798 	case SIOCADDMULTI:
    799 	case SIOCDELMULTI:
    800 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    801 		break;
    802 	case SIOCSIFMEDIA:
    803 	case SIOCGIFMEDIA:
    804 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    805 		break;
    806 	case SIOCSIFCAP:
    807 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    808 		break;
    809 	case SIOCSIFMTU:
    810 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    811 		break;
    812 	default:
    813 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
    814 		break;
    815 	}
    816 
    817 	switch (command) {
    818 	case SIOCSIFMEDIA:
    819 	case SIOCGIFMEDIA:
    820 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    821 	case SIOCSIFCAP:
    822 		/* Layer-4 Rx checksum offload has to be turned on and
    823 		 * off as a unit.
    824 		 */
    825 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    826 		if (l4csum_en != l4csum && l4csum_en != 0)
    827 			return EINVAL;
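		/*
		 * For example, a request that enables only IFCAP_CSUM_TCPv4_Rx
		 * while leaving the other three Rx checksum bits clear is
		 * rejected here; the four have to be toggled together.
		 */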
    828 		/*FALLTHROUGH*/
    829 	case SIOCADDMULTI:
    830 	case SIOCDELMULTI:
    831 	case SIOCSIFFLAGS:
    832 	case SIOCSIFMTU:
    833 	default:
    834 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    835 			return error;
    836 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    837 			;
    838 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    839 			IXV_CORE_LOCK(adapter);
    840 			ixv_init_locked(adapter);
    841 			IXV_CORE_UNLOCK(adapter);
    842 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    843 			/*
    844 			 * Multicast list has changed; set the hardware filter
    845 			 * accordingly.
    846 			 */
    847 			IXV_CORE_LOCK(adapter);
    848 			ixv_disable_intr(adapter);
    849 			ixv_set_multi(adapter);
    850 			ixv_enable_intr(adapter);
    851 			IXV_CORE_UNLOCK(adapter);
    852 		}
    853 		return 0;
    854 	}
    855 }
    856 
    857 /*********************************************************************
    858  *  Init entry point
    859  *
     860  *  This routine is used in two ways. It is used by the stack as
     861  *  the init entry point in the network interface structure. It is also used
    862  *  by the driver as a hw/sw initialization routine to get to a
    863  *  consistent state.
    864  *
    865  *  return 0 on success, positive on failure
    866  **********************************************************************/
    867 #define IXGBE_MHADD_MFS_SHIFT 16
    868 
    869 static void
    870 ixv_init_locked(struct adapter *adapter)
    871 {
    872 	struct ifnet	*ifp = adapter->ifp;
    873 	device_t 	dev = adapter->dev;
    874 	struct ixgbe_hw *hw = &adapter->hw;
    875 	u32		mhadd, gpie;
    876 
    877 	INIT_DEBUGOUT("ixv_init: begin");
    878 	KASSERT(mutex_owned(&adapter->core_mtx));
    879 	hw->adapter_stopped = FALSE;
    880 	ixgbe_stop_adapter(hw);
    881         callout_stop(&adapter->timer);
    882 
    883         /* reprogram the RAR[0] in case user changed it. */
    884         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    885 
     886 	/* Get the latest MAC address; the user can use an LAA */
    887 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    888 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    889         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    890 	hw->addr_ctrl.rar_used_count = 1;
    891 
    892 	/* Prepare transmit descriptors and buffers */
    893 	if (ixv_setup_transmit_structures(adapter)) {
    894 		aprint_error_dev(dev,"Could not setup transmit structures\n");
    895 		ixv_stop(adapter);
    896 		return;
    897 	}
    898 
    899 	ixgbe_reset_hw(hw);
    900 	ixv_initialize_transmit_units(adapter);
    901 
    902 	/* Setup Multicast table */
    903 	ixv_set_multi(adapter);
    904 
    905 	/*
    906 	** Determine the correct mbuf pool
    907 	** for doing jumbo/headersplit
    908 	*/
    909 	if (ifp->if_mtu > ETHERMTU)
    910 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    911 	else
    912 		adapter->rx_mbuf_sz = MCLBYTES;
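	/*
	 * For example, the default MTU of 1500 keeps the standard 2 KB
	 * MCLBYTES clusters, while a jumbo MTU such as 9000 would select
	 * the page-sized MJUMPAGESIZE clusters instead.
	 */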
    913 
    914 	/* Prepare receive descriptors and buffers */
    915 	if (ixv_setup_receive_structures(adapter)) {
    916 		device_printf(dev,"Could not setup receive structures\n");
    917 		ixv_stop(adapter);
    918 		return;
    919 	}
    920 
    921 	/* Configure RX settings */
    922 	ixv_initialize_receive_units(adapter);
    923 
    924 	/* Enable Enhanced MSIX mode */
    925 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    926 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    927 	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    928         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
    929 
    930 #if 0 /* XXX isn't it required? -- msaitoh  */
    931 	/* Set the various hardware offload abilities */
    932 	ifp->if_hwassist = 0;
    933 	if (ifp->if_capenable & IFCAP_TSO4)
    934 		ifp->if_hwassist |= CSUM_TSO;
    935 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    936 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    937 #if __FreeBSD_version >= 800000
    938 		ifp->if_hwassist |= CSUM_SCTP;
    939 #endif
    940 	}
    941 #endif
    942 
    943 	/* Set MTU size */
    944 	if (ifp->if_mtu > ETHERMTU) {
    945 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
    946 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
    947 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
    948 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    949 	}
    950 
    951 	/* Set up VLAN offload and filter */
    952 	ixv_setup_vlan_support(adapter);
    953 
    954 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    955 
    956 	/* Set up MSI/X routing */
    957 	ixv_configure_ivars(adapter);
    958 
    959 	/* Set up auto-mask */
    960 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    961 
    962         /* Set moderation on the Link interrupt */
    963         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
    964 
    965 	/* Stats init */
    966 	ixv_init_stats(adapter);
    967 
    968 	/* Config/Enable Link */
    969 	ixv_config_link(adapter);
    970 
    971 	/* And now turn on interrupts */
    972 	ixv_enable_intr(adapter);
    973 
    974 	/* Now inform the stack we're ready */
    975 	ifp->if_flags |= IFF_RUNNING;
    976 	ifp->if_flags &= ~IFF_OACTIVE;
    977 
    978 	return;
    979 }
    980 
    981 static int
    982 ixv_init(struct ifnet *ifp)
    983 {
    984 	struct adapter *adapter = ifp->if_softc;
    985 
    986 	IXV_CORE_LOCK(adapter);
    987 	ixv_init_locked(adapter);
    988 	IXV_CORE_UNLOCK(adapter);
    989 	return 0;
    990 }
    991 
    992 
    993 /*
    994 **
    995 ** MSIX Interrupt Handlers and Tasklets
    996 **
    997 */
    998 
    999 static inline void
   1000 ixv_enable_queue(struct adapter *adapter, u32 vector)
   1001 {
   1002 	struct ixgbe_hw *hw = &adapter->hw;
   1003 	u32	queue = 1 << vector;
   1004 	u32	mask;
   1005 
   1006 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1007 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1008 }
   1009 
   1010 static inline void
   1011 ixv_disable_queue(struct adapter *adapter, u32 vector)
   1012 {
   1013 	struct ixgbe_hw *hw = &adapter->hw;
   1014 	u64	queue = (u64)(1 << vector);
   1015 	u32	mask;
   1016 
   1017 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1018 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
   1019 }
   1020 
   1021 static inline void
   1022 ixv_rearm_queues(struct adapter *adapter, u64 queues)
   1023 {
   1024 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   1025 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
   1026 }
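
/*
 * Illustrative note: each queue vector owns one bit in the VTEIMS/VTEIMC/
 * VTEICS registers above, so vector 1 corresponds to the mask 0x2.  ANDing
 * with IXGBE_EIMS_RTX_QUEUE keeps only the bits that are valid RX/TX queue
 * causes before the mask is written to the hardware.
 */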
   1027 
   1028 
   1029 static void
   1030 ixv_handle_que(void *context)
   1031 {
   1032 	struct ix_queue *que = context;
   1033 	struct adapter  *adapter = que->adapter;
   1034 	struct tx_ring  *txr = que->txr;
   1035 	struct ifnet    *ifp = adapter->ifp;
   1036 	bool		more;
   1037 
   1038 	if (ifp->if_flags & IFF_RUNNING) {
   1039 		more = ixv_rxeof(que, adapter->rx_process_limit);
   1040 		IXV_TX_LOCK(txr);
   1041 		ixv_txeof(txr);
   1042 #if __FreeBSD_version >= 800000
   1043 		if (!drbr_empty(ifp, txr->br))
   1044 			ixv_mq_start_locked(ifp, txr, NULL);
   1045 #else
   1046 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1047 			ixv_start_locked(txr, ifp);
   1048 #endif
   1049 		IXV_TX_UNLOCK(txr);
   1050 		if (more) {
   1051 			adapter->req.ev_count++;
   1052 			softint_schedule(que->que_si);
   1053 			return;
   1054 		}
   1055 	}
   1056 
   1057 	/* Reenable this interrupt */
   1058 	ixv_enable_queue(adapter, que->msix);
   1059 	return;
   1060 }
   1061 
   1062 /*********************************************************************
   1063  *
    1064  *  MSIX Queue Interrupt Service routine
   1065  *
   1066  **********************************************************************/
   1067 int
   1068 ixv_msix_que(void *arg)
   1069 {
   1070 	struct ix_queue	*que = arg;
   1071 	struct adapter  *adapter = que->adapter;
   1072 	struct tx_ring	*txr = que->txr;
   1073 	struct rx_ring	*rxr = que->rxr;
   1074 	bool		more_tx, more_rx;
   1075 	u32		newitr = 0;
   1076 
   1077 	ixv_disable_queue(adapter, que->msix);
   1078 	++que->irqs;
   1079 
   1080 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1081 
   1082 	IXV_TX_LOCK(txr);
   1083 	more_tx = ixv_txeof(txr);
   1084 	/*
   1085 	** Make certain that if the stack
   1086 	** has anything queued the task gets
   1087 	** scheduled to handle it.
   1088 	*/
   1089 #if __FreeBSD_version < 800000
   1090 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
   1091 #else
   1092 	if (!drbr_empty(adapter->ifp, txr->br))
   1093 #endif
   1094                 more_tx = 1;
   1095 	IXV_TX_UNLOCK(txr);
   1096 
   1097 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1098 
   1099 	/* Do AIM now? */
   1100 
   1101 	if (ixv_enable_aim == FALSE)
   1102 		goto no_calc;
   1103 	/*
   1104 	** Do Adaptive Interrupt Moderation:
   1105         **  - Write out last calculated setting
   1106 	**  - Calculate based on average size over
   1107 	**    the last interval.
   1108 	*/
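	/*
	 * Worked example (illustrative numbers): if a queue moved 90000
	 * bytes in 60 packets since the last interrupt, the average frame
	 * is 1500 bytes; adding 24 for the hardware framing/CRC gives 1524,
	 * which is outside the 300-1200 "mid range", so it is halved to 762
	 * and then mirrored into both halves of the EITR value below.
	 */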
   1109         if (que->eitr_setting)
   1110                 IXGBE_WRITE_REG(&adapter->hw,
   1111                     IXGBE_VTEITR(que->msix),
   1112 		    que->eitr_setting);
   1113 
   1114         que->eitr_setting = 0;
   1115 
   1116         /* Idle, do nothing */
   1117         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1118                 goto no_calc;
   1119 
   1120 	if ((txr->bytes) && (txr->packets))
   1121                	newitr = txr->bytes/txr->packets;
   1122 	if ((rxr->bytes) && (rxr->packets))
   1123 		newitr = max(newitr,
   1124 		    (rxr->bytes / rxr->packets));
   1125 	newitr += 24; /* account for hardware frame, crc */
   1126 
   1127 	/* set an upper boundary */
   1128 	newitr = min(newitr, 3000);
   1129 
   1130 	/* Be nice to the mid range */
   1131 	if ((newitr > 300) && (newitr < 1200))
   1132 		newitr = (newitr / 3);
   1133 	else
   1134 		newitr = (newitr / 2);
   1135 
   1136 	newitr |= newitr << 16;
   1137 
   1138         /* save for next interrupt */
   1139         que->eitr_setting = newitr;
   1140 
   1141         /* Reset state */
   1142         txr->bytes = 0;
   1143         txr->packets = 0;
   1144         rxr->bytes = 0;
   1145         rxr->packets = 0;
   1146 
   1147 no_calc:
   1148 	if (more_tx || more_rx)
   1149 		softint_schedule(que->que_si);
   1150 	else /* Reenable this interrupt */
   1151 		ixv_enable_queue(adapter, que->msix);
   1152 	return 1;
   1153 }
   1154 
   1155 static int
   1156 ixv_msix_mbx(void *arg)
   1157 {
   1158 	struct adapter	*adapter = arg;
   1159 	struct ixgbe_hw *hw = &adapter->hw;
   1160 	u32		reg;
   1161 
   1162 	++adapter->mbx_irq.ev_count;
   1163 
   1164 	/* First get the cause */
   1165 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
   1166 	/* Clear interrupt with write */
   1167 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
   1168 
   1169 	/* Link status change */
   1170 	if (reg & IXGBE_EICR_LSC)
   1171 		softint_schedule(adapter->mbx_si);
   1172 
   1173 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1174 	return 1;
   1175 }
   1176 
   1177 /*********************************************************************
   1178  *
   1179  *  Media Ioctl callback
   1180  *
   1181  *  This routine is called whenever the user queries the status of
   1182  *  the interface using ifconfig.
   1183  *
   1184  **********************************************************************/
   1185 static void
   1186 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1187 {
   1188 	struct adapter *adapter = ifp->if_softc;
   1189 
   1190 	INIT_DEBUGOUT("ixv_media_status: begin");
   1191 	IXV_CORE_LOCK(adapter);
   1192 	ixv_update_link_status(adapter);
   1193 
   1194 	ifmr->ifm_status = IFM_AVALID;
   1195 	ifmr->ifm_active = IFM_ETHER;
   1196 
   1197 	if (!adapter->link_active) {
   1198 		IXV_CORE_UNLOCK(adapter);
   1199 		return;
   1200 	}
   1201 
   1202 	ifmr->ifm_status |= IFM_ACTIVE;
   1203 
   1204 	switch (adapter->link_speed) {
   1205 		case IXGBE_LINK_SPEED_1GB_FULL:
   1206 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1207 			break;
   1208 		case IXGBE_LINK_SPEED_10GB_FULL:
   1209 			ifmr->ifm_active |= IFM_FDX;
   1210 			break;
   1211 	}
   1212 
   1213 	IXV_CORE_UNLOCK(adapter);
   1214 
   1215 	return;
   1216 }
   1217 
   1218 /*********************************************************************
   1219  *
   1220  *  Media Ioctl callback
   1221  *
   1222  *  This routine is called when the user changes speed/duplex using
    1223  *  the media/mediaopt options with ifconfig.
   1224  *
   1225  **********************************************************************/
   1226 static int
   1227 ixv_media_change(struct ifnet * ifp)
   1228 {
   1229 	struct adapter *adapter = ifp->if_softc;
   1230 	struct ifmedia *ifm = &adapter->media;
   1231 
   1232 	INIT_DEBUGOUT("ixv_media_change: begin");
   1233 
   1234 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1235 		return (EINVAL);
   1236 
   1237         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1238         case IFM_AUTO:
   1239                 break;
   1240         default:
   1241                 device_printf(adapter->dev, "Only auto media type\n");
   1242 		return (EINVAL);
   1243         }
   1244 
   1245 	return (0);
   1246 }
   1247 
   1248 /*********************************************************************
   1249  *
   1250  *  This routine maps the mbufs to tx descriptors, allowing the
   1251  *  TX engine to transmit the packets.
   1252  *  	- return 0 on success, positive on failure
   1253  *
   1254  **********************************************************************/
   1255 
   1256 static int
   1257 ixv_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1258 {
   1259 	struct m_tag *mtag;
   1260 	struct adapter  *adapter = txr->adapter;
   1261 	struct ethercom *ec = &adapter->osdep.ec;
   1262 	u32		olinfo_status = 0, cmd_type_len;
   1263 	u32		paylen = 0;
   1264 	int             i, j, error;
   1265 	int		first, last = 0;
   1266 	bus_dmamap_t	map;
   1267 	struct ixv_tx_buf *txbuf;
   1268 	union ixgbe_adv_tx_desc *txd = NULL;
   1269 
   1270 	/* Basic descriptor defines */
   1271         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1272 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1273 
   1274 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1275         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1276 
   1277         /*
   1278          * Important to capture the first descriptor
   1279          * used because it will contain the index of
   1280          * the one we tell the hardware to report back
   1281          */
   1282         first = txr->next_avail_desc;
   1283 	txbuf = &txr->tx_buffers[first];
   1284 	map = txbuf->map;
   1285 
   1286 	/*
   1287 	 * Map the packet for DMA.
   1288 	 */
   1289 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1290 	    m_head, BUS_DMA_NOWAIT);
   1291 
   1292 	switch (error) {
   1293 	case EAGAIN:
   1294 		adapter->eagain_tx_dma_setup.ev_count++;
   1295 		return EAGAIN;
   1296 	case ENOMEM:
   1297 		adapter->enomem_tx_dma_setup.ev_count++;
   1298 		return EAGAIN;
   1299 	case EFBIG:
   1300 		adapter->efbig_tx_dma_setup.ev_count++;
   1301 		return error;
   1302 	case EINVAL:
   1303 		adapter->einval_tx_dma_setup.ev_count++;
   1304 		return error;
   1305 	default:
   1306 		adapter->other_tx_dma_setup.ev_count++;
   1307 		return error;
   1308 	case 0:
   1309 		break;
   1310 	}
   1311 
   1312 	/* Make certain there are enough descriptors */
   1313 	if (map->dm_nsegs > txr->tx_avail - 2) {
   1314 		txr->no_desc_avail.ev_count++;
   1315 		/* XXX s/ixgbe/ixv/ */
   1316 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1317 		return EAGAIN;
   1318 	}
   1319 
   1320 	/*
    1321 	** Set up the appropriate offload context;
   1322 	** this becomes the first descriptor of
   1323 	** a packet.
   1324 	*/
   1325 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1326 		if (ixv_tso_setup(txr, m_head, &paylen)) {
   1327 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1328 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1329 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1330 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1331 			++adapter->tso_tx.ev_count;
   1332 		} else {
   1333 			++adapter->tso_err.ev_count;
   1334 			/* XXX unload DMA map! --dyoung -> easy? --msaitoh */
   1335 			return (ENXIO);
   1336 		}
   1337 	} else
   1338 		olinfo_status |= ixv_tx_ctx_setup(txr, m_head);
   1339 
   1340         /* Record payload length */
   1341 	if (paylen == 0)
   1342         	olinfo_status |= m_head->m_pkthdr.len <<
   1343 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1344 
   1345 	i = txr->next_avail_desc;
   1346 	for (j = 0; j < map->dm_nsegs; j++) {
   1347 		bus_size_t seglen;
   1348 		bus_addr_t segaddr;
   1349 
   1350 		txbuf = &txr->tx_buffers[i];
   1351 		txd = &txr->tx_base[i];
   1352 		seglen = map->dm_segs[j].ds_len;
   1353 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1354 
   1355 		txd->read.buffer_addr = segaddr;
   1356 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1357 		    cmd_type_len |seglen);
   1358 		txd->read.olinfo_status = htole32(olinfo_status);
   1359 		last = i; /* descriptor that will get completion IRQ */
   1360 
   1361 		if (++i == adapter->num_tx_desc)
   1362 			i = 0;
   1363 
   1364 		txbuf->m_head = NULL;
   1365 		txbuf->eop_index = -1;
   1366 	}
   1367 
   1368 	txd->read.cmd_type_len |=
   1369 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1370 	txr->tx_avail -= map->dm_nsegs;
   1371 	txr->next_avail_desc = i;
   1372 
   1373 	txbuf->m_head = m_head;
   1374 	/* Swap the dma map between the first and last descriptor */
   1375 	txr->tx_buffers[first].map = txbuf->map;
   1376 	txbuf->map = map;
   1377 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1378 	    BUS_DMASYNC_PREWRITE);
   1379 
   1380         /* Set the index of the descriptor that will be marked done */
   1381         txbuf = &txr->tx_buffers[first];
   1382 	txbuf->eop_index = last;
   1383 
   1384 	/* XXX s/ixgbe/ixg/ */
   1385         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1386             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1387 	/*
   1388 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
   1389 	 * hardware that this frame is available to transmit.
   1390 	 */
   1391 	++txr->total_packets.ev_count;
   1392 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
   1393 
   1394 	return 0;
   1395 }
   1396 
   1397 
   1398 /*********************************************************************
   1399  *  Multicast Update
   1400  *
    1401  *  This routine is called whenever the multicast address list is updated.
   1402  *
   1403  **********************************************************************/
   1404 #define IXGBE_RAR_ENTRIES 16
   1405 
   1406 static void
   1407 ixv_set_multi(struct adapter *adapter)
   1408 {
   1409 	struct ether_multi *enm;
   1410 	struct ether_multistep step;
   1411 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1412 	u8	*update_ptr;
   1413 	int	mcnt = 0;
   1414 	struct ethercom *ec = &adapter->osdep.ec;
   1415 
   1416 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1417 
   1418 	ETHER_FIRST_MULTI(step, ec, enm);
   1419 	while (enm != NULL) {
   1420 		bcopy(enm->enm_addrlo,
   1421 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1422 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1423 		mcnt++;
   1424 		/* XXX This might be required --msaitoh */
   1425 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1426 			break;
   1427 		ETHER_NEXT_MULTI(step, enm);
   1428 	}
   1429 
   1430 	update_ptr = mta;
   1431 
   1432 	ixgbe_update_mc_addr_list(&adapter->hw,
   1433 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1434 
   1435 	return;
   1436 }
   1437 
   1438 /*
    1439  * This is an iterator function needed by the multicast
    1440  * shared code. It simply feeds the shared code routine the
    1441  * addresses in the array built by ixv_set_multi(), one by one.
   1442  */
   1443 static u8 *
   1444 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1445 {
   1446 	u8 *addr = *update_ptr;
   1447 	u8 *newptr;
   1448 	*vmdq = 0;
   1449 
   1450 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1451 	*update_ptr = newptr;
   1452 	return addr;
   1453 }
   1454 
   1455 /*********************************************************************
   1456  *  Timer routine
   1457  *
    1458  *  This routine checks for link status, updates statistics,
   1459  *  and runs the watchdog check.
   1460  *
   1461  **********************************************************************/
   1462 
   1463 static void
   1464 ixv_local_timer1(void *arg)
   1465 {
   1466 	struct adapter	*adapter = arg;
   1467 	device_t	dev = adapter->dev;
   1468 	struct tx_ring	*txr = adapter->tx_rings;
   1469 	int		i;
   1470 	struct timeval now, elapsed;
   1471 
   1472 	KASSERT(mutex_owned(&adapter->core_mtx));
   1473 
   1474 	ixv_update_link_status(adapter);
   1475 
   1476 	/* Stats Update */
   1477 	ixv_update_stats(adapter);
   1478 
   1479 	/*
   1480 	 * If the interface has been paused
   1481 	 * then don't do the watchdog check
   1482 	 */
   1483 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   1484 		goto out;
   1485 	/*
   1486 	** Check for time since any descriptor was cleaned
   1487 	*/
   1488         for (i = 0; i < adapter->num_queues; i++, txr++) {
   1489 		IXV_TX_LOCK(txr);
   1490 		if (txr->watchdog_check == FALSE) {
   1491 			IXV_TX_UNLOCK(txr);
   1492 			continue;
   1493 		}
   1494 		getmicrotime(&now);
   1495 		timersub(&now, &txr->watchdog_time, &elapsed);
   1496 		if (tvtohz(&elapsed) > IXV_WATCHDOG)
   1497 			goto hung;
   1498 		IXV_TX_UNLOCK(txr);
   1499 	}
   1500 out:
   1501        	ixv_rearm_queues(adapter, adapter->que_mask);
   1502 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1503 	return;
   1504 
   1505 hung:
   1506 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1507 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
   1508 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
   1509 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
   1510 	device_printf(dev,"TX(%d) desc avail = %d,"
   1511 	    "Next TX to Clean = %d\n",
   1512 	    txr->me, txr->tx_avail, txr->next_to_clean);
   1513 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1514 	adapter->watchdog_events.ev_count++;
   1515 	IXV_TX_UNLOCK(txr);
   1516 	ixv_init_locked(adapter);
   1517 }
   1518 
   1519 static void
   1520 ixv_local_timer(void *arg)
   1521 {
   1522 	struct adapter *adapter = arg;
   1523 
   1524 	IXV_CORE_LOCK(adapter);
   1525 	ixv_local_timer1(adapter);
   1526 	IXV_CORE_UNLOCK(adapter);
   1527 }
   1528 
   1529 /*
    1530 ** Note: this routine updates the OS on the link state;
   1531 **	the real check of the hardware only happens with
   1532 **	a link interrupt.
   1533 */
   1534 static void
   1535 ixv_update_link_status(struct adapter *adapter)
   1536 {
   1537 	struct ifnet	*ifp = adapter->ifp;
   1538 	struct tx_ring *txr = adapter->tx_rings;
   1539 	device_t dev = adapter->dev;
   1540 
   1541 
   1542 	if (adapter->link_up){
   1543 		if (adapter->link_active == FALSE) {
   1544 			if (bootverbose)
   1545 				device_printf(dev,"Link is up %d Gbps %s \n",
   1546 				    ((adapter->link_speed == 128)? 10:1),
   1547 				    "Full Duplex");
   1548 			adapter->link_active = TRUE;
   1549 			if_link_state_change(ifp, LINK_STATE_UP);
   1550 		}
   1551 	} else { /* Link down */
   1552 		if (adapter->link_active == TRUE) {
   1553 			if (bootverbose)
   1554 				device_printf(dev,"Link is Down\n");
   1555 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1556 			adapter->link_active = FALSE;
   1557 			for (int i = 0; i < adapter->num_queues;
   1558 			    i++, txr++)
   1559 				txr->watchdog_check = FALSE;
   1560 		}
   1561 	}
   1562 
   1563 	return;
   1564 }
   1565 
   1566 
   1567 static void
   1568 ixv_ifstop(struct ifnet *ifp, int disable)
   1569 {
   1570 	struct adapter *adapter = ifp->if_softc;
   1571 
   1572 	IXV_CORE_LOCK(adapter);
   1573 	ixv_stop(adapter);
   1574 	IXV_CORE_UNLOCK(adapter);
   1575 }
   1576 
   1577 /*********************************************************************
   1578  *
   1579  *  This routine disables all traffic on the adapter by issuing a
   1580  *  global reset on the MAC and deallocates TX/RX buffers.
   1581  *
   1582  **********************************************************************/
   1583 
   1584 static void
   1585 ixv_stop(void *arg)
   1586 {
   1587 	struct ifnet   *ifp;
   1588 	struct adapter *adapter = arg;
   1589 	struct ixgbe_hw *hw = &adapter->hw;
   1590 	ifp = adapter->ifp;
   1591 
   1592 	KASSERT(mutex_owned(&adapter->core_mtx));
   1593 
   1594 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1595 	ixv_disable_intr(adapter);
   1596 
   1597 	/* Tell the stack that the interface is no longer active */
   1598 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1599 
   1600 	ixgbe_reset_hw(hw);
   1601 	adapter->hw.adapter_stopped = FALSE;
   1602 	ixgbe_stop_adapter(hw);
   1603 	callout_stop(&adapter->timer);
   1604 
   1605 	/* reprogram the RAR[0] in case user changed it. */
   1606 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1607 
   1608 	return;
   1609 }
   1610 
   1611 
   1612 /*********************************************************************
   1613  *
   1614  *  Determine hardware revision.
   1615  *
   1616  **********************************************************************/
   1617 static void
   1618 ixv_identify_hardware(struct adapter *adapter)
   1619 {
   1620 	u16		pci_cmd_word;
   1621 	pcitag_t tag;
   1622 	pci_chipset_tag_t pc;
   1623 	pcireg_t subid, id;
   1624 	struct ixgbe_hw *hw = &adapter->hw;
   1625 
   1626 	pc = adapter->osdep.pc;
   1627 	tag = adapter->osdep.tag;
   1628 
   1629 	/*
   1630 	** Make sure BUSMASTER is set; on a VM under
   1631 	** KVM it may not be, and that breaks things.
   1632 	*/
   1633 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1634 	if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
   1635 		INIT_DEBUGOUT("Bus Master bit was not set!\n");
   1636 		pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
   1637 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1638 	}
   1639 
   1640 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1641 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1642 
   1643 	/* Save off the information about this board */
   1644 	hw->vendor_id = PCI_VENDOR(id);
   1645 	hw->device_id = PCI_PRODUCT(id);
   1646 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1647 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1648 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1649 
   1650 	return;
   1651 }
   1652 
   1653 /*********************************************************************
   1654  *
   1655  *  Setup MSIX Interrupt resources and handlers
   1656  *
   1657  **********************************************************************/
   1658 static int
   1659 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1660 {
   1661 #if !defined(NETBSD_MSI_OR_MSIX)
   1662 	return 0;
   1663 #else
   1664 	device_t        dev = adapter->dev;
   1665 	struct ix_queue *que = adapter->queues;
   1666 	int 		error, rid, vector = 0;
   1667 	pci_chipset_tag_t pc;
   1668 	pcitag_t	tag;
   1669 	char intrbuf[PCI_INTRSTR_LEN];
   1670 	const char	*intrstr = NULL;
   1671 	kcpuset_t	*affinity;
   1672 	int		cpu_id = 0;
   1673 
   1674 	pc = adapter->osdep.pc;
   1675 	tag = adapter->osdep.tag;
   1676 
   1677 	if (pci_msix_alloc_exact(pa,
   1678 		&adapter->osdep.intrs, IXG_MSIX_NINTR) != 0)
   1679 		return (ENXIO);
   1680 
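	/*
	 * One vector per queue, plus a final vector (set up after this
	 * loop) for the mailbox/link interrupt; IXG_MSIX_NINTR is assumed
	 * to account for both.
	 */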
   1681 	kcpuset_create(&affinity, false);
   1682 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   1683 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1684 		    sizeof(intrbuf));
   1685 #ifdef IXV_MPSAFE
   1686 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1687 		    true);
   1688 #endif
   1689 		/* Set the handler function */
   1690 		adapter->osdep.ihs[i] = pci_intr_establish(pc,
   1691 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que);
   1692 		if (adapter->osdep.ihs[i] == NULL) {
   1693 			que->res = NULL;
   1694 			aprint_error_dev(dev,
   1695 			    "Failed to register QUE handler");
   1696 			kcpuset_destroy(affinity);
   1697 			return (ENXIO);
   1698 		}
   1699 		que->msix = vector;
   1700 		adapter->que_mask |= (u64)1 << que->msix;
   1701 
   1702 		cpu_id = i;
   1703 		/* Round-robin affinity */
   1704 		kcpuset_zero(affinity);
   1705 		kcpuset_set(affinity, cpu_id % ncpu);
   1706 		error = pci_intr_distribute(adapter->osdep.ihs[i], affinity,
   1707 		    NULL);
   1708 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1709 		    intrstr);
   1710 		if (error == 0)
   1711 			aprint_normal(", bound queue %d to cpu %d\n",
   1712 			    i, cpu_id);
   1713 		else
   1714 			aprint_normal("\n");
   1715 
   1716 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1717 		    que);
   1718 		if (que->que_si == NULL) {
   1719 			aprint_error_dev(dev,
   1720 			    "could not establish software interrupt\n");
   1721 		}
   1722 	}
   1723 
   1724 	/* and Mailbox */
   1725 	cpu_id++;
   1726 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1727 	    sizeof(intrbuf));
   1728 #ifdef IXG_MPSAFE
   1729 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   1730 #endif
   1731 	/* Set the mbx handler function */
   1732 	adapter->osdep.ihs[vector] = pci_intr_establish(pc,
   1733 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter);
   1734 	if (adapter->osdep.ihs[vector] == NULL) {
   1735 		adapter->res = NULL;
   1736 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1737 		kcpuset_destroy(affinity);
   1738 		return (ENXIO);
   1739 	}
   1740 	/* Round-robin affinity */
   1741 	kcpuset_zero(affinity);
   1742 	kcpuset_set(affinity, cpu_id % ncpu);
   1743 	error = pci_intr_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   1744 
   1745 	aprint_normal_dev(dev, "for link, interrupting at %s, ", intrstr);
   1746 	if (error == 0)
   1747 		aprint_normal("affinity to cpu %d\n", cpu_id);
   1748 	else
   1749 		aprint_normal("\n");
   1750 	adapter->mbxvec = vector;
   1751 	/* Tasklets for Mailbox */
   1752 	adapter->mbx_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1753 	    adapter);
   1754 	/*
   1755 	** Due to a broken design QEMU will fail to properly
   1756 	** enable the guest for MSIX unless the vectors in
   1757 	** the table are all set up, so we must rewrite the
   1758 	** ENABLE in the MSIX control register again at this
   1759 	** point to cause it to successfully initialize us.
   1760 	*/
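	/*
	 * PCI_MSIX_CTL_ENABLE is the MSI-X Enable bit in the Message
	 * Control word of the MSI-X capability.
	 */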
   1761 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1762 		int msix_ctrl;
   1763 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1764 		rid += PCI_MSIX_CTL;
   1765 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1766 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1767 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1768 	}
   1769 
   1770 	return (0);
   1771 #endif
   1772 }
   1773 
   1774 /*
   1775  * Set up MSIX resources.  Note that the VF
   1776  * device MUST use MSIX; there is no fallback.
   1777  */
   1778 static int
   1779 ixv_setup_msix(struct adapter *adapter)
   1780 {
   1781 #if !defined(NETBSD_MSI_OR_MSIX)
   1782 	return 0;
   1783 #else
   1784 	device_t dev = adapter->dev;
   1785 	int msgs;
   1786 
   1788 	/*
   1789 	** We need two vectors: one for the queue
   1790 	** and one for the mailbox.
   1791 	*/
   1792 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1793 	if (msgs < IXG_MSIX_NINTR) {
   1794 		aprint_error_dev(dev,"MSIX config error\n");
   1795 		return (ENXIO);
   1796 	}
   1797 
   1798 	adapter->msix_mem = (void *)1; /* XXX */
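	/*
	 * (void *)1 is just a non-NULL placeholder for the MSI-X BAR
	 * resource that the FreeBSD driver keeps here; the NetBSD port
	 * appears to use this field only as an "MSI-X in use" flag.
	 */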
   1799 	aprint_normal_dev(dev,
   1800 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1801 	return (IXG_MSIX_NINTR);	/* vectors we will configure */
   1802 #endif
   1803 }
   1804 
   1805 
   1806 static int
   1807 ixv_allocate_pci_resources(struct adapter *adapter,
   1808     const struct pci_attach_args *pa)
   1809 {
   1810 	pcireg_t	memtype;
   1811 	device_t        dev = adapter->dev;
   1812 	bus_addr_t addr;
   1813 	int flags;
   1814 
   1815 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1816 
   1817 	switch (memtype) {
   1818 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1819 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1820 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1821 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1822 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1823 			goto map_err;
   1824 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1825 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1826 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1827 		}
   1828 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1829 		     adapter->osdep.mem_size, flags,
   1830 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1831 map_err:
   1832 			adapter->osdep.mem_size = 0;
   1833 			aprint_error_dev(dev, "unable to map BAR0\n");
   1834 			return ENXIO;
   1835 		}
   1836 		break;
   1837 	default:
   1838 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1839 		return ENXIO;
   1840 	}
   1841 
   1842 	adapter->num_queues = 1;
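	/* This version of the VF driver runs with a single queue pair. */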
   1843 	adapter->hw.back = &adapter->osdep;
   1844 
   1845 	/*
   1846 	** Now set up MSI-X; this should
   1847 	** return the number of
   1848 	** configured vectors.
   1849 	*/
   1850 	adapter->msix = ixv_setup_msix(adapter);
   1851 	if (adapter->msix == ENXIO)
   1852 		return (ENXIO);
   1853 	else
   1854 		return (0);
   1855 }
   1856 
   1857 static void
   1858 ixv_free_pci_resources(struct adapter * adapter)
   1859 {
   1860 #if !defined(NETBSD_MSI_OR_MSIX)
   1861 #else
   1862 	struct 		ix_queue *que = adapter->queues;
   1863 	int		rid;
   1864 
   1865 	/*
   1866 	**  Release all msix queue resources:
   1867 	*/
   1868 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1869 		rid = que->msix + 1;
   1870 		if (adapter->osdep.ihs[i] != NULL)
   1871 			pci_intr_disestablish(adapter->osdep.pc,
   1872 			    adapter->osdep.ihs[i]);
   1873 	}
   1874 
   1875 
   1876 	/* Clean the Legacy or Link interrupt last */
   1877 	if (adapter->mbxvec) /* MSIX: the mbx handler lives at ihs[mbxvec] */
   1878 		rid = adapter->mbxvec;
   1879 	else
   1880 		rid = (adapter->msix != 0) ? 1 : 0;
   1881 
   1882 	if (adapter->osdep.ihs[rid] != NULL)
   1883 		pci_intr_disestablish(adapter->osdep.pc,
   1884 		    adapter->osdep.ihs[rid]);
   1885 	adapter->osdep.ihs[rid] = NULL;
   1886 
   1887 #if defined(NETBSD_MSI_OR_MSIX)
   1888 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1889 	    adapter->osdep.nintrs);
   1890 #endif
   1891 
   1892 	if (adapter->osdep.mem_size != 0) {
   1893 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1894 		    adapter->osdep.mem_bus_space_handle,
   1895 		    adapter->osdep.mem_size);
   1896 	}
   1897 
   1898 #endif
   1899 	return;
   1900 }
   1901 
   1902 /*********************************************************************
   1903  *
   1904  *  Setup networking device structure and register an interface.
   1905  *
   1906  **********************************************************************/
   1907 static void
   1908 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1909 {
   1910 	struct ethercom *ec = &adapter->osdep.ec;
   1911 	struct ifnet   *ifp;
   1912 
   1913 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1914 
   1915 	ifp = adapter->ifp = &ec->ec_if;
   1916 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1917 	ifp->if_baudrate = 1000000000;
   1918 	ifp->if_init = ixv_init;
   1919 	ifp->if_stop = ixv_ifstop;
   1920 	ifp->if_softc = adapter;
   1921 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1922 	ifp->if_ioctl = ixv_ioctl;
   1923 #if __FreeBSD_version >= 800000
   1924 	ifp->if_transmit = ixv_mq_start;
   1925 	ifp->if_qflush = ixv_qflush;
   1926 #else
   1927 	ifp->if_start = ixv_start;
   1928 #endif
   1929 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
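	/*
	 * The send queue is capped just below the ring size, presumably
	 * to keep a couple of descriptors of slack in the TX ring.
	 */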
   1930 
   1931 	if_attach(ifp);
   1932 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1933 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1934 
   1935 	adapter->max_frame_size =
   1936 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1937 
   1938 	/*
   1939 	 * Tell the upper layer(s) we support long frames.
   1940 	 */
   1941 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1942 
   1943 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1944 	ifp->if_capenable = 0;
   1945 
   1946 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1947 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1948 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1949 	    		| ETHERCAP_VLAN_MTU;
   1950 	ec->ec_capenable = ec->ec_capabilities;
   1951 
   1952 	/* Advertise LRO capability, but don't enable it by default */
   1953 	ifp->if_capabilities |= IFCAP_LRO;
   1954 
   1955 	/*
   1956 	** Don't turn this on by default: if vlans are
   1957 	** created on another pseudo device (e.g. lagg),
   1958 	** vlan events are not passed through, breaking
   1959 	** operation, but with HW FILTER off it works. If
   1960 	** you use vlans directly on this driver you can
   1961 	** enable this and get full hardware tag filtering.
   1962 	*/
   1963 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1964 
   1965 	/*
   1966 	 * Specify the media types supported by this adapter and register
   1967 	 * callbacks to update media and link information
   1968 	 */
   1969 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1970 		     ixv_media_status);
   1971 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
   1972 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1973 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1974 
   1975 	return;
   1976 }
   1977 
   1978 static void
   1979 ixv_config_link(struct adapter *adapter)
   1980 {
   1981 	struct ixgbe_hw *hw = &adapter->hw;
   1982 	u32	autoneg, err = 0;
   1983 
   1984 	if (hw->mac.ops.check_link)
   1985 		err = hw->mac.ops.check_link(hw, &autoneg,
   1986 		    &adapter->link_up, FALSE);
   1987 	if (err)
   1988 		goto out;
   1989 
   1990 	if (hw->mac.ops.setup_link)
   1991 		err = hw->mac.ops.setup_link(hw,
   1992 		    autoneg, adapter->link_up);
   1993 out:
   1994 	return;
   1995 }
   1996 
   1997 /********************************************************************
   1998  * Manage DMA'able memory.
   1999  *******************************************************************/
   2000 
   2001 static int
   2002 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
   2003 		struct ixv_dma_alloc *dma, int mapflags)
   2004 {
   2005 	device_t dev = adapter->dev;
   2006 	int             r, rsegs;
   2007 
   2008 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2009 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2010 			       size,	/* maxsize */
   2011 			       1,	/* nsegments */
   2012 			       size,	/* maxsegsize */
   2013 			       BUS_DMA_ALLOCNOW,	/* flags */
   2014 			       &dma->dma_tag);
   2015 	if (r != 0) {
   2016 		aprint_error_dev(dev,
   2017 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
   2018 		goto fail_0;
   2019 	}
   2020 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2021 		size,
   2022 		dma->dma_tag->dt_alignment,
   2023 		dma->dma_tag->dt_boundary,
   2024 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2025 	if (r != 0) {
   2026 		aprint_error_dev(dev,
   2027 		    "%s: bus_dmamem_alloc failed; error %u\n", __func__, r);
   2028 		goto fail_1;
   2029 	}
   2030 
   2031 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2032 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2033 	if (r != 0) {
   2034 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2035 		    __func__, r);
   2036 		goto fail_2;
   2037 	}
   2038 
   2039 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2040 	if (r != 0) {
   2041 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
   2042 		    __func__, r);
   2043 		goto fail_3;
   2044 	}
   2045 
   2046 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2047 			    size,
   2048 			    NULL,
   2049 			    mapflags | BUS_DMA_NOWAIT);
   2050 	if (r != 0) {
   2051 		aprint_error_dev(dev,"%s: bus_dmamap_load failed; error %u\n",
   2052 		    __func__, r);
   2053 		goto fail_4;
   2054 	}
   2055 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2056 	dma->dma_size = size;
   2057 	return 0;
   2058 fail_4:
   2059 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2060 fail_3:
   2061 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2062 fail_2:
   2063 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2064 fail_1:
   2065 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2066 fail_0:
   2067 	dma->dma_tag = NULL;
   2068 	return (r);
   2069 }
   2070 
   2071 static void
   2072 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
   2073 {
   2074 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2075 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2076 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2077 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2078 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2079 }
   2080 
   2081 
   2082 /*********************************************************************
   2083  *
   2084  *  Allocate memory for the transmit and receive rings, and then
   2085  *  the descriptors associated with each, called only once at attach.
   2086  *
   2087  **********************************************************************/
   2088 static int
   2089 ixv_allocate_queues(struct adapter *adapter)
   2090 {
   2091 	device_t	dev = adapter->dev;
   2092 	struct ix_queue	*que;
   2093 	struct tx_ring	*txr;
   2094 	struct rx_ring	*rxr;
   2095 	int rsize, tsize, error = 0;
   2096 	int txconf = 0, rxconf = 0;
   2097 
   2098         /* First allocate the top level queue structs */
   2099         if (!(adapter->queues =
   2100             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2101             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2102                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2103                 error = ENOMEM;
   2104                 goto fail;
   2105         }
   2106 
   2107 	/* Next allocate the TX ring struct memory */
   2108 	if (!(adapter->tx_rings =
   2109 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2110 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2111 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2112 		error = ENOMEM;
   2113 		goto tx_fail;
   2114 	}
   2115 
   2116 	/* Next allocate the RX */
   2117 	if (!(adapter->rx_rings =
   2118 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2119 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2120 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2121 		error = ENOMEM;
   2122 		goto rx_fail;
   2123 	}
   2124 
   2125 	/* For the ring itself */
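	/*
	 * Each slot is a 16-byte advanced descriptor; the total is
	 * rounded up to DBA_ALIGN to satisfy the descriptor base-address
	 * alignment required by the hardware.
	 */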
   2126 	tsize = roundup2(adapter->num_tx_desc *
   2127 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2128 
   2129 	/*
   2130 	 * Now set up the TX queues.  txconf is needed to handle the
   2131 	 * possibility that things fail midcourse, in which case we need
   2132 	 * to undo the memory allocations gracefully.
   2133 	 */
   2134 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2135 		/* Set up some basics */
   2136 		txr = &adapter->tx_rings[i];
   2137 		txr->adapter = adapter;
   2138 		txr->me = i;
   2139 
   2140 		/* Initialize the TX side lock */
   2141 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2142 		    device_xname(dev), txr->me);
   2143 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2144 
   2145 		if (ixv_dma_malloc(adapter, tsize,
   2146 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2147 			aprint_error_dev(dev,
   2148 			    "Unable to allocate TX Descriptor memory\n");
   2149 			error = ENOMEM;
   2150 			goto err_tx_desc;
   2151 		}
   2152 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2153 		bzero((void *)txr->tx_base, tsize);
   2154 
   2155         	/* Now allocate transmit buffers for the ring */
   2156         	if (ixv_allocate_transmit_buffers(txr)) {
   2157 			aprint_error_dev(dev,
   2158 			    "Critical Failure setting up transmit buffers\n");
   2159 			error = ENOMEM;
   2160 			goto err_tx_desc;
   2161         	}
   2162 #if __FreeBSD_version >= 800000
   2163 		/* Allocate a buf ring */
   2164 		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
   2165 		    M_WAITOK, &txr->tx_mtx);
   2166 		if (txr->br == NULL) {
   2167 			aprint_error_dev(dev,
   2168 			    "Critical Failure setting up buf ring\n");
   2169 			error = ENOMEM;
   2170 			goto err_tx_desc;
   2171 		}
   2172 #endif
   2173 	}
   2174 
   2175 	/*
   2176 	 * Next the RX queues...
   2177 	 */
   2178 	rsize = roundup2(adapter->num_rx_desc *
   2179 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2180 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2181 		rxr = &adapter->rx_rings[i];
   2182 		/* Set up some basics */
   2183 		rxr->adapter = adapter;
   2184 		rxr->me = i;
   2185 
   2186 		/* Initialize the RX side lock */
   2187 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2188 		    device_xname(dev), rxr->me);
   2189 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2190 
   2191 		if (ixv_dma_malloc(adapter, rsize,
   2192 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2193 			aprint_error_dev(dev,
   2194 			    "Unable to allocate RxDescriptor memory\n");
   2195 			error = ENOMEM;
   2196 			goto err_rx_desc;
   2197 		}
   2198 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2199 		bzero((void *)rxr->rx_base, rsize);
   2200 
   2201 		/* Allocate receive buffers for the ring */
   2202 		if (ixv_allocate_receive_buffers(rxr)) {
   2203 			aprint_error_dev(dev,
   2204 			    "Critical Failure setting up receive buffers\n");
   2205 			error = ENOMEM;
   2206 			goto err_rx_desc;
   2207 		}
   2208 	}
   2209 
   2210 	/*
   2211 	** Finally set up the queue holding structs
   2212 	*/
   2213 	for (int i = 0; i < adapter->num_queues; i++) {
   2214 		que = &adapter->queues[i];
   2215 		que->adapter = adapter;
   2216 		que->txr = &adapter->tx_rings[i];
   2217 		que->rxr = &adapter->rx_rings[i];
   2218 	}
   2219 
   2220 	return (0);
   2221 
   2222 err_rx_desc:
   2223 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2224 		ixv_dma_free(adapter, &rxr->rxdma);
   2225 err_tx_desc:
   2226 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2227 		ixv_dma_free(adapter, &txr->txdma);
   2228 	free(adapter->rx_rings, M_DEVBUF);
   2229 rx_fail:
   2230 	free(adapter->tx_rings, M_DEVBUF);
   2231 tx_fail:
   2232 	free(adapter->queues, M_DEVBUF);
   2233 fail:
   2234 	return (error);
   2235 }
   2236 
   2237 
   2238 /*********************************************************************
   2239  *
   2240  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2241  *  the information needed to transmit a packet on the wire. This is
   2242  *  called only once at attach, setup is done every reset.
   2243  *
   2244  **********************************************************************/
   2245 static int
   2246 ixv_allocate_transmit_buffers(struct tx_ring *txr)
   2247 {
   2248 	struct adapter *adapter = txr->adapter;
   2249 	device_t dev = adapter->dev;
   2250 	struct ixv_tx_buf *txbuf;
   2251 	int error, i;
   2252 
   2253 	/*
   2254 	 * Setup DMA descriptor areas.
   2255 	 */
   2256 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2257 			       1, 0,		/* alignment, bounds */
   2258 			       IXV_TSO_SIZE,		/* maxsize */
   2259 			       32,			/* nsegments */
   2260 			       PAGE_SIZE,		/* maxsegsize */
   2261 			       0,			/* flags */
   2262 			       &txr->txtag))) {
   2263 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   2264 		goto fail;
   2265 	}
   2266 
   2267 	if (!(txr->tx_buffers =
   2268 	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
   2269 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2270 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   2271 		error = ENOMEM;
   2272 		goto fail;
   2273 	}
   2274 
   2275         /* Create the descriptor buffer dma maps */
   2276 	txbuf = txr->tx_buffers;
   2277 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2278 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   2279 		if (error != 0) {
   2280 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   2281 			goto fail;
   2282 		}
   2283 	}
   2284 
   2285 	return 0;
   2286 fail:
   2287 	/* Free everything; this also handles a partial setup */
   2288 	ixv_free_transmit_structures(adapter);
   2289 	return (error);
   2290 }
   2291 
   2292 /*********************************************************************
   2293  *
   2294  *  Initialize a transmit ring.
   2295  *
   2296  **********************************************************************/
   2297 static void
   2298 ixv_setup_transmit_ring(struct tx_ring *txr)
   2299 {
   2300 	struct adapter *adapter = txr->adapter;
   2301 	struct ixv_tx_buf *txbuf;
   2302 	int i;
   2303 
   2304 	/* Clear the old ring contents */
   2305 	IXV_TX_LOCK(txr);
   2306 	bzero((void *)txr->tx_base,
   2307 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   2308 	/* Reset indices */
   2309 	txr->next_avail_desc = 0;
   2310 	txr->next_to_clean = 0;
   2311 
   2312 	/* Free any existing tx buffers. */
   2313         txbuf = txr->tx_buffers;
   2314 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2315 		if (txbuf->m_head != NULL) {
   2316 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   2317 			    0, txbuf->m_head->m_pkthdr.len,
   2318 			    BUS_DMASYNC_POSTWRITE);
   2319 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   2320 			m_freem(txbuf->m_head);
   2321 			txbuf->m_head = NULL;
   2322 		}
   2323 		/* Clear the EOP index */
   2324 		txbuf->eop_index = -1;
   2325         }
   2326 
   2327 	/* Set number of descriptors available */
   2328 	txr->tx_avail = adapter->num_tx_desc;
   2329 
   2330 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2331 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2332 	IXV_TX_UNLOCK(txr);
   2333 }
   2334 
   2335 /*********************************************************************
   2336  *
   2337  *  Initialize all transmit rings.
   2338  *
   2339  **********************************************************************/
   2340 static int
   2341 ixv_setup_transmit_structures(struct adapter *adapter)
   2342 {
   2343 	struct tx_ring *txr = adapter->tx_rings;
   2344 
   2345 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   2346 		ixv_setup_transmit_ring(txr);
   2347 
   2348 	return (0);
   2349 }
   2350 
   2351 /*********************************************************************
   2352  *
   2353  *  Enable transmit unit.
   2354  *
   2355  **********************************************************************/
   2356 static void
   2357 ixv_initialize_transmit_units(struct adapter *adapter)
   2358 {
   2359 	struct tx_ring	*txr = adapter->tx_rings;
   2360 	struct ixgbe_hw	*hw = &adapter->hw;
   2361 
   2362 
   2363 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2364 		u64	tdba = txr->txdma.dma_paddr;
   2365 		u32	txctrl, txdctl;
   2366 
   2367 		/* Set WTHRESH to 8, burst writeback */
   2368 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2369 		txdctl |= (8 << 16);
   2370 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2371 		/* Now enable */
   2372 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2373 		txdctl |= IXGBE_TXDCTL_ENABLE;
   2374 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2375 
   2376 		/* Set the HW Tx Head and Tail indices */
   2377 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   2378 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   2379 
   2380 		/* Setup Transmit Descriptor Cmd Settings */
   2381 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   2382 		txr->watchdog_check = FALSE;
   2383 
   2384 		/* Set Ring parameters */
   2385 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   2386 		       (tdba & 0x00000000ffffffffULL));
   2387 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   2388 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   2389 		    adapter->num_tx_desc *
   2390 		    sizeof(struct ixgbe_legacy_tx_desc));
   2391 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   2392 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   2393 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
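		/*
		 * NB: the break below means only ring 0 is initialized;
		 * this is harmless while num_queues == 1.
		 */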
   2394 		break;
   2395 	}
   2396 
   2397 	return;
   2398 }
   2399 
   2400 /*********************************************************************
   2401  *
   2402  *  Free all transmit rings.
   2403  *
   2404  **********************************************************************/
   2405 static void
   2406 ixv_free_transmit_structures(struct adapter *adapter)
   2407 {
   2408 	struct tx_ring *txr = adapter->tx_rings;
   2409 
   2410 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2411 		ixv_free_transmit_buffers(txr);
   2412 		ixv_dma_free(adapter, &txr->txdma);
   2413 		IXV_TX_LOCK_DESTROY(txr);
   2414 	}
   2415 	free(adapter->tx_rings, M_DEVBUF);
   2416 }
   2417 
   2418 /*********************************************************************
   2419  *
   2420  *  Free transmit ring related data structures.
   2421  *
   2422  **********************************************************************/
   2423 static void
   2424 ixv_free_transmit_buffers(struct tx_ring *txr)
   2425 {
   2426 	struct adapter *adapter = txr->adapter;
   2427 	struct ixv_tx_buf *tx_buffer;
   2428 	int             i;
   2429 
   2430 	INIT_DEBUGOUT("free_transmit_ring: begin");
   2431 
   2432 	if (txr->tx_buffers == NULL)
   2433 		return;
   2434 
   2435 	tx_buffer = txr->tx_buffers;
   2436 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   2437 		if (tx_buffer->m_head != NULL) {
   2438 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   2439 			    0, tx_buffer->m_head->m_pkthdr.len,
   2440 			    BUS_DMASYNC_POSTWRITE);
   2441 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2442 			m_freem(tx_buffer->m_head);
   2443 			tx_buffer->m_head = NULL;
   2444 			if (tx_buffer->map != NULL) {
   2445 				ixgbe_dmamap_destroy(txr->txtag,
   2446 				    tx_buffer->map);
   2447 				tx_buffer->map = NULL;
   2448 			}
   2449 		} else if (tx_buffer->map != NULL) {
   2450 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2451 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   2452 			tx_buffer->map = NULL;
   2453 		}
   2454 	}
   2455 #if __FreeBSD_version >= 800000
   2456 	if (txr->br != NULL)
   2457 		buf_ring_free(txr->br, M_DEVBUF);
   2458 #endif
   2459 	if (txr->tx_buffers != NULL) {
   2460 		free(txr->tx_buffers, M_DEVBUF);
   2461 		txr->tx_buffers = NULL;
   2462 	}
   2463 	if (txr->txtag != NULL) {
   2464 		ixgbe_dma_tag_destroy(txr->txtag);
   2465 		txr->txtag = NULL;
   2466 	}
   2467 	return;
   2468 }
   2469 
   2470 /*********************************************************************
   2471  *
   2472  *  Advanced Context Descriptor setup for VLAN or CSUM
   2473  *
   2474  **********************************************************************/
   2475 
   2476 static u32
   2477 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   2478 {
   2479 	struct m_tag *mtag;
   2480 	struct adapter *adapter = txr->adapter;
   2481 	struct ethercom *ec = &adapter->osdep.ec;
   2482 	struct ixgbe_adv_tx_context_desc *TXD;
   2483 	struct ixv_tx_buf        *tx_buffer;
   2484 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2485 	struct ether_vlan_header *eh;
   2486 	struct ip ip;
   2487 	struct ip6_hdr ip6;
   2488 	int  ehdrlen, ip_hlen = 0;
   2489 	u16	etype;
   2490 	u8	ipproto = 0;
   2491 	bool	offload;
   2492 	int ctxd = txr->next_avail_desc;
   2493 	u16 vtag = 0;
   2494 
   2495 
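	/*
	 * M_CSUM_OFFLOAD is assumed to be the driver's mask of the
	 * M_CSUM_* flags it can offload; with no VLAN tag and no offload
	 * request there is nothing for a context descriptor to do.
	 */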
   2496 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   2497 
   2498 	tx_buffer = &txr->tx_buffers[ctxd];
   2499 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2500 
   2501 	/*
   2502 	** In advanced descriptors the vlan tag must
   2503 	** be placed into the descriptor itself.
   2504 	*/
   2505 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2506 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2507 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2508 	} else if (!offload)
   2509 		return 0;
   2510 
   2511 	/*
   2512 	 * Determine where frame payload starts.
   2513 	 * Jump over vlan headers if already present,
   2514 	 * helpful for QinQ too.
   2515 	 */
   2516 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   2517 	eh = mtod(mp, struct ether_vlan_header *);
   2518 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   2519 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   2520 		etype = ntohs(eh->evl_proto);
   2521 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2522 	} else {
   2523 		etype = ntohs(eh->evl_encap_proto);
   2524 		ehdrlen = ETHER_HDR_LEN;
   2525 	}
   2526 
   2527 	/* Set the ether header length */
   2528 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2529 
   2530 	switch (etype) {
   2531 	case ETHERTYPE_IP:
   2532 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   2533 		ip_hlen = ip.ip_hl << 2;
   2534 		ipproto = ip.ip_p;
   2535 #if 0
   2536 		ip.ip_sum = 0;
   2537 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   2538 #else
   2539 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   2540 		    ip.ip_sum == 0);
   2541 #endif
   2542 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2543 		break;
   2544 	case ETHERTYPE_IPV6:
   2545 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   2546 		ip_hlen = sizeof(ip6);
   2547 		ipproto = ip6.ip6_nxt;
   2548 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   2549 		break;
   2550 	default:
   2551 		break;
   2552 	}
   2553 
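	/*
	 * The POPTS bits (IXSM for IP, TXSM for TCP/UDP below) request
	 * checksum insertion; they are pre-shifted into the olinfo_status
	 * layout used by the data descriptor.
	 */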
   2554 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   2555 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   2556 
   2557 	vlan_macip_lens |= ip_hlen;
   2558 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2559 
   2560 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   2561 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2562 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2563 		KASSERT(ipproto == IPPROTO_TCP);
   2564 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   2565 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   2566 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2567 		KASSERT(ipproto == IPPROTO_UDP);
   2568 	}
   2569 
   2570 	/* Now copy bits into descriptor */
   2571 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2572 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2573 	TXD->seqnum_seed = htole32(0);
   2574 	TXD->mss_l4len_idx = htole32(0);
   2575 
   2576 	tx_buffer->m_head = NULL;
   2577 	tx_buffer->eop_index = -1;
   2578 
   2579 	/* We've consumed the first desc, adjust counters */
   2580 	if (++ctxd == adapter->num_tx_desc)
   2581 		ctxd = 0;
   2582 	txr->next_avail_desc = ctxd;
   2583 	--txr->tx_avail;
   2584 
   2585         return olinfo;
   2586 }
   2587 
   2588 /**********************************************************************
   2589  *
   2590  *  Setup work for hardware segmentation offload (TSO) on
   2591  *  adapters using advanced tx descriptors
   2592  *
   2593  **********************************************************************/
   2594 static bool
   2595 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   2596 {
   2597 	struct m_tag *mtag;
   2598 	struct adapter *adapter = txr->adapter;
   2599 	struct ethercom *ec = &adapter->osdep.ec;
   2600 	struct ixgbe_adv_tx_context_desc *TXD;
   2601 	struct ixv_tx_buf        *tx_buffer;
   2602 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2603 	u32 mss_l4len_idx = 0;
   2604 	u16 vtag = 0;
   2605 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
   2606 	struct ether_vlan_header *eh;
   2607 	struct ip *ip;
   2608 	struct tcphdr *th;
   2609 
   2610 
   2611 	/*
   2612 	 * Determine where frame payload starts.
   2613 	 * Jump over vlan headers if already present
   2614 	 */
   2615 	eh = mtod(mp, struct ether_vlan_header *);
   2616 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   2617 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2618 	else
   2619 		ehdrlen = ETHER_HDR_LEN;
   2620 
   2621         /* Ensure we have at least the IP+TCP header in the first mbuf. */
   2622         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   2623 		return FALSE;
   2624 
   2625 	ctxd = txr->next_avail_desc;
   2626 	tx_buffer = &txr->tx_buffers[ctxd];
   2627 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2628 
   2629 	ip = (struct ip *)(mp->m_data + ehdrlen);
   2630 	if (ip->ip_p != IPPROTO_TCP)
   2631 		return FALSE;   /* 0 */
   2632 	ip->ip_sum = 0;
   2633 	ip_hlen = ip->ip_hl << 2;
   2634 	th = (struct tcphdr *)((char *)ip + ip_hlen);
   2635 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   2636 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   2637 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
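	/*
	 * For TSO the TCP checksum field must be seeded with the
	 * pseudo-header checksum (addresses + protocol, zero length);
	 * the hardware completes it for each segment.
	 */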
   2638 	tcp_hlen = th->th_off << 2;
   2639 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   2640 
   2641 	/* This is used in the transmit desc in encap */
   2642 	*paylen = mp->m_pkthdr.len - hdrlen;
   2643 
   2644 	/* VLAN MACLEN IPLEN */
   2645 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2646 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2647                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2648 	}
   2649 
   2650 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2651 	vlan_macip_lens |= ip_hlen;
   2652 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2653 
   2654 	/* ADV DTYPE TUCMD */
   2655 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2656 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2657 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2658 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2659 
   2660 
   2661 	/* MSS L4LEN IDX */
   2662 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   2663 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   2664 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   2665 
   2666 	TXD->seqnum_seed = htole32(0);
   2667 	tx_buffer->m_head = NULL;
   2668 	tx_buffer->eop_index = -1;
   2669 
   2670 	if (++ctxd == adapter->num_tx_desc)
   2671 		ctxd = 0;
   2672 
   2673 	txr->tx_avail--;
   2674 	txr->next_avail_desc = ctxd;
   2675 	return TRUE;
   2676 }
   2677 
   2678 
   2679 /**********************************************************************
   2680  *
   2681  *  Examine each tx_buffer in the used queue. If the hardware is done
   2682  *  processing the packet then free associated resources. The
   2683  *  tx_buffer is put back on the free queue.
   2684  *
   2685  **********************************************************************/
   2686 static bool
   2687 ixv_txeof(struct tx_ring *txr)
   2688 {
   2689 	struct adapter	*adapter = txr->adapter;
   2690 	struct ifnet	*ifp = adapter->ifp;
   2691 	u32	first, last, done;
   2692 	struct ixv_tx_buf *tx_buffer;
   2693 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   2694 
   2695 	KASSERT(mutex_owned(&txr->tx_mtx));
   2696 
   2697 	if (txr->tx_avail == adapter->num_tx_desc)
   2698 		return false;
   2699 
   2700 	first = txr->next_to_clean;
   2701 	tx_buffer = &txr->tx_buffers[first];
   2702 	/* For cleanup we just use legacy struct */
   2703 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2704 	last = tx_buffer->eop_index;
   2705 	if (last == -1)
   2706 		return false;
   2707 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2708 
   2709 	/*
   2710 	** Get the index of the first descriptor
   2711 	** BEYOND the EOP and call that 'done'.
   2712 	** I do this so the comparison in the
   2713 	** inner while loop below can be simple
   2714 	*/
   2715 	if (++last == adapter->num_tx_desc) last = 0;
   2716 	done = last;
   2717 
   2718         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2719             BUS_DMASYNC_POSTREAD);
   2720 	/*
   2721 	** Only the EOP descriptor of a packet now has the DD
   2722 	** bit set; this is what we look for...
   2723 	*/
   2724 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   2725 		/* We clean the range of the packet */
   2726 		while (first != done) {
   2727 			tx_desc->upper.data = 0;
   2728 			tx_desc->lower.data = 0;
   2729 			tx_desc->buffer_addr = 0;
   2730 			++txr->tx_avail;
   2731 
   2732 			if (tx_buffer->m_head) {
   2733 				bus_dmamap_sync(txr->txtag->dt_dmat,
   2734 				    tx_buffer->map,
   2735 				    0, tx_buffer->m_head->m_pkthdr.len,
   2736 				    BUS_DMASYNC_POSTWRITE);
   2737 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2738 				m_freem(tx_buffer->m_head);
   2739 				tx_buffer->m_head = NULL;
   2740 				tx_buffer->map = NULL;
   2741 			}
   2742 			tx_buffer->eop_index = -1;
   2743 			getmicrotime(&txr->watchdog_time);
   2744 
   2745 			if (++first == adapter->num_tx_desc)
   2746 				first = 0;
   2747 
   2748 			tx_buffer = &txr->tx_buffers[first];
   2749 			tx_desc =
   2750 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2751 		}
   2752 		++ifp->if_opackets;
   2753 		/* See if there is more work now */
   2754 		last = tx_buffer->eop_index;
   2755 		if (last != -1) {
   2756 			eop_desc =
   2757 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2758 			/* Get next done point */
   2759 			if (++last == adapter->num_tx_desc) last = 0;
   2760 			done = last;
   2761 		} else
   2762 			break;
   2763 	}
   2764 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2765 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2766 
   2767 	txr->next_to_clean = first;
   2768 
   2769 	/*
   2770 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   2771 	 * it is OK to send packets. If there are no pending descriptors,
   2772 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   2773 	 * restart the timeout.
   2774 	 */
   2775 	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
   2776 		ifp->if_flags &= ~IFF_OACTIVE;
   2777 		if (txr->tx_avail == adapter->num_tx_desc) {
   2778 			txr->watchdog_check = FALSE;
   2779 			return false;
   2780 		}
   2781 	}
   2782 
   2783 	return true;
   2784 }
   2785 
   2786 /*********************************************************************
   2787  *
   2788  *  Refresh mbuf buffers for RX descriptor rings
   2789  *   - now keeps its own state, so discards due to resource
   2790  *     exhaustion are unnecessary; if an mbuf cannot be obtained
   2791  *     the routine just returns, keeping its placeholder, and can
   2792  *     simply be called again later to retry.
   2793  *
   2794  **********************************************************************/
   2795 static void
   2796 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
   2797 {
   2798 	struct adapter		*adapter = rxr->adapter;
   2799 	struct ixv_rx_buf	*rxbuf;
   2800 	struct mbuf		*mh, *mp;
   2801 	int			i, j, error;
   2802 	bool			refreshed = false;
   2803 
   2804 	i = j = rxr->next_to_refresh;
   2805         /* Get the control variable, one beyond refresh point */
   2806 	if (++j == adapter->num_rx_desc)
   2807 		j = 0;
   2808 	while (j != limit) {
   2809 		rxbuf = &rxr->rx_buffers[i];
   2810 		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
   2811 			mh = m_gethdr(M_NOWAIT, MT_DATA);
   2812 			if (mh == NULL)
   2813 				goto update;
   2814 			mh->m_pkthdr.len = mh->m_len = MHLEN;
   2815 			mh->m_flags |= M_PKTHDR;
   2816 			m_adj(mh, ETHER_ALIGN);
   2817 			/* Get the memory mapping */
   2818 			error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   2819 			    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   2820 			if (error != 0) {
   2821 				printf("GET BUF: dmamap load"
   2822 				    " failure - %d\n", error);
   2823 				m_free(mh);
   2824 				goto update;
   2825 			}
   2826 			rxbuf->m_head = mh;
   2827 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2828 			    BUS_DMASYNC_PREREAD);
   2829 			rxr->rx_base[i].read.hdr_addr =
   2830 			    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   2831 		}
   2832 
   2833 		if (rxbuf->m_pack == NULL) {
   2834 			mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   2835 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   2836 			if (mp == NULL) {
   2837 				rxr->no_jmbuf.ev_count++;
   2838 				goto update;
   2839 			}
   2841 
   2842 			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   2843 			/* Get the memory mapping */
   2844 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   2845 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   2846 			if (error != 0) {
   2847 				printf("GET BUF: dmamap load"
   2848 				    " failure - %d\n", error);
   2849 				m_free(mp);
   2850 				rxbuf->m_pack = NULL;
   2851 				goto update;
   2852 			}
   2853 			rxbuf->m_pack = mp;
   2854 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2855 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   2856 			rxr->rx_base[i].read.pkt_addr =
   2857 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   2858 		}
   2859 
   2860 		refreshed = true;
   2861 		rxr->next_to_refresh = i = j;
   2862 		/* Calculate next index */
   2863 		if (++j == adapter->num_rx_desc)
   2864 			j = 0;
   2865 	}
   2866 update:
   2867 	if (refreshed) /* update tail index */
   2868 		IXGBE_WRITE_REG(&adapter->hw,
   2869 		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
   2870 	return;
   2871 }
   2872 
   2873 /*********************************************************************
   2874  *
   2875  *  Allocate memory for rx_buffer structures. Since we use one
   2876  *  rx_buffer per received packet, the maximum number of rx_buffer's
   2877  *  that we'll need is equal to the number of receive descriptors
   2878  *  that we've allocated.
   2879  *
   2880  **********************************************************************/
   2881 static int
   2882 ixv_allocate_receive_buffers(struct rx_ring *rxr)
   2883 {
   2884 	struct	adapter 	*adapter = rxr->adapter;
   2885 	device_t 		dev = adapter->dev;
   2886 	struct ixv_rx_buf 	*rxbuf;
   2887 	int             	i, bsize, error;
   2888 
   2889 	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
   2890 	if (!(rxr->rx_buffers =
   2891 	    (struct ixv_rx_buf *) malloc(bsize,
   2892 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2893 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   2894 		error = ENOMEM;
   2895 		goto fail;
   2896 	}
   2897 
   2898 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2899 				   1, 0,	/* alignment, bounds */
   2900 				   MSIZE,		/* maxsize */
   2901 				   1,			/* nsegments */
   2902 				   MSIZE,		/* maxsegsize */
   2903 				   0,			/* flags */
   2904 				   &rxr->htag))) {
   2905 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2906 		goto fail;
   2907 	}
   2908 
   2909 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2910 				   1, 0,	/* alignment, bounds */
   2911 				   MJUMPAGESIZE,	/* maxsize */
   2912 				   1,			/* nsegments */
   2913 				   MJUMPAGESIZE,	/* maxsegsize */
   2914 				   0,			/* flags */
   2915 				   &rxr->ptag))) {
   2916 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2917 		goto fail;
   2918 	}
   2919 
   2920 	for (i = 0; i < adapter->num_rx_desc; i++) {
   2921 		rxbuf = &rxr->rx_buffers[i];
   2922 		error = ixgbe_dmamap_create(rxr->htag,
   2923 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   2924 		if (error) {
   2925 			aprint_error_dev(dev, "Unable to create RX head map\n");
   2926 			goto fail;
   2927 		}
   2928 		error = ixgbe_dmamap_create(rxr->ptag,
   2929 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   2930 		if (error) {
   2931 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   2932 			goto fail;
   2933 		}
   2934 	}
   2935 
   2936 	return (0);
   2937 
   2938 fail:
   2939 	/* Frees all, but can handle partial completion */
   2940 	ixv_free_receive_structures(adapter);
   2941 	return (error);
   2942 }
   2943 
   2944 static void
   2945 ixv_free_receive_ring(struct rx_ring *rxr)
   2946 {
   2947 	struct  adapter         *adapter;
   2948 	struct ixv_rx_buf       *rxbuf;
   2949 	int i;
   2950 
   2951 	adapter = rxr->adapter;
   2952 	for (i = 0; i < adapter->num_rx_desc; i++) {
   2953 		rxbuf = &rxr->rx_buffers[i];
   2954 		if (rxbuf->m_head != NULL) {
   2955 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2956 			    BUS_DMASYNC_POSTREAD);
   2957 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   2958 			rxbuf->m_head->m_flags |= M_PKTHDR;
   2959 			m_freem(rxbuf->m_head);
   2960 		}
   2961 		if (rxbuf->m_pack != NULL) {
   2962 			/* XXX not ixgbe_ ? */
   2963 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2964 			    0, rxbuf->m_pack->m_pkthdr.len,
   2965 			    BUS_DMASYNC_POSTREAD);
   2966 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   2967 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   2968 			m_freem(rxbuf->m_pack);
   2969 		}
   2970 		rxbuf->m_head = NULL;
   2971 		rxbuf->m_pack = NULL;
   2972 	}
   2973 }
   2974 
   2975 
   2976 /*********************************************************************
   2977  *
   2978  *  Initialize a receive ring and its buffers.
   2979  *
   2980  **********************************************************************/
   2981 static int
   2982 ixv_setup_receive_ring(struct rx_ring *rxr)
   2983 {
   2984 	struct	adapter 	*adapter;
   2985 	struct ixv_rx_buf	*rxbuf;
   2986 #ifdef LRO
   2987 	struct ifnet		*ifp;
   2988 	struct lro_ctrl		*lro = &rxr->lro;
   2989 #endif /* LRO */
   2990 	int			rsize, error = 0;
   2991 
   2992 	adapter = rxr->adapter;
   2993 #ifdef LRO
   2994 	ifp = adapter->ifp;
   2995 #endif /* LRO */
   2996 
   2997 	/* Clear the ring contents */
   2998 	IXV_RX_LOCK(rxr);
   2999 	rsize = roundup2(adapter->num_rx_desc *
   3000 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3001 	bzero((void *)rxr->rx_base, rsize);
   3002 
   3003 	/* Free current RX buffer structs and their mbufs */
   3004 	ixv_free_receive_ring(rxr);
   3005 
   3006 	IXV_RX_UNLOCK(rxr);
   3007 
   3008 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3009 	 * or size of jumbo mbufs may have changed.
   3010 	 */
   3011 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3012 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   3013 
   3014 	IXV_RX_LOCK(rxr);
   3015 
   3016 	/* Configure header split? */
   3017 	if (ixv_header_split)
   3018 		rxr->hdr_split = TRUE;
   3019 
   3020 	/* Now replenish the mbufs */
   3021 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3022 		struct mbuf	*mh, *mp;
   3023 
   3024 		rxbuf = &rxr->rx_buffers[j];
   3025 		/*
   3026 		** Don't allocate header mbufs if we're not
   3027 		** doing header split; it's wasteful
   3028 		*/
   3029 		if (rxr->hdr_split == FALSE)
   3030 			goto skip_head;
   3031 
   3032 		/* First the header */
   3033 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3034 		if (rxbuf->m_head == NULL) {
   3035 			error = ENOBUFS;
   3036 			goto fail;
   3037 		}
   3038 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3039 		mh = rxbuf->m_head;
   3040 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3041 		mh->m_flags |= M_PKTHDR;
   3042 		/* Get the memory mapping */
   3043 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3044 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3045 		if (error != 0) /* Nothing elegant to do here */
   3046 			goto fail;
   3047 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3048 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3049 		/* Update descriptor */
   3050 		rxr->rx_base[j].read.hdr_addr =
   3051 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3052 
   3053 skip_head:
   3054 		/* Now the payload cluster */
   3055 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3056 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3057 		if (rxbuf->m_pack == NULL) {
   3058 			error = ENOBUFS;
   3059                         goto fail;
   3060 		}
   3061 		mp = rxbuf->m_pack;
   3062 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3063 		/* Get the memory mapping */
   3064 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3065 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3066 		if (error != 0)
   3067                         goto fail;
   3068 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3069 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3070 		/* Update descriptor */
   3071 		rxr->rx_base[j].read.pkt_addr =
   3072 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3073 	}
   3074 
   3075 
   3076 	/* Setup our descriptor indices */
   3077 	rxr->next_to_check = 0;
   3078 	rxr->next_to_refresh = 0;
   3079 	rxr->lro_enabled = FALSE;
   3080 	rxr->rx_split_packets.ev_count = 0;
   3081 	rxr->rx_bytes.ev_count = 0;
   3082 	rxr->discard = FALSE;
   3083 
   3084 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3085 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3086 
   3087 #ifdef LRO
   3088 	/*
   3089 	** Now set up the LRO interface:
   3090 	*/
   3091 	if (ifp->if_capenable & IFCAP_LRO) {
   3092 		device_t dev = adapter->dev;
   3093 		int err = tcp_lro_init(lro);
   3094 		if (err) {
   3095 			device_printf(dev, "LRO Initialization failed!\n");
   3096 			goto fail;
   3097 		}
   3098 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   3099 		rxr->lro_enabled = TRUE;
   3100 		lro->ifp = adapter->ifp;
   3101 	}
   3102 #endif /* LRO */
   3103 
   3104 	IXV_RX_UNLOCK(rxr);
   3105 	return (0);
   3106 
   3107 fail:
   3108 	ixv_free_receive_ring(rxr);
   3109 	IXV_RX_UNLOCK(rxr);
   3110 	return (error);
   3111 }
   3112 
   3113 /*********************************************************************
   3114  *
   3115  *  Initialize all receive rings.
   3116  *
   3117  **********************************************************************/
   3118 static int
   3119 ixv_setup_receive_structures(struct adapter *adapter)
   3120 {
   3121 	struct rx_ring *rxr = adapter->rx_rings;
   3122 	int j;
   3123 
   3124 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   3125 		if (ixv_setup_receive_ring(rxr))
   3126 			goto fail;
   3127 
   3128 	return (0);
   3129 fail:
   3130 	/*
   3131 	 * Free the RX buffers allocated so far. We only handle
   3132 	 * the rings that completed; the failing ring will have
   3133 	 * cleaned up after itself. Ring 'j' failed, so it is the terminus.
   3134 	 */
   3135 	for (int i = 0; i < j; ++i) {
   3136 		rxr = &adapter->rx_rings[i];
   3137 		ixv_free_receive_ring(rxr);
   3138 	}
   3139 
   3140 	return (ENOBUFS);
   3141 }
   3142 
   3143 /*********************************************************************
   3144  *
   3145  *  Setup receive registers and features.
   3146  *
   3147  **********************************************************************/
   3148 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   3149 
   3150 static void
   3151 ixv_initialize_receive_units(struct adapter *adapter)
   3152 {
   3153 	int i;
   3154 	struct	rx_ring	*rxr = adapter->rx_rings;
   3155 	struct ixgbe_hw	*hw = &adapter->hw;
   3156 	struct ifnet   *ifp = adapter->ifp;
   3157 	u32		bufsz, fctrl, rxcsum, hlreg;
   3158 
   3159 
   3160 	/* Enable broadcasts */
   3161 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   3162 	fctrl |= IXGBE_FCTRL_BAM;
   3163 	fctrl |= IXGBE_FCTRL_DPF;
   3164 	fctrl |= IXGBE_FCTRL_PMCF;
   3165 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   3166 
   3167 	/* Set for Jumbo Frames? */
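	/*
	 * SRRCTL expresses the receive buffer size in 1 KB units, hence
	 * the IXGBE_SRRCTL_BSIZEPKT_SHIFT applied to 4096/2048 below.
	 */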
   3168 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   3169 	if (ifp->if_mtu > ETHERMTU) {
   3170 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   3171 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3172 	} else {
   3173 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   3174 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3175 	}
   3176 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   3177 
   3178 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   3179 		u64 rdba = rxr->rxdma.dma_paddr;
   3180 		u32 reg, rxdctl;
   3181 
   3182 		/* Do the queue enabling first */
   3183 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3184 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3185 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   3186 		for (int k = 0; k < 10; k++) {
   3187 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   3188 			    IXGBE_RXDCTL_ENABLE)
   3189 				break;
   3190 			else
   3191 				msec_delay(1);
   3192 		}
   3193 		wmb();
   3194 
   3195 		/* Setup the Base and Length of the Rx Descriptor Ring */
   3196 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   3197 		    (rdba & 0x00000000ffffffffULL));
   3198 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   3199 		    (rdba >> 32));
   3200 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   3201 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   3202 
   3203 		/* Set up the SRRCTL register */
   3204 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   3205 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   3206 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   3207 		reg |= bufsz;
   3208 		if (rxr->hdr_split) {
   3209 			/* Use a standard mbuf for the header */
   3210 			reg |= ((IXV_RX_HDR <<
   3211 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   3212 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   3213 			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   3214 		} else
   3215 			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   3216 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   3217 
   3218 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   3219 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   3220 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   3221 		    adapter->num_rx_desc - 1);
   3222 	}
   3223 
   3224 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   3225 
   3226 	if (ifp->if_capenable & IFCAP_RXCSUM)
   3227 		rxcsum |= IXGBE_RXCSUM_PCSD;
   3228 
   3229 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   3230 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   3231 
   3232 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   3233 
   3234 	return;
   3235 }
   3236 
   3237 /*********************************************************************
   3238  *
   3239  *  Free all receive rings.
   3240  *
   3241  **********************************************************************/
   3242 static void
   3243 ixv_free_receive_structures(struct adapter *adapter)
   3244 {
   3245 	struct rx_ring *rxr = adapter->rx_rings;
   3246 
   3247 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   3248 #ifdef LRO
   3249 		struct lro_ctrl		*lro = &rxr->lro;
   3250 #endif /* LRO */
   3251 		ixv_free_receive_buffers(rxr);
   3252 #ifdef LRO
   3253 		/* Free LRO memory */
   3254 		tcp_lro_free(lro);
   3255 #endif /* LRO */
   3256 		/* Free the ring memory as well */
   3257 		ixv_dma_free(adapter, &rxr->rxdma);
   3258 		IXV_RX_LOCK_DESTROY(rxr);
   3259 	}
   3260 
   3261 	free(adapter->rx_rings, M_DEVBUF);
   3262 }
   3263 
   3264 
   3265 /*********************************************************************
   3266  *
   3267  *  Free receive ring data structures
   3268  *
   3269  **********************************************************************/
   3270 static void
   3271 ixv_free_receive_buffers(struct rx_ring *rxr)
   3272 {
   3273 	struct adapter		*adapter = rxr->adapter;
   3274 	struct ixv_rx_buf	*rxbuf;
   3275 
    3276 	INIT_DEBUGOUT("ixv_free_receive_buffers: begin");
   3277 
   3278 	/* Cleanup any existing buffers */
   3279 	if (rxr->rx_buffers != NULL) {
   3280 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   3281 			rxbuf = &rxr->rx_buffers[i];
   3282 			if (rxbuf->m_head != NULL) {
   3283 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3284 				    BUS_DMASYNC_POSTREAD);
   3285 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3286 				rxbuf->m_head->m_flags |= M_PKTHDR;
   3287 				m_freem(rxbuf->m_head);
   3288 			}
   3289 			if (rxbuf->m_pack != NULL) {
   3290 				/* XXX not ixgbe_* ? */
   3291 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3292 				    0, rxbuf->m_pack->m_pkthdr.len,
   3293 				    BUS_DMASYNC_POSTREAD);
   3294 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3295 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   3296 				m_freem(rxbuf->m_pack);
   3297 			}
   3298 			rxbuf->m_head = NULL;
   3299 			rxbuf->m_pack = NULL;
   3300 			if (rxbuf->hmap != NULL) {
   3301 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   3302 				rxbuf->hmap = NULL;
   3303 			}
   3304 			if (rxbuf->pmap != NULL) {
   3305 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   3306 				rxbuf->pmap = NULL;
   3307 			}
   3308 		}
   3309 		if (rxr->rx_buffers != NULL) {
   3310 			free(rxr->rx_buffers, M_DEVBUF);
   3311 			rxr->rx_buffers = NULL;
   3312 		}
   3313 	}
   3314 
   3315 	if (rxr->htag != NULL) {
   3316 		ixgbe_dma_tag_destroy(rxr->htag);
   3317 		rxr->htag = NULL;
   3318 	}
   3319 	if (rxr->ptag != NULL) {
   3320 		ixgbe_dma_tag_destroy(rxr->ptag);
   3321 		rxr->ptag = NULL;
   3322 	}
   3323 
   3324 	return;
   3325 }
   3326 
   3327 static __inline void
   3328 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   3329 {
   3330 	int s;
   3331 
   3332 #ifdef LRO
   3333 	struct adapter	*adapter = ifp->if_softc;
   3334 	struct ethercom *ec = &adapter->osdep.ec;
   3335 
   3336         /*
    3337          * At the moment LRO is only for IPv4/TCP packets whose TCP
    3338          * checksum has been verified by hardware.  The packet must also
    3339          * carry no VLAN tag in its Ethernet header.
   3340          */
   3341         if (rxr->lro_enabled &&
   3342             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   3343             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3344             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   3345             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
   3346             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   3347             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   3348                 /*
   3349                  * Send to the stack if:
   3350                  **  - LRO not enabled, or
   3351                  **  - no LRO resources, or
   3352                  **  - lro enqueue fails
   3353                  */
   3354                 if (rxr->lro.lro_cnt != 0)
   3355                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   3356                                 return;
   3357         }
   3358 #endif /* LRO */
   3359 
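         	/*
         	** The RX ring lock is dropped across the BPF tap and if_input()
         	** call below, presumably so the stack can re-enter the driver
         	** (e.g. to transmit a reply) without recursing on the ring lock.
         	*/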
   3360 	IXV_RX_UNLOCK(rxr);
   3361 
   3362 	s = splnet();
   3363 	/* Pass this up to any BPF listeners. */
   3364 	bpf_mtap(ifp, m);
   3365         (*ifp->if_input)(ifp, m);
   3366 	splx(s);
   3367 
   3368 	IXV_RX_LOCK(rxr);
   3369 }
   3370 
   3371 static __inline void
   3372 ixv_rx_discard(struct rx_ring *rxr, int i)
   3373 {
   3374 	struct ixv_rx_buf	*rbuf;
   3375 
   3376 	rbuf = &rxr->rx_buffers[i];
   3377 	if (rbuf->fmp != NULL) {/* Partial chain ? */
   3378 		rbuf->fmp->m_flags |= M_PKTHDR;
   3379 		m_freem(rbuf->fmp);
   3380 		rbuf->fmp = NULL;
   3381 	}
   3382 
   3383 	/*
   3384 	** With advanced descriptors the writeback
    3385 	** clobbers the buffer addrs, so it's easier
   3386 	** to just free the existing mbufs and take
   3387 	** the normal refresh path to get new buffers
   3388 	** and mapping.
   3389 	*/
   3390 	if (rbuf->m_head) {
   3391 		m_free(rbuf->m_head);
   3392 		rbuf->m_head = NULL;
   3393 	}
   3394 
   3395 	if (rbuf->m_pack) {
   3396 		m_free(rbuf->m_pack);
   3397 		rbuf->m_pack = NULL;
   3398 	}
   3399 
   3400 	return;
   3401 }
   3402 
   3403 
   3404 /*********************************************************************
   3405  *
   3406  *  This routine executes in interrupt context. It replenishes
    3407  *  the mbufs in the descriptor ring and passes data that has been
    3408  *  DMA'd into host memory up to the upper layer.
   3409  *
   3410  *  We loop at most count times if count is > 0, or until done if
   3411  *  count < 0.
   3412  *
   3413  *  Return TRUE for more work, FALSE for all clean.
   3414  *********************************************************************/
   3415 static bool
   3416 ixv_rxeof(struct ix_queue *que, int count)
   3417 {
   3418 	struct adapter		*adapter = que->adapter;
   3419 	struct rx_ring		*rxr = que->rxr;
   3420 	struct ifnet		*ifp = adapter->ifp;
   3421 #ifdef LRO
   3422 	struct lro_ctrl		*lro = &rxr->lro;
   3423 	struct lro_entry	*queued;
   3424 #endif /* LRO */
   3425 	int			i, nextp, processed = 0;
   3426 	u32			staterr = 0;
   3427 	union ixgbe_adv_rx_desc	*cur;
   3428 	struct ixv_rx_buf	*rbuf, *nbuf;
   3429 
   3430 	IXV_RX_LOCK(rxr);
   3431 
   3432 	for (i = rxr->next_to_check; count != 0;) {
   3433 		struct mbuf	*sendmp, *mh, *mp;
   3434 		u32		ptype;
   3435 		u16		hlen, plen, hdr, vtag;
   3436 		bool		eop;
   3437 
   3438 		/* Sync the ring. */
   3439 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3440 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   3441 
   3442 		cur = &rxr->rx_base[i];
   3443 		staterr = le32toh(cur->wb.upper.status_error);
   3444 
   3445 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   3446 			break;
   3447 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   3448 			break;
   3449 
   3450 		count--;
   3451 		sendmp = NULL;
   3452 		nbuf = NULL;
   3453 		cur->wb.upper.status_error = 0;
   3454 		rbuf = &rxr->rx_buffers[i];
   3455 		mh = rbuf->m_head;
   3456 		mp = rbuf->m_pack;
   3457 
   3458 		plen = le16toh(cur->wb.upper.length);
   3459 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   3460 		    IXGBE_RXDADV_PKTTYPE_MASK;
   3461 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   3462 		vtag = le16toh(cur->wb.upper.vlan);
   3463 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   3464 
   3465 		/* Make sure all parts of a bad packet are discarded */
   3466 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   3467 		    (rxr->discard)) {
   3468 			ifp->if_ierrors++;
   3469 			rxr->rx_discarded.ev_count++;
   3470 			if (!eop)
   3471 				rxr->discard = TRUE;
   3472 			else
   3473 				rxr->discard = FALSE;
   3474 			ixv_rx_discard(rxr, i);
   3475 			goto next_desc;
   3476 		}
   3477 
   3478 		if (!eop) {
   3479 			nextp = i + 1;
   3480 			if (nextp == adapter->num_rx_desc)
   3481 				nextp = 0;
   3482 			nbuf = &rxr->rx_buffers[nextp];
   3483 			prefetch(nbuf);
   3484 		}
   3485 		/*
   3486 		** The header mbuf is ONLY used when header
   3487 		** split is enabled, otherwise we get normal
    3488 		** split is enabled; otherwise we get normal
    3489 		** behavior, i.e., both header and payload
   3490 		**
   3491 		** Rather than using the fmp/lmp global pointers
   3492 		** we now keep the head of a packet chain in the
   3493 		** buffer struct and pass this along from one
   3494 		** descriptor to the next, until we get EOP.
   3495 		*/
   3496 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   3497 			/* This must be an initial descriptor */
   3498 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   3499 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   3500 			if (hlen > IXV_RX_HDR)
   3501 				hlen = IXV_RX_HDR;
   3502 			mh->m_len = hlen;
   3503 			mh->m_flags |= M_PKTHDR;
   3504 			mh->m_next = NULL;
   3505 			mh->m_pkthdr.len = mh->m_len;
   3506 			/* Null buf pointer so it is refreshed */
   3507 			rbuf->m_head = NULL;
   3508 			/*
    3509 			** Check the payload length; it
    3510 			** could be zero if this is a small
    3511 			** packet.
   3512 			*/
   3513 			if (plen > 0) {
   3514 				mp->m_len = plen;
   3515 				mp->m_next = NULL;
   3516 				mp->m_flags &= ~M_PKTHDR;
   3517 				mh->m_next = mp;
   3518 				mh->m_pkthdr.len += mp->m_len;
   3519 				/* Null buf pointer so it is refreshed */
   3520 				rbuf->m_pack = NULL;
   3521 				rxr->rx_split_packets.ev_count++;
   3522 			}
   3523 			/*
   3524 			** Now create the forward
    3525 			** chain so that when the packet
    3526 			** completes we won't have to.
   3527 			*/
   3528                         if (eop == 0) {
   3529 				/* stash the chain head */
   3530                                 nbuf->fmp = mh;
   3531 				/* Make forward chain */
   3532                                 if (plen)
   3533                                         mp->m_next = nbuf->m_pack;
   3534                                 else
   3535                                         mh->m_next = nbuf->m_pack;
   3536                         } else {
   3537 				/* Singlet, prepare to send */
   3538                                 sendmp = mh;
   3539                                 if (VLAN_ATTACHED(&adapter->osdep.ec) &&
   3540 				  (staterr & IXGBE_RXD_STAT_VP)) {
   3541 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3542 					    printf("%s: could not apply VLAN "
   3543 					        "tag", __func__));
   3544                                 }
   3545                         }
   3546 		} else {
   3547 			/*
   3548 			** Either no header split, or a
   3549 			** secondary piece of a fragmented
   3550 			** split packet.
   3551 			*/
   3552 			mp->m_len = plen;
   3553 			/*
    3554 			** See if there is a stored chain head
    3555 			** that tells us what this fragment is
   3556 			*/
   3557 			sendmp = rbuf->fmp;
   3558 			rbuf->m_pack = rbuf->fmp = NULL;
   3559 
   3560 			if (sendmp != NULL) /* secondary frag */
   3561 				sendmp->m_pkthdr.len += mp->m_len;
   3562 			else {
   3563 				/* first desc of a non-ps chain */
   3564 				sendmp = mp;
   3565 				sendmp->m_flags |= M_PKTHDR;
   3566 				sendmp->m_pkthdr.len = mp->m_len;
   3567 				if (staterr & IXGBE_RXD_STAT_VP) {
   3568 					/* XXX Do something reasonable on
   3569 					 * error.
   3570 					 */
   3571 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3572 					    printf("%s: could not apply VLAN "
   3573 					        "tag", __func__));
   3574 				}
   3575                         }
   3576 			/* Pass the head pointer on */
   3577 			if (eop == 0) {
   3578 				nbuf->fmp = sendmp;
   3579 				sendmp = NULL;
   3580 				mp->m_next = nbuf->m_pack;
   3581 			}
   3582 		}
   3583 		++processed;
   3584 		/* Sending this frame? */
   3585 		if (eop) {
   3586 			sendmp->m_pkthdr.rcvif = ifp;
   3587 			ifp->if_ipackets++;
   3588 			rxr->rx_packets.ev_count++;
   3589 			/* capture data for AIM */
   3590 			rxr->bytes += sendmp->m_pkthdr.len;
   3591 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   3592 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   3593 				ixv_rx_checksum(staterr, sendmp, ptype,
   3594 				   &adapter->stats);
   3595 			}
   3596 #if __FreeBSD_version >= 800000
   3597 			sendmp->m_pkthdr.flowid = que->msix;
   3598 			sendmp->m_flags |= M_FLOWID;
   3599 #endif
   3600 		}
   3601 next_desc:
   3602 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3603 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3604 
   3605 		/* Advance our pointers to the next descriptor. */
   3606 		if (++i == adapter->num_rx_desc)
   3607 			i = 0;
   3608 
   3609 		/* Now send to the stack or do LRO */
   3610 		if (sendmp != NULL)
   3611 			ixv_rx_input(rxr, ifp, sendmp, ptype);
   3612 
    3613 		/* Refresh the mbufs every 8 processed descriptors */
   3614 		if (processed == 8) {
   3615 			ixv_refresh_mbufs(rxr, i);
   3616 			processed = 0;
   3617 		}
   3618 	}
   3619 
   3620 	/* Refresh any remaining buf structs */
   3621 	if (ixv_rx_unrefreshed(rxr))
   3622 		ixv_refresh_mbufs(rxr, i);
   3623 
   3624 	rxr->next_to_check = i;
   3625 
   3626 #ifdef LRO
   3627 	/*
   3628 	 * Flush any outstanding LRO work
   3629 	 */
   3630 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   3631 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   3632 		tcp_lro_flush(lro, queued);
   3633 	}
   3634 #endif /* LRO */
   3635 
   3636 	IXV_RX_UNLOCK(rxr);
   3637 
   3638 	/*
    3639 	** Do we still have cleaning to do?
    3640 	** If so, schedule another interrupt.
   3641 	*/
   3642 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   3643 		ixv_rearm_queues(adapter, (u64)(1ULL << que->msix));
   3644 		return true;
   3645 	}
   3646 
   3647 	return false;
   3648 }
   3649 
   3650 
   3651 /*********************************************************************
   3652  *
   3653  *  Verify that the hardware indicated that the checksum is valid.
    3654  *  Inform the stack of the checksum status so that the stack
    3655  *  doesn't spend time verifying it again.
   3656  *
   3657  *********************************************************************/
   3658 static void
   3659 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   3660     struct ixgbevf_hw_stats *stats)
   3661 {
   3662 	u16	status = (u16) staterr;
   3663 	u8	errors = (u8) (staterr >> 24);
   3664 #if 0
   3665 	bool	sctp = FALSE;
   3666 
   3667 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3668 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   3669 		sctp = TRUE;
   3670 #endif
   3671 	if (status & IXGBE_RXD_STAT_IPCS) {
   3672 		stats->ipcs.ev_count++;
   3673 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   3674 			/* IP Checksum Good */
   3675 			mp->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   3676 
   3677 		} else {
   3678 			stats->ipcs_bad.ev_count++;
   3679 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   3680 		}
   3681 	}
   3682 	if (status & IXGBE_RXD_STAT_L4CS) {
   3683 		stats->l4cs.ev_count++;
   3684 		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   3685 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   3686 			mp->m_pkthdr.csum_flags |= type;
   3687 		} else {
   3688 			stats->l4cs_bad.ev_count++;
   3689 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   3690 		}
   3691 	}
   3692 	return;
   3693 }
   3694 
   3695 static void
   3696 ixv_setup_vlan_support(struct adapter *adapter)
   3697 {
   3698 	struct ixgbe_hw *hw = &adapter->hw;
   3699 	u32		ctrl, vid, vfta, retry;
   3700 
   3701 
   3702 	/*
    3703 	** We get here through init_locked, meaning
    3704 	** a soft reset, which has already cleared
    3705 	** the VFTA and other state; if no VLANs
    3706 	** have been registered, do nothing.
   3707 	*/
   3708 	if (adapter->num_vlans == 0)
   3709 		return;
   3710 
   3711 	/* Enable the queues */
   3712 	for (int i = 0; i < adapter->num_queues; i++) {
   3713 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3714 		ctrl |= IXGBE_RXDCTL_VME;
   3715 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   3716 	}
   3717 
   3718 	/*
    3719 	** A soft reset zeroes out the VFTA, so
   3720 	** we need to repopulate it now.
   3721 	*/
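         	/*
         	** ixv_shadow_vfta is effectively a bitmap of VLAN IDs: entry i,
         	** bit j corresponds to VLAN ID (i * 32) + j.  For example VLAN 100
         	** lives in entry 3 (100 >> 5), bit 4 (100 & 0x1F), matching the
         	** encoding used by the (disabled) ixv_register_vlan() below.
         	*/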
   3722 	for (int i = 0; i < VFTA_SIZE; i++) {
   3723 		if (ixv_shadow_vfta[i] == 0)
   3724 			continue;
   3725 		vfta = ixv_shadow_vfta[i];
   3726 		/*
    3727 		** Reconstruct the VLAN IDs from
    3728 		** the bits set in each 32-bit
    3729 		** entry of the shadow array.
    3730 		*/
    3731 		for (int j = 0; j < 32; j++) {
   3732 			retry = 0;
   3733 			if ((vfta & (1 << j)) == 0)
   3734 				continue;
   3735 			vid = (i * 32) + j;
   3736 			/* Call the shared code mailbox routine */
   3737 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   3738 				if (++retry > 5)
   3739 					break;
   3740 			}
   3741 		}
   3742 	}
   3743 }
   3744 
   3745 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   3746 /*
    3747 ** This routine is run via a vlan config EVENT;
    3748 ** it enables us to use the HW filter table since
    3749 ** we can get the vlan id. This just creates the
    3750 ** entry in the soft version of the VFTA; init will
    3751 ** repopulate the real table.
   3752 */
   3753 static void
   3754 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3755 {
   3756 	struct adapter	*adapter = ifp->if_softc;
   3757 	u16		index, bit;
   3758 
   3759 	if (ifp->if_softc !=  arg)   /* Not our event */
   3760 		return;
   3761 
   3762 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3763 		return;
   3764 
   3765 	IXV_CORE_LOCK(adapter);
   3766 	index = (vtag >> 5) & 0x7F;
   3767 	bit = vtag & 0x1F;
   3768 	ixv_shadow_vfta[index] |= (1 << bit);
   3769 	/* Re-init to load the changes */
   3770 	ixv_init_locked(adapter);
   3771 	IXV_CORE_UNLOCK(adapter);
   3772 }
   3773 
   3774 /*
    3775 ** This routine is run via a vlan
    3776 ** unconfig EVENT; it removes our entry
    3777 ** from the soft VFTA.
   3778 */
   3779 static void
   3780 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3781 {
   3782 	struct adapter	*adapter = ifp->if_softc;
   3783 	u16		index, bit;
   3784 
   3785 	if (ifp->if_softc !=  arg)
   3786 		return;
   3787 
   3788 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3789 		return;
   3790 
   3791 	IXV_CORE_LOCK(adapter);
   3792 	index = (vtag >> 5) & 0x7F;
   3793 	bit = vtag & 0x1F;
   3794 	ixv_shadow_vfta[index] &= ~(1 << bit);
   3795 	/* Re-init to load the changes */
   3796 	ixv_init_locked(adapter);
   3797 	IXV_CORE_UNLOCK(adapter);
   3798 }
   3799 #endif
   3800 
   3801 static void
   3802 ixv_enable_intr(struct adapter *adapter)
   3803 {
   3804 	struct ixgbe_hw *hw = &adapter->hw;
   3805 	struct ix_queue *que = adapter->queues;
   3806 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   3807 
   3808 
   3809 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   3810 
   3811 	mask = IXGBE_EIMS_ENABLE_MASK;
   3812 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   3813 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   3814 
   3815         for (int i = 0; i < adapter->num_queues; i++, que++)
   3816 		ixv_enable_queue(adapter, que->msix);
   3817 
   3818 	IXGBE_WRITE_FLUSH(hw);
   3819 
   3820 	return;
   3821 }
   3822 
   3823 static void
   3824 ixv_disable_intr(struct adapter *adapter)
   3825 {
   3826 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   3827 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   3828 	IXGBE_WRITE_FLUSH(&adapter->hw);
   3829 	return;
   3830 }
   3831 
   3832 /*
   3833 ** Setup the correct IVAR register for a particular MSIX interrupt
   3834 **  - entry is the register array entry
   3835 **  - vector is the MSIX vector for this queue
   3836 **  - type is RX/TX/MISC
   3837 */
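         /*
         ** Layout note: each VTIVAR register carries the vectors for two queue
         ** entries, one byte per RX/TX slot.  The computation below,
         ** index = 16 * (entry & 1) + 8 * type, selects that byte within
         ** VTIVAR(entry >> 1); for example entry 3 places its RX vector
         ** (type 0) in bits 23:16 of VTIVAR(1) and its TX vector in bits 31:24.
         */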
   3838 static void
   3839 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3840 {
   3841 	struct ixgbe_hw *hw = &adapter->hw;
   3842 	u32 ivar, index;
   3843 
   3844 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3845 
   3846 	if (type == -1) { /* MISC IVAR */
   3847 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   3848 		ivar &= ~0xFF;
   3849 		ivar |= vector;
   3850 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   3851 	} else {	/* RX/TX IVARS */
   3852 		index = (16 * (entry & 1)) + (8 * type);
   3853 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   3854 		ivar &= ~(0xFF << index);
   3855 		ivar |= (vector << index);
   3856 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   3857 	}
   3858 }
   3859 
   3860 static void
   3861 ixv_configure_ivars(struct adapter *adapter)
   3862 {
   3863 	struct  ix_queue *que = adapter->queues;
   3864 
   3865         for (int i = 0; i < adapter->num_queues; i++, que++) {
   3866 		/* First the RX queue entry */
   3867                 ixv_set_ivar(adapter, i, que->msix, 0);
   3868 		/* ... and the TX */
   3869 		ixv_set_ivar(adapter, i, que->msix, 1);
   3870 		/* Set an initial value in EITR */
   3871                 IXGBE_WRITE_REG(&adapter->hw,
   3872                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   3873 	}
   3874 
   3875 	/* For the Link interrupt */
   3876         ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
   3877 }
   3878 
   3879 
   3880 /*
   3881 ** Tasklet handler for MSIX MBX interrupts
    3882 **  - done outside the interrupt handler since it might sleep
   3883 */
   3884 static void
   3885 ixv_handle_mbx(void *context)
   3886 {
   3887 	struct adapter  *adapter = context;
   3888 
   3889 	ixgbe_check_link(&adapter->hw,
   3890 	    &adapter->link_speed, &adapter->link_up, 0);
   3891 	ixv_update_link_status(adapter);
   3892 }
   3893 
   3894 /*
    3895 ** The VF stats registers never have a truly virgin
   3896 ** starting point, so this routine tries to make an
   3897 ** artificial one, marking ground zero on attach as
   3898 ** it were.
   3899 */
   3900 static void
   3901 ixv_save_stats(struct adapter *adapter)
   3902 {
   3903 	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
   3904 		adapter->stats.saved_reset_vfgprc +=
   3905 		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
   3906 		adapter->stats.saved_reset_vfgptc +=
   3907 		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
   3908 		adapter->stats.saved_reset_vfgorc +=
   3909 		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
   3910 		adapter->stats.saved_reset_vfgotc +=
   3911 		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
   3912 		adapter->stats.saved_reset_vfmprc +=
   3913 		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
   3914 	}
   3915 }
   3916 
   3917 static void
   3918 ixv_init_stats(struct adapter *adapter)
   3919 {
   3920 	struct ixgbe_hw *hw = &adapter->hw;
   3921 
   3922 	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   3923 	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   3924 	adapter->stats.last_vfgorc |=
   3925 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   3926 
   3927 	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   3928 	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   3929 	adapter->stats.last_vfgotc |=
   3930 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   3931 
   3932 	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   3933 
   3934 	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
   3935 	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
   3936 	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
   3937 	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
   3938 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
   3939 }
   3940 
   3941 #define UPDATE_STAT_32(reg, last, count)		\
   3942 {							\
   3943 	u32 current = IXGBE_READ_REG(hw, reg);		\
   3944 	if (current < last)				\
   3945 		count += 0x100000000LL;			\
   3946 	last = current;					\
   3947 	count &= 0xFFFFFFFF00000000LL;			\
   3948 	count |= current;				\
   3949 }
   3950 
   3951 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   3952 {							\
   3953 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   3954 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   3955 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   3956 	if (current < last)				\
   3957 		count += 0x1000000000LL;		\
   3958 	last = current;					\
   3959 	count &= 0xFFFFFFF000000000LL;			\
   3960 	count |= current;				\
   3961 }
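         /*
         ** Both macros extend the hardware's wrapping counters in software:
         ** when the current raw value is below the last sample the register
         ** has wrapped, so the accumulated count is bumped by 2^32 (32-bit
         ** counters) or 2^36 (36-bit LSB/MSB pairs), and the low bits are
         ** then replaced with the current raw value.
         */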
   3962 
   3963 /*
   3964 ** ixv_update_stats - Update the board statistics counters.
   3965 */
   3966 void
   3967 ixv_update_stats(struct adapter *adapter)
   3968 {
   3969         struct ixgbe_hw *hw = &adapter->hw;
   3970 
   3971         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
   3972 	    adapter->stats.vfgprc);
   3973         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
   3974 	    adapter->stats.vfgptc);
   3975         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
   3976 	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
   3977         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
   3978 	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
   3979         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
   3980 	    adapter->stats.vfmprc);
   3981 }
   3982 
   3983 /**********************************************************************
   3984  *
    3985  *  This routine is called from the stats sysctl handler (ixv_sysctl_stats).
   3986  *  This routine provides a way to take a look at important statistics
   3987  *  maintained by the driver and hardware.
   3988  *
   3989  **********************************************************************/
   3990 static void
   3991 ixv_print_hw_stats(struct adapter * adapter)
   3992 {
   3993         device_t dev = adapter->dev;
   3994 
   3995         device_printf(dev,"Std Mbuf Failed = %"PRIu64"\n",
   3996                adapter->mbuf_defrag_failed.ev_count);
   3997         device_printf(dev,"Driver dropped packets = %"PRIu64"\n",
   3998                adapter->dropped_pkts.ev_count);
   3999         device_printf(dev, "watchdog timeouts = %"PRIu64"\n",
   4000                adapter->watchdog_events.ev_count);
   4001 
   4002         device_printf(dev,"Good Packets Rcvd = %lld\n",
   4003                (long long)adapter->stats.vfgprc);
   4004         device_printf(dev,"Good Packets Xmtd = %lld\n",
   4005                (long long)adapter->stats.vfgptc);
   4006         device_printf(dev,"TSO Transmissions = %"PRIu64"\n",
   4007                adapter->tso_tx.ev_count);
   4008 
   4009 }
   4010 
   4011 /**********************************************************************
   4012  *
    4013  *  This routine is called from the debug sysctl handler (ixv_sysctl_debug).
   4014  *  This routine provides a way to take a look at important statistics
   4015  *  maintained by the driver and hardware.
   4016  *
   4017  **********************************************************************/
   4018 static void
   4019 ixv_print_debug_info(struct adapter *adapter)
   4020 {
   4021         device_t dev = adapter->dev;
   4022         struct ixgbe_hw         *hw = &adapter->hw;
   4023         struct ix_queue         *que = adapter->queues;
   4024         struct rx_ring          *rxr;
   4025         struct tx_ring          *txr;
   4026 #ifdef LRO
   4027         struct lro_ctrl         *lro;
   4028 #endif /* LRO */
   4029 
   4030         device_printf(dev,"Error Byte Count = %u \n",
   4031             IXGBE_READ_REG(hw, IXGBE_ERRBC));
   4032 
   4033         for (int i = 0; i < adapter->num_queues; i++, que++) {
   4034                 txr = que->txr;
   4035                 rxr = que->rxr;
   4036 #ifdef LRO
   4037                 lro = &rxr->lro;
   4038 #endif /* LRO */
   4039                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
   4040                     que->msix, (long)que->irqs);
   4041                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
   4042                     rxr->me, (long long)rxr->rx_packets.ev_count);
   4043                 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
   4044                     rxr->me, (long long)rxr->rx_split_packets.ev_count);
   4045                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
   4046                     rxr->me, (long)rxr->rx_bytes.ev_count);
   4047 #ifdef LRO
   4048                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
   4049                     rxr->me, lro->lro_queued);
   4050                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
   4051                     rxr->me, lro->lro_flushed);
   4052 #endif /* LRO */
   4053                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
   4054                     txr->me, (long)txr->total_packets.ev_count);
   4055                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
   4056                     txr->me, (long)txr->no_desc_avail.ev_count);
   4057         }
   4058 
   4059         device_printf(dev,"MBX IRQ Handled: %lu\n",
   4060             (long)adapter->mbx_irq.ev_count);
   4061         return;
   4062 }
   4063 
   4064 static int
   4065 ixv_sysctl_stats(SYSCTLFN_ARGS)
   4066 {
   4067 	struct sysctlnode node;
   4068 	int             error;
   4069 	int		result;
   4070 	struct adapter *adapter;
   4071 
   4072 	node = *rnode;
   4073 	adapter = (struct adapter *)node.sysctl_data;
   4074 	node.sysctl_data = &result;
   4075 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4076 	if (error != 0)
   4077 		return error;
   4078 
   4079 	if (result == 1)
   4080 		ixv_print_hw_stats(adapter);
   4081 
   4082 	return 0;
   4083 }
   4084 
   4085 static int
   4086 ixv_sysctl_debug(SYSCTLFN_ARGS)
   4087 {
   4088 	struct sysctlnode node;
   4089 	int error, result;
   4090 	struct adapter *adapter;
   4091 
   4092 	node = *rnode;
   4093 	adapter = (struct adapter *)node.sysctl_data;
   4094 	node.sysctl_data = &result;
   4095 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4096 
   4097 	if (error)
   4098 		return error;
   4099 
   4100 	if (result == 1)
   4101 		ixv_print_debug_info(adapter);
   4102 
   4103 	return 0;
   4104 }
   4105 
   4106 /*
   4107 ** Set flow control using sysctl:
   4108 ** Flow control values:
   4109 ** 	0 - off
   4110 **	1 - rx pause
   4111 **	2 - tx pause
   4112 **	3 - full
   4113 */
   4114 static int
   4115 ixv_set_flowcntl(SYSCTLFN_ARGS)
   4116 {
   4117 	struct sysctlnode node;
   4118 	int error;
   4119 	struct adapter *adapter;
   4120 
   4121 	node = *rnode;
   4122 	adapter = (struct adapter *)node.sysctl_data;
   4123 	node.sysctl_data = &ixv_flow_control;
   4124 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4125 
   4126 	if (error)
   4127 		return (error);
   4128 
   4129 	switch (ixv_flow_control) {
   4130 		case ixgbe_fc_rx_pause:
   4131 		case ixgbe_fc_tx_pause:
   4132 		case ixgbe_fc_full:
   4133 			adapter->hw.fc.requested_mode = ixv_flow_control;
   4134 			break;
   4135 		case ixgbe_fc_none:
   4136 		default:
   4137 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4138 	}
   4139 
   4140 	ixgbe_fc_enable(&adapter->hw);
   4141 	return error;
   4142 }
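         /*
         ** The sysctl values above are assumed to map onto the shared-code
         ** enum ixgbe_fc (ixgbe_fc_none == 0, ixgbe_fc_rx_pause == 1,
         ** ixgbe_fc_tx_pause == 2, ixgbe_fc_full == 3), so writing 3 requests
         ** full flow control and any unrecognized value falls back to none.
         */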
   4143 
   4144 const struct sysctlnode *
   4145 ixv_sysctl_instance(struct adapter *adapter)
   4146 {
   4147 	const char *dvname;
   4148 	struct sysctllog **log;
   4149 	int rc;
   4150 	const struct sysctlnode *rnode;
   4151 
   4152 	log = &adapter->sysctllog;
   4153 	dvname = device_xname(adapter->dev);
   4154 
   4155 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   4156 	    0, CTLTYPE_NODE, dvname,
   4157 	    SYSCTL_DESCR("ixv information and settings"),
   4158 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   4159 		goto err;
   4160 
   4161 	return rnode;
   4162 err:
   4163 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   4164 	return NULL;
   4165 }
   4166 
   4167 static void
   4168 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
   4169         const char *description, int *limit, int value)
   4170 {
   4171 	const struct sysctlnode *rnode, *cnode;
   4172 	struct sysctllog **log = &adapter->sysctllog;
   4173 
   4174         *limit = value;
   4175 
   4176 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL)
   4177 		aprint_error_dev(adapter->dev,
   4178 		    "could not create sysctl root\n");
   4179 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   4180 	    CTLFLAG_READWRITE,
   4181 	    CTLTYPE_INT,
   4182 	    name, SYSCTL_DESCR(description),
   4183 	    NULL, 0, limit, 0,
   4184 	    CTL_CREATE, CTL_EOL) != 0) {
   4185 		aprint_error_dev(adapter->dev, "%s: could not create sysctl",
   4186 		    __func__);
   4187 	}
   4188 }
   4189 
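         /*
         ** Illustrative (not verbatim) usage from attach; the node name,
         ** description and backing variable here are hypothetical and should
         ** match whatever the driver actually registers:
         **
         **	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
         **	    "max number of rx packets to process per interrupt",
         **	    &adapter->rx_process_limit, ixv_rx_process_limit);
         */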
   4190