      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2012, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/ixv.c 241917 2012-10-22 22:29:48Z eadler $*/
     34 /*$NetBSD: ixv.c,v 1.7 2015/04/14 07:17:06 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixv.h"
     40 
     41 /*********************************************************************
     42  *  Driver version
     43  *********************************************************************/
     44 char ixv_driver_version[] = "1.1.4";
     45 
     46 /*********************************************************************
     47  *  PCI Device ID Table
     48  *
     49  *  Used by probe to select which devices to attach to
     50  *  Last field stores an index into ixv_strings
     51  *  Last entry must be all 0s
     52  *
     53  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     54  *********************************************************************/
     55 
     56 static ixv_vendor_info_t ixv_vendor_info_array[] =
     57 {
     58 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     60 	/* required last entry */
     61 	{0, 0, 0, 0, 0}
     62 };
     63 
     64 /*********************************************************************
     65  *  Table of branding strings
     66  *********************************************************************/
     67 
     68 static const char    *ixv_strings[] = {
     69 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     70 };
     71 
     72 /*********************************************************************
     73  *  Function prototypes
     74  *********************************************************************/
     75 static int      ixv_probe(device_t, cfdata_t, void *);
     76 static void      ixv_attach(device_t, device_t, void *);
     77 static int      ixv_detach(device_t, int);
     78 #if 0
     79 static int      ixv_shutdown(device_t);
     80 #endif
     81 #if __FreeBSD_version < 800000
     82 static void     ixv_start(struct ifnet *);
     83 static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
     84 #else
     85 static int	ixv_mq_start(struct ifnet *, struct mbuf *);
     86 static int	ixv_mq_start_locked(struct ifnet *,
     87 		    struct tx_ring *, struct mbuf *);
     88 static void	ixv_qflush(struct ifnet *);
     89 #endif
     90 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     91 static int	ixv_init(struct ifnet *);
     92 static void	ixv_init_locked(struct adapter *);
     93 static void     ixv_stop(void *);
     94 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     95 static int      ixv_media_change(struct ifnet *);
     96 static void     ixv_identify_hardware(struct adapter *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *);
    100 static int	ixv_allocate_queues(struct adapter *);
    101 static int	ixv_setup_msix(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_setup_interface(device_t, struct adapter *);
    105 static void     ixv_config_link(struct adapter *);
    106 
    107 static int      ixv_allocate_transmit_buffers(struct tx_ring *);
    108 static int	ixv_setup_transmit_structures(struct adapter *);
    109 static void	ixv_setup_transmit_ring(struct tx_ring *);
    110 static void     ixv_initialize_transmit_units(struct adapter *);
    111 static void     ixv_free_transmit_structures(struct adapter *);
    112 static void     ixv_free_transmit_buffers(struct tx_ring *);
    113 
    114 static int      ixv_allocate_receive_buffers(struct rx_ring *);
    115 static int      ixv_setup_receive_structures(struct adapter *);
    116 static int	ixv_setup_receive_ring(struct rx_ring *);
    117 static void     ixv_initialize_receive_units(struct adapter *);
    118 static void     ixv_free_receive_structures(struct adapter *);
    119 static void     ixv_free_receive_buffers(struct rx_ring *);
    120 
    121 static void     ixv_enable_intr(struct adapter *);
    122 static void     ixv_disable_intr(struct adapter *);
    123 static bool	ixv_txeof(struct tx_ring *);
    124 static bool	ixv_rxeof(struct ix_queue *, int);
    125 static void	ixv_rx_checksum(u32, struct mbuf *, u32,
    126 		    struct ixgbevf_hw_stats *);
    127 static void     ixv_set_multi(struct adapter *);
    128 static void     ixv_update_link_status(struct adapter *);
    129 static void	ixv_refresh_mbufs(struct rx_ring *, int);
    130 static int      ixv_xmit(struct tx_ring *, struct mbuf *);
    131 static int	ixv_sysctl_stats(SYSCTLFN_PROTO);
    132 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    133 static int	ixv_set_flowcntl(SYSCTLFN_PROTO);
    134 static int	ixv_dma_malloc(struct adapter *, bus_size_t,
    135 		    struct ixv_dma_alloc *, int);
    136 static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
    137 static void	ixv_add_rx_process_limit(struct adapter *, const char *,
    138 		    const char *, int *, int);
    139 static u32	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
    140 static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
    141 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    142 static void	ixv_configure_ivars(struct adapter *);
    143 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    144 
    145 static void	ixv_setup_vlan_support(struct adapter *);
    146 #if 0
    147 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    148 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    149 #endif
    150 
    151 static void	ixv_save_stats(struct adapter *);
    152 static void	ixv_init_stats(struct adapter *);
    153 static void	ixv_update_stats(struct adapter *);
    154 
    155 static __inline void ixv_rx_discard(struct rx_ring *, int);
    156 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
    157 		    struct mbuf *, u32);
    158 
    159 /* The MSI/X Interrupt handlers */
    160 static void	ixv_msix_que(void *);
    161 static void	ixv_msix_mbx(void *);
    162 
    163 /* Deferred interrupt tasklets */
    164 static void	ixv_handle_que(void *);
    165 static void	ixv_handle_mbx(void *);
    166 
    167 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    168 static ixv_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    169 
    170 /*********************************************************************
    171  *  FreeBSD Device Interface Entry Points
    172  *********************************************************************/
    173 
    174 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    175     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    176     DVF_DETACH_SHUTDOWN);
    177 
    178 # if 0
    179 static device_method_t ixv_methods[] = {
    180 	/* Device interface */
    181 	DEVMETHOD(device_probe, ixv_probe),
    182 	DEVMETHOD(device_attach, ixv_attach),
    183 	DEVMETHOD(device_detach, ixv_detach),
    184 	DEVMETHOD(device_shutdown, ixv_shutdown),
    185 	{0, 0}
    186 };
    187 #endif
    188 
    189 #if 0
    190 static driver_t ixv_driver = {
    191 	"ix", ixv_methods, sizeof(struct adapter),
    192 };
    193 
    194 extern devclass_t ixgbe_devclass;
    195 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
    196 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    197 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    198 #endif
    199 
    200 /*
    201 ** TUNEABLE PARAMETERS:
    202 */
    203 
    204 /*
    205 ** AIM: Adaptive Interrupt Moderation
    206 ** which means that the interrupt rate
    207 ** is varied over time based on the
    208 ** traffic for that interrupt vector
    209 */
    210 static int ixv_enable_aim = FALSE;
    211 #define	TUNABLE_INT(__x, __y)
    212 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
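        /*
        ** NB: TUNABLE_INT is defined away above, so on NetBSD these
        ** defaults are effectively compile-time settings.  Some of them
        ** remain adjustable at run time: enable_aim and flow_control get
        ** sysctl nodes in ixv_sysctl_attach(), and the RX limit is
        ** exposed through ixv_add_rx_process_limit() at attach time.
        */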
    213 
    214 /* How many packets rxeof tries to clean at a time */
    215 static int ixv_rx_process_limit = 128;
    216 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    217 
    218 /* Flow control setting, default to full */
    219 static int ixv_flow_control = ixgbe_fc_full;
    220 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
    221 
    222 /*
    223  * Header split: this causes the hardware to DMA
    224  * the header into a separate mbuf from the payload;
    225  * it can be a performance win in some workloads, but
    226  * in others it actually hurts. It's off by default.
    227  */
    228 static int ixv_header_split = FALSE;
    229 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
    230 
    231 /*
    232 ** Number of TX descriptors per ring;
    233 ** set higher than RX, as that seems to be
    234 ** the better performing choice.
    235 */
    236 static int ixv_txd = DEFAULT_TXD;
    237 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    238 
    239 /* Number of RX descriptors per ring */
    240 static int ixv_rxd = DEFAULT_RXD;
    241 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    242 
    243 /*
    244 ** Shadow VFTA table; this is needed because
    245 ** the real filter table gets cleared during
    246 ** a soft reset and we need to repopulate it.
    247 */
    248 static u32 ixv_shadow_vfta[VFTA_SIZE];
    249 
    250 /* Keep a running count of ports for sanity checking */
    251 static int ixv_total_ports;
    252 
    253 /*********************************************************************
    254  *  Device identification routine
    255  *
    256  *  ixv_probe determines if the driver should be loaded on an
    257  *  adapter, based on the PCI vendor/device ID of that adapter.
    258  *
    259  *  return 1 on success, 0 on failure
    260  *********************************************************************/
    261 
    262 static int
    263 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    264 {
    265 	const struct pci_attach_args *pa = aux;
    266 
    267 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    268 }
    269 
    270 static ixv_vendor_info_t *
    271 ixv_lookup(const struct pci_attach_args *pa)
    272 {
    273 	pcireg_t subid;
    274 	ixv_vendor_info_t *ent;
    275 
    276 	INIT_DEBUGOUT("ixv_probe: begin");
    277 
    278 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    279 		return NULL;
    280 
    281 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    282 
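        	/*
        	 * A zero subvendor or subdevice ID in a table entry acts as a
        	 * wildcard and matches whatever the device reports.
        	 */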
    283 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    284 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    285 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    286 
    287 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    288 		     (ent->subvendor_id == 0)) &&
    289 
    290 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    291 		     (ent->subdevice_id == 0))) {
    292 			++ixv_total_ports;
    293 			return ent;
    294 		}
    295 	}
    296 	return NULL;
    297 }
    298 
    299 
    300 static void
    301 ixv_sysctl_attach(struct adapter *adapter)
    302 {
    303 	struct sysctllog **log;
    304 	const struct sysctlnode *rnode, *cnode;
    305 	device_t dev;
    306 
    307 	dev = adapter->dev;
    308 	log = &adapter->sysctllog;
    309 
    310 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    311 		aprint_error_dev(dev, "could not create sysctl root\n");
    312 		return;
    313 	}
    314 
    315 	if (sysctl_createv(log, 0, &rnode, &cnode,
    316 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    317 	    "stats", SYSCTL_DESCR("Statistics"),
    318 	    ixv_sysctl_stats, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    319 		aprint_error_dev(dev, "could not create sysctl\n");
    320 
    321 	if (sysctl_createv(log, 0, &rnode, &cnode,
    322 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    323 	    "debug", SYSCTL_DESCR("Debug Info"),
    324 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    325 		aprint_error_dev(dev, "could not create sysctl\n");
    326 
    327 	if (sysctl_createv(log, 0, &rnode, &cnode,
    328 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    329 	    "flow_control", SYSCTL_DESCR("Flow Control"),
    330 	    ixv_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    331 		aprint_error_dev(dev, "could not create sysctl\n");
    332 
    333 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    334 	 * XXX It's that way in the FreeBSD driver that this derives from.
    335 	 */
    336 	if (sysctl_createv(log, 0, &rnode, &cnode,
    337 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    338 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    339 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    340 		aprint_error_dev(dev, "could not create sysctl\n");
    341 }
    342 
    343 /*********************************************************************
    344  *  Device initialization routine
    345  *
    346  *  The attach entry point is called when the driver is being loaded.
    347  *  This routine identifies the type of hardware, allocates all resources
    348  *  and initializes the hardware.
    349  *
    350  *  return 0 on success, positive on failure
    351  *********************************************************************/
    352 
    353 static void
    354 ixv_attach(device_t parent, device_t dev, void *aux)
    355 {
    356 	struct adapter *adapter;
    357 	struct ixgbe_hw *hw;
    358 	int             error = 0;
    359 	ixv_vendor_info_t *ent;
    360 	const struct pci_attach_args *pa = aux;
    361 
    362 	INIT_DEBUGOUT("ixv_attach: begin");
    363 
    364 	/* Allocate, clear, and link in our adapter structure */
    365 	adapter = device_private(dev);
    366 	adapter->dev = adapter->osdep.dev = dev;
    367 	hw = &adapter->hw;
    368 
    369 	ent = ixv_lookup(pa);
    370 
    371 	KASSERT(ent != NULL);
    372 
    373 	aprint_normal(": %s, Version - %s\n",
    374 	    ixv_strings[ent->index], ixv_driver_version);
    375 
    376 	/* Core Lock Init*/
    377 	IXV_CORE_LOCK_INIT(adapter, device_xname(dev));
    378 
    379 	/* SYSCTL APIs */
    380 	ixv_sysctl_attach(adapter);
    381 
    382 	/* Set up the timer callout */
    383 	callout_init(&adapter->timer, 0);
    384 
    385 	/* Determine hardware revision */
    386 	ixv_identify_hardware(adapter);
    387 
    388 	/* Do base PCI setup - map BAR0 */
    389 	if (ixv_allocate_pci_resources(adapter, pa)) {
    390 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    391 		error = ENXIO;
    392 		goto err_out;
    393 	}
    394 
    395 	/* Do descriptor calc and sanity checks */
    396 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    397 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    398 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    399 		adapter->num_tx_desc = DEFAULT_TXD;
    400 	} else
    401 		adapter->num_tx_desc = ixv_txd;
    402 
    403 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    404 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    405 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    406 		adapter->num_rx_desc = DEFAULT_RXD;
    407 	} else
    408 		adapter->num_rx_desc = ixv_rxd;
    409 
    410 	/* Allocate our TX/RX Queues */
    411 	if (ixv_allocate_queues(adapter)) {
    412 		error = ENOMEM;
    413 		goto err_out;
    414 	}
    415 
    416 	/*
    417 	** Initialize the shared code; it is
    418 	** at this point that the mac type is set.
    419 	*/
    420 	error = ixgbe_init_shared_code(hw);
    421 	if (error) {
    422 		aprint_error_dev(dev,"Shared Code Initialization Failure\n");
    423 		error = EIO;
    424 		goto err_late;
    425 	}
    426 
    427 	/* Setup the mailbox */
    428 	ixgbe_init_mbx_params_vf(hw);
    429 
    430 	ixgbe_reset_hw(hw);
    431 
    432 	/* Get Hardware Flow Control setting */
    433 	hw->fc.requested_mode = ixgbe_fc_full;
    434 	hw->fc.pause_time = IXV_FC_PAUSE;
    435 	hw->fc.low_water[0] = IXV_FC_LO;
    436 	hw->fc.high_water[0] = IXV_FC_HI;
    437 	hw->fc.send_xon = TRUE;
    438 
    439 	error = ixgbe_init_hw(hw);
    440 	if (error) {
    441 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    442 		error = EIO;
    443 		goto err_late;
    444 	}
    445 
    446 	error = ixv_allocate_msix(adapter);
    447 	if (error)
    448 		goto err_late;
    449 
    450 	/* Setup OS specific network interface */
    451 	ixv_setup_interface(dev, adapter);
    452 
    453 	/* Sysctl for limiting the amount of work done in the taskqueue */
    454 	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
    455 	    "max number of rx packets to process", &adapter->rx_process_limit,
    456 	    ixv_rx_process_limit);
    457 
    458 	/* Do the stats setup */
    459 	ixv_save_stats(adapter);
    460 	ixv_init_stats(adapter);
    461 
    462 	/* Register for VLAN events */
    463 #if 0 /* XXX msaitoh delete after write? */
    464 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    465 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    466 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    467 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    468 #endif
    469 
    470 	INIT_DEBUGOUT("ixv_attach: end");
    471 	return;
    472 
    473 err_late:
    474 	ixv_free_transmit_structures(adapter);
    475 	ixv_free_receive_structures(adapter);
    476 err_out:
    477 	ixv_free_pci_resources(adapter);
    478 	return;
    479 
    480 }
    481 
    482 /*********************************************************************
    483  *  Device removal routine
    484  *
    485  *  The detach entry point is called when the driver is being removed.
    486  *  This routine stops the adapter and deallocates all the resources
    487  *  that were allocated for driver operation.
    488  *
    489  *  return 0 on success, positive on failure
    490  *********************************************************************/
    491 
    492 static int
    493 ixv_detach(device_t dev, int flags)
    494 {
    495 	struct adapter *adapter = device_private(dev);
    496 	struct ix_queue *que = adapter->queues;
    497 
    498 	INIT_DEBUGOUT("ixv_detach: begin");
    499 
    500 	/* Make sure VLANS are not using driver */
    501 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    502 		;	/* nothing to do: no VLANs */
    503 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    504 		vlan_ifdetach(adapter->ifp);
    505 	else {
    506 		aprint_error_dev(dev, "VLANs in use\n");
    507 		return EBUSY;
    508 	}
    509 
    510 	IXV_CORE_LOCK(adapter);
    511 	ixv_stop(adapter);
    512 	IXV_CORE_UNLOCK(adapter);
    513 
    514 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    515 		softint_disestablish(que->que_si);
    516 	}
    517 
    518 	/* Drain the Link queue */
    519 	softint_disestablish(adapter->mbx_si);
    520 
    521 	/* Unregister VLAN events */
    522 #if 0 /* XXX msaitoh delete after write? */
    523 	if (adapter->vlan_attach != NULL)
    524 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    525 	if (adapter->vlan_detach != NULL)
    526 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    527 #endif
    528 
    529 	ether_ifdetach(adapter->ifp);
    530 	callout_halt(&adapter->timer, NULL);
    531 	ixv_free_pci_resources(adapter);
    532 #if 0 /* XXX the NetBSD port is probably missing something here */
    533 	bus_generic_detach(dev);
    534 #endif
    535 	if_detach(adapter->ifp);
    536 
    537 	ixv_free_transmit_structures(adapter);
    538 	ixv_free_receive_structures(adapter);
    539 
    540 	IXV_CORE_LOCK_DESTROY(adapter);
    541 	return (0);
    542 }
    543 
    544 /*********************************************************************
    545  *
    546  *  Shutdown entry point
    547  *
    548  **********************************************************************/
    549 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    550 static int
    551 ixv_shutdown(device_t dev)
    552 {
    553 	struct adapter *adapter = device_private(dev);
    554 	IXV_CORE_LOCK(adapter);
    555 	ixv_stop(adapter);
    556 	IXV_CORE_UNLOCK(adapter);
    557 	return (0);
    558 }
    559 #endif
    560 
    561 #if __FreeBSD_version < 800000
    562 /*********************************************************************
    563  *  Transmit entry point
    564  *
    565  *  ixv_start is called by the stack to initiate a transmit.
    566  *  The driver will remain in this routine as long as there are
    567  *  packets to transmit and transmit resources are available.
    568  *  In case resources are not available, the stack is notified and
    569  *  the packet is requeued.
    570  **********************************************************************/
    571 static void
    572 ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
    573 {
    574 	int rc;
    575 	struct mbuf    *m_head;
    576 	struct adapter *adapter = txr->adapter;
    577 
    578 	IXV_TX_LOCK_ASSERT(txr);
    579 
    580 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
    581 	    IFF_RUNNING)
    582 		return;
    583 	if (!adapter->link_active)
    584 		return;
    585 
    586 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    587 
    588 		IFQ_POLL(&ifp->if_snd, m_head);
    589 		if (m_head == NULL)
    590 			break;
    591 
    592 		if ((rc = ixv_xmit(txr, m_head)) == EAGAIN) {
    593 			ifp->if_flags |= IFF_OACTIVE;
    594 			break;
    595 		}
    596 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
    597 		if (rc == EFBIG) {
    598 			struct mbuf *mtmp;
    599 
    600 			if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) {
    601 				m_head = mtmp;
    602 				rc = ixv_xmit(txr, m_head);
    603 				if (rc != 0)
    604 					adapter->efbig2_tx_dma_setup.ev_count++;
    605 			} else
    606 				adapter->m_defrag_failed.ev_count++;
    607 		}
    608 		if (rc != 0) {
    609 			m_freem(m_head);
    610 			continue;
    611 		}
    612 		/* Send a copy of the frame to the BPF listener */
    613 		bpf_mtap(ifp, m_head);
    614 
    615 		/* Set watchdog on */
    616 		txr->watchdog_check = TRUE;
    617 		getmicrotime(&txr->watchdog_time);
    618 	}
    619 	return;
    620 }
    621 
    622 /*
    623  * Legacy TX start - called by the stack, this
    624  * always uses the first tx ring, and should
    625  * not be used with multiqueue tx enabled.
    626  */
    627 static void
    628 ixv_start(struct ifnet *ifp)
    629 {
    630 	struct adapter *adapter = ifp->if_softc;
    631 	struct tx_ring	*txr = adapter->tx_rings;
    632 
    633 	if (ifp->if_flags & IFF_RUNNING) {
    634 		IXV_TX_LOCK(txr);
    635 		ixv_start_locked(txr, ifp);
    636 		IXV_TX_UNLOCK(txr);
    637 	}
    638 	return;
    639 }
    640 
    641 #else
    642 
    643 /*
    644 ** Multiqueue Transmit driver
    645 **
    646 */
    647 static int
    648 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
    649 {
    650 	struct adapter	*adapter = ifp->if_softc;
    651 	struct ix_queue	*que;
    652 	struct tx_ring	*txr;
    653 	int 		i = 0, err = 0;
    654 
    655 	/* Which queue to use */
    656 	if ((m->m_flags & M_FLOWID) != 0)
    657 		i = m->m_pkthdr.flowid % adapter->num_queues;
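        	/* A given flow ID always maps to the same ring, so packets of
        	   one flow are not reordered across queues. */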
    658 
    659 	txr = &adapter->tx_rings[i];
    660 	que = &adapter->queues[i];
    661 
    662 	if (IXV_TX_TRYLOCK(txr)) {
    663 		err = ixv_mq_start_locked(ifp, txr, m);
    664 		IXV_TX_UNLOCK(txr);
    665 	} else {
    666 		err = drbr_enqueue(ifp, txr->br, m);
    667 		softint_schedule(que->que_si);
    668 	}
    669 
    670 	return (err);
    671 }
    672 
    673 static int
    674 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
    675 {
    676 	struct adapter  *adapter = txr->adapter;
    677         struct mbuf     *next;
    678         int             enqueued, err = 0;
    679 
    680 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
    681 	    IFF_RUNNING || adapter->link_active == 0) {
    682 		if (m != NULL)
    683 			err = drbr_enqueue(ifp, txr->br, m);
    684 		return (err);
    685 	}
    686 
    687 	/* Do a clean if descriptors are low */
    688 	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
    689 		ixv_txeof(txr);
    690 
    691 	enqueued = 0;
    692 	if (m == NULL) {
    693 		next = drbr_dequeue(ifp, txr->br);
    694 	} else if (drbr_needs_enqueue(ifp, txr->br)) {
    695 		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
    696 			return (err);
    697 		next = drbr_dequeue(ifp, txr->br);
    698 	} else
    699 		next = m;
    700 
    701 	/* Process the queue */
    702 	while (next != NULL) {
    703 		if ((err = ixv_xmit(txr, next)) != 0) {
    704 			if (next != NULL)
    705 				err = drbr_enqueue(ifp, txr->br, next);
    706 			break;
    707 		}
    708 		enqueued++;
    709 		ifp->if_obytes += next->m_pkthdr.len;
    710 		if (next->m_flags & M_MCAST)
    711 			ifp->if_omcasts++;
    712 		/* Send a copy of the frame to the BPF listener */
    713 		ETHER_BPF_MTAP(ifp, next);
    714 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    715 			break;
    716 		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
    717 			ifp->if_flags |= IFF_OACTIVE;
    718 			break;
    719 		}
    720 		next = drbr_dequeue(ifp, txr->br);
    721 	}
    722 
    723 	if (enqueued > 0) {
    724 		/* Set watchdog on */
    725 		txr->watchdog_check = TRUE;
    726 		getmicrotime(&txr->watchdog_time);
    727 	}
    728 
    729 	return (err);
    730 }
    731 
    732 /*
    733 ** Flush all ring buffers
    734 */
    735 static void
    736 ixv_qflush(struct ifnet *ifp)
    737 {
    738 	struct adapter  *adapter = ifp->if_softc;
    739 	struct tx_ring  *txr = adapter->tx_rings;
    740 	struct mbuf     *m;
    741 
    742 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    743 		IXV_TX_LOCK(txr);
    744 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
    745 			m_freem(m);
    746 		IXV_TX_UNLOCK(txr);
    747 	}
    748 	if_qflush(ifp);
    749 }
    750 
    751 #endif
    752 
    753 static int
    754 ixv_ifflags_cb(struct ethercom *ec)
    755 {
    756 	struct ifnet *ifp = &ec->ec_if;
    757 	struct adapter *adapter = ifp->if_softc;
    758 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    759 
    760 	IXV_CORE_LOCK(adapter);
    761 
    762 	if (change != 0)
    763 		adapter->if_flags = ifp->if_flags;
    764 
    765 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    766 		rc = ENETRESET;
    767 
    768 	IXV_CORE_UNLOCK(adapter);
    769 
    770 	return rc;
    771 }
    772 
    773 /*********************************************************************
    774  *  Ioctl entry point
    775  *
    776  *  ixv_ioctl is called when the user wants to configure the
    777  *  interface.
    778  *
    779  *  return 0 on success, positive on failure
    780  **********************************************************************/
    781 
    782 static int
    783 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    784 {
    785 	struct adapter	*adapter = ifp->if_softc;
    786 	struct ifcapreq *ifcr = data;
    787 	struct ifreq	*ifr = (struct ifreq *) data;
    788 	int             error = 0;
    789 	int l4csum_en;
    790 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    791 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    792 
    793 	switch (command) {
    794 	case SIOCSIFFLAGS:
    795 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    796 		break;
    797 	case SIOCADDMULTI:
    798 	case SIOCDELMULTI:
    799 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    800 		break;
    801 	case SIOCSIFMEDIA:
    802 	case SIOCGIFMEDIA:
    803 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    804 		break;
    805 	case SIOCSIFCAP:
    806 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    807 		break;
    808 	case SIOCSIFMTU:
    809 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    810 		break;
    811 	default:
    812 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
    813 		break;
    814 	}
    815 
    816 	switch (command) {
    817 	case SIOCSIFMEDIA:
    818 	case SIOCGIFMEDIA:
    819 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    820 	case SIOCSIFCAP:
    821 		/* Layer-4 Rx checksum offload has to be turned on and
    822 		 * off as a unit.
    823 		 */
    824 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    825 		if (l4csum_en != l4csum && l4csum_en != 0)
    826 			return EINVAL;
    827 		/*FALLTHROUGH*/
    828 	case SIOCADDMULTI:
    829 	case SIOCDELMULTI:
    830 	case SIOCSIFFLAGS:
    831 	case SIOCSIFMTU:
    832 	default:
    833 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    834 			return error;
    835 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    836 			;
    837 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    838 			IXV_CORE_LOCK(adapter);
    839 			ixv_init_locked(adapter);
    840 			IXV_CORE_UNLOCK(adapter);
    841 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    842 			/*
    843 			 * Multicast list has changed; set the hardware filter
    844 			 * accordingly.
    845 			 */
    846 			IXV_CORE_LOCK(adapter);
    847 			ixv_disable_intr(adapter);
    848 			ixv_set_multi(adapter);
    849 			ixv_enable_intr(adapter);
    850 			IXV_CORE_UNLOCK(adapter);
    851 		}
    852 		return 0;
    853 	}
    854 }
    855 
    856 /*********************************************************************
    857  *  Init entry point
    858  *
    859  *  This routine is used in two ways. It is used by the stack as
    860  *  the init entry point in the network interface structure. It is also used
    861  *  by the driver as a hw/sw initialization routine to get to a
    862  *  consistent state.
    863  *
    864  *  return 0 on success, positive on failure
    865  **********************************************************************/
    866 #define IXGBE_MHADD_MFS_SHIFT 16
    867 
    868 static void
    869 ixv_init_locked(struct adapter *adapter)
    870 {
    871 	struct ifnet	*ifp = adapter->ifp;
    872 	device_t 	dev = adapter->dev;
    873 	struct ixgbe_hw *hw = &adapter->hw;
    874 	u32		mhadd, gpie;
    875 
    876 	INIT_DEBUGOUT("ixv_init: begin");
    877 	KASSERT(mutex_owned(&adapter->core_mtx));
    878 	hw->adapter_stopped = FALSE;
    879 	ixgbe_stop_adapter(hw);
    880         callout_stop(&adapter->timer);
    881 
    882         /* reprogram the RAR[0] in case user changed it. */
    883         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    884 
    885 	/* Get the latest mac address, User can use a LAA */
    886 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    887 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    888         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    889 	hw->addr_ctrl.rar_used_count = 1;
    890 
    891 	/* Prepare transmit descriptors and buffers */
    892 	if (ixv_setup_transmit_structures(adapter)) {
    893 		aprint_error_dev(dev,"Could not setup transmit structures\n");
    894 		ixv_stop(adapter);
    895 		return;
    896 	}
    897 
    898 	ixgbe_reset_hw(hw);
    899 	ixv_initialize_transmit_units(adapter);
    900 
    901 	/* Setup Multicast table */
    902 	ixv_set_multi(adapter);
    903 
    904 	/*
    905 	** Determine the correct mbuf pool
    906 	** for doing jumbo/headersplit
    907 	*/
    908 	if (ifp->if_mtu > ETHERMTU)
    909 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    910 	else
    911 		adapter->rx_mbuf_sz = MCLBYTES;
    912 
    913 	/* Prepare receive descriptors and buffers */
    914 	if (ixv_setup_receive_structures(adapter)) {
    915 		device_printf(dev,"Could not setup receive structures\n");
    916 		ixv_stop(adapter);
    917 		return;
    918 	}
    919 
    920 	/* Configure RX settings */
    921 	ixv_initialize_receive_units(adapter);
    922 
    923 	/* Enable Enhanced MSIX mode */
    924 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    925 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    926 	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    927         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
    928 
    929 #if 0 /* XXX isn't it required? -- msaitoh  */
    930 	/* Set the various hardware offload abilities */
    931 	ifp->if_hwassist = 0;
    932 	if (ifp->if_capenable & IFCAP_TSO4)
    933 		ifp->if_hwassist |= CSUM_TSO;
    934 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    935 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    936 #if __FreeBSD_version >= 800000
    937 		ifp->if_hwassist |= CSUM_SCTP;
    938 #endif
    939 	}
    940 #endif
    941 
    942 	/* Set MTU size */
    943 	if (ifp->if_mtu > ETHERMTU) {
    944 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
    945 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
    946 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
    947 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    948 	}
    949 
    950 	/* Set up VLAN offload and filter */
    951 	ixv_setup_vlan_support(adapter);
    952 
    953 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    954 
    955 	/* Set up MSI/X routing */
    956 	ixv_configure_ivars(adapter);
    957 
    958 	/* Set up auto-mask */
    959 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    960 
    961         /* Set moderation on the Link interrupt */
    962         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
    963 
    964 	/* Stats init */
    965 	ixv_init_stats(adapter);
    966 
    967 	/* Config/Enable Link */
    968 	ixv_config_link(adapter);
    969 
    970 	/* And now turn on interrupts */
    971 	ixv_enable_intr(adapter);
    972 
    973 	/* Now inform the stack we're ready */
    974 	ifp->if_flags |= IFF_RUNNING;
    975 	ifp->if_flags &= ~IFF_OACTIVE;
    976 
    977 	return;
    978 }
    979 
    980 static int
    981 ixv_init(struct ifnet *ifp)
    982 {
    983 	struct adapter *adapter = ifp->if_softc;
    984 
    985 	IXV_CORE_LOCK(adapter);
    986 	ixv_init_locked(adapter);
    987 	IXV_CORE_UNLOCK(adapter);
    988 	return 0;
    989 }
    990 
    991 
    992 /*
    993 **
    994 ** MSIX Interrupt Handlers and Tasklets
    995 **
    996 */
    997 
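        /*
        ** Each queue's MSI-X vector corresponds to a single bit in the VF
        ** interrupt registers: writing that bit to VTEIMS enables the vector,
        ** VTEIMC masks it, and VTEICS re-triggers it from software.
        */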
    998 static inline void
    999 ixv_enable_queue(struct adapter *adapter, u32 vector)
   1000 {
   1001 	struct ixgbe_hw *hw = &adapter->hw;
   1002 	u32	queue = 1 << vector;
   1003 	u32	mask;
   1004 
   1005 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1006 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1007 }
   1008 
   1009 static inline void
   1010 ixv_disable_queue(struct adapter *adapter, u32 vector)
   1011 {
   1012 	struct ixgbe_hw *hw = &adapter->hw;
   1013 	u64	queue = (u64)(1 << vector);
   1014 	u32	mask;
   1015 
   1016 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1017 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
   1018 }
   1019 
   1020 static inline void
   1021 ixv_rearm_queues(struct adapter *adapter, u64 queues)
   1022 {
   1023 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   1024 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
   1025 }
   1026 
   1027 
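        /*
        ** Deferred (softint) queue service: drain RX and TX completions
        ** outside of hard interrupt context.  If the RX limit was reached
        ** the softint reschedules itself; otherwise the queue's MSI-X
        ** vector is re-enabled.
        */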
   1028 static void
   1029 ixv_handle_que(void *context)
   1030 {
   1031 	struct ix_queue *que = context;
   1032 	struct adapter  *adapter = que->adapter;
   1033 	struct tx_ring  *txr = que->txr;
   1034 	struct ifnet    *ifp = adapter->ifp;
   1035 	bool		more;
   1036 
   1037 	if (ifp->if_flags & IFF_RUNNING) {
   1038 		more = ixv_rxeof(que, adapter->rx_process_limit);
   1039 		IXV_TX_LOCK(txr);
   1040 		ixv_txeof(txr);
   1041 #if __FreeBSD_version >= 800000
   1042 		if (!drbr_empty(ifp, txr->br))
   1043 			ixv_mq_start_locked(ifp, txr, NULL);
   1044 #else
   1045 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1046 			ixv_start_locked(txr, ifp);
   1047 #endif
   1048 		IXV_TX_UNLOCK(txr);
   1049 		if (more) {
   1050 			adapter->req.ev_count++;
   1051 			softint_schedule(que->que_si);
   1052 			return;
   1053 		}
   1054 	}
   1055 
   1056 	/* Reenable this interrupt */
   1057 	ixv_enable_queue(adapter, que->msix);
   1058 	return;
   1059 }
   1060 
   1061 /*********************************************************************
   1062  *
   1063  *  MSI Queue Interrupt Service routine
   1064  *
   1065  **********************************************************************/
   1066 void
   1067 ixv_msix_que(void *arg)
   1068 {
   1069 	struct ix_queue	*que = arg;
   1070 	struct adapter  *adapter = que->adapter;
   1071 	struct tx_ring	*txr = que->txr;
   1072 	struct rx_ring	*rxr = que->rxr;
   1073 	bool		more_tx, more_rx;
   1074 	u32		newitr = 0;
   1075 
   1076 	ixv_disable_queue(adapter, que->msix);
   1077 	++que->irqs;
   1078 
   1079 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1080 
   1081 	IXV_TX_LOCK(txr);
   1082 	more_tx = ixv_txeof(txr);
   1083 	/*
   1084 	** Make certain that if the stack
   1085 	** has anything queued the task gets
   1086 	** scheduled to handle it.
   1087 	*/
   1088 #if __FreeBSD_version < 800000
   1089 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
   1090 #else
   1091 	if (!drbr_empty(adapter->ifp, txr->br))
   1092 #endif
   1093                 more_tx = 1;
   1094 	IXV_TX_UNLOCK(txr);
   1095 
   1096 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1097 
   1098 	/* Do AIM now? */
   1099 
   1100 	if (ixv_enable_aim == FALSE)
   1101 		goto no_calc;
   1102 	/*
   1103 	** Do Adaptive Interrupt Moderation:
   1104         **  - Write out last calculated setting
   1105 	**  - Calculate based on average size over
   1106 	**    the last interval.
   1107 	*/
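        	/*
        	** The new setting is simply the average frame size seen since the
        	** last write (bytes/packets, plus 24 for framing/CRC overhead),
        	** capped at 3000 and scaled down for the mid range; it is written
        	** to VTEITR at the start of the next interrupt.
        	*/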
   1108         if (que->eitr_setting)
   1109                 IXGBE_WRITE_REG(&adapter->hw,
   1110                     IXGBE_VTEITR(que->msix),
   1111 		    que->eitr_setting);
   1112 
   1113         que->eitr_setting = 0;
   1114 
   1115         /* Idle, do nothing */
   1116         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1117                 goto no_calc;
   1118 
   1119 	if ((txr->bytes) && (txr->packets))
   1120                	newitr = txr->bytes/txr->packets;
   1121 	if ((rxr->bytes) && (rxr->packets))
   1122 		newitr = max(newitr,
   1123 		    (rxr->bytes / rxr->packets));
   1124 	newitr += 24; /* account for hardware frame, crc */
   1125 
   1126 	/* set an upper boundary */
   1127 	newitr = min(newitr, 3000);
   1128 
   1129 	/* Be nice to the mid range */
   1130 	if ((newitr > 300) && (newitr < 1200))
   1131 		newitr = (newitr / 3);
   1132 	else
   1133 		newitr = (newitr / 2);
   1134 
   1135 	newitr |= newitr << 16;
   1136 
   1137         /* save for next interrupt */
   1138         que->eitr_setting = newitr;
   1139 
   1140         /* Reset state */
   1141         txr->bytes = 0;
   1142         txr->packets = 0;
   1143         rxr->bytes = 0;
   1144         rxr->packets = 0;
   1145 
   1146 no_calc:
   1147 	if (more_tx || more_rx)
   1148 		softint_schedule(que->que_si);
   1149 	else /* Reenable this interrupt */
   1150 		ixv_enable_queue(adapter, que->msix);
   1151 	return;
   1152 }
   1153 
   1154 static void
   1155 ixv_msix_mbx(void *arg)
   1156 {
   1157 	struct adapter	*adapter = arg;
   1158 	struct ixgbe_hw *hw = &adapter->hw;
   1159 	u32		reg;
   1160 
   1161 	++adapter->mbx_irq.ev_count;
   1162 
   1163 	/* First get the cause */
   1164 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
   1165 	/* Clear interrupt with write */
   1166 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
   1167 
   1168 	/* Link status change */
   1169 	if (reg & IXGBE_EICR_LSC)
   1170 		softint_schedule(adapter->mbx_si);
   1171 
   1172 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1173 	return;
   1174 }
   1175 
   1176 /*********************************************************************
   1177  *
   1178  *  Media Ioctl callback
   1179  *
   1180  *  This routine is called whenever the user queries the status of
   1181  *  the interface using ifconfig.
   1182  *
   1183  **********************************************************************/
   1184 static void
   1185 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1186 {
   1187 	struct adapter *adapter = ifp->if_softc;
   1188 
   1189 	INIT_DEBUGOUT("ixv_media_status: begin");
   1190 	IXV_CORE_LOCK(adapter);
   1191 	ixv_update_link_status(adapter);
   1192 
   1193 	ifmr->ifm_status = IFM_AVALID;
   1194 	ifmr->ifm_active = IFM_ETHER;
   1195 
   1196 	if (!adapter->link_active) {
   1197 		IXV_CORE_UNLOCK(adapter);
   1198 		return;
   1199 	}
   1200 
   1201 	ifmr->ifm_status |= IFM_ACTIVE;
   1202 
   1203 	switch (adapter->link_speed) {
   1204 		case IXGBE_LINK_SPEED_1GB_FULL:
   1205 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1206 			break;
   1207 		case IXGBE_LINK_SPEED_10GB_FULL:
   1208 			ifmr->ifm_active |= IFM_FDX;
   1209 			break;
   1210 	}
   1211 
   1212 	IXV_CORE_UNLOCK(adapter);
   1213 
   1214 	return;
   1215 }
   1216 
   1217 /*********************************************************************
   1218  *
   1219  *  Media Ioctl callback
   1220  *
   1221  *  This routine is called when the user changes speed/duplex using
   1222  *  media/mediaopt options with ifconfig.
   1223  *
   1224  **********************************************************************/
   1225 static int
   1226 ixv_media_change(struct ifnet * ifp)
   1227 {
   1228 	struct adapter *adapter = ifp->if_softc;
   1229 	struct ifmedia *ifm = &adapter->media;
   1230 
   1231 	INIT_DEBUGOUT("ixv_media_change: begin");
   1232 
   1233 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1234 		return (EINVAL);
   1235 
   1236         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1237         case IFM_AUTO:
   1238                 break;
   1239         default:
   1240                 device_printf(adapter->dev, "Only auto media type\n");
   1241 		return (EINVAL);
   1242         }
   1243 
   1244 	return (0);
   1245 }
   1246 
   1247 /*********************************************************************
   1248  *
   1249  *  This routine maps the mbufs to tx descriptors, allowing the
   1250  *  TX engine to transmit the packets.
   1251  *  	- return 0 on success, positive on failure
   1252  *
   1253  **********************************************************************/
   1254 
   1255 static int
   1256 ixv_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1257 {
   1258 	struct m_tag *mtag;
   1259 	struct adapter  *adapter = txr->adapter;
   1260 	struct ethercom *ec = &adapter->osdep.ec;
   1261 	u32		olinfo_status = 0, cmd_type_len;
   1262 	u32		paylen = 0;
   1263 	int             i, j, error;
   1264 	int		first, last = 0;
   1265 	bus_dmamap_t	map;
   1266 	struct ixv_tx_buf *txbuf;
   1267 	union ixgbe_adv_tx_desc *txd = NULL;
   1268 
   1269 	/* Basic descriptor defines */
   1270         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1271 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1272 
   1273 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1274         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1275 
   1276         /*
   1277          * Important to capture the first descriptor
   1278          * used because it will contain the index of
   1279          * the one we tell the hardware to report back
   1280          */
   1281         first = txr->next_avail_desc;
   1282 	txbuf = &txr->tx_buffers[first];
   1283 	map = txbuf->map;
   1284 
   1285 	/*
   1286 	 * Map the packet for DMA.
   1287 	 */
   1288 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1289 	    m_head, BUS_DMA_NOWAIT);
   1290 
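        	/*
        	 * Classify the DMA load result: EAGAIN/ENOMEM are reported as
        	 * EAGAIN so the caller simply retries the frame later, while
        	 * EFBIG is passed back so the caller can m_defrag() the chain
        	 * and try again.
        	 */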
   1291 	switch (error) {
   1292 	case EAGAIN:
   1293 		adapter->eagain_tx_dma_setup.ev_count++;
   1294 		return EAGAIN;
   1295 	case ENOMEM:
   1296 		adapter->enomem_tx_dma_setup.ev_count++;
   1297 		return EAGAIN;
   1298 	case EFBIG:
   1299 		adapter->efbig_tx_dma_setup.ev_count++;
   1300 		return error;
   1301 	case EINVAL:
   1302 		adapter->einval_tx_dma_setup.ev_count++;
   1303 		return error;
   1304 	default:
   1305 		adapter->other_tx_dma_setup.ev_count++;
   1306 		return error;
   1307 	case 0:
   1308 		break;
   1309 	}
   1310 
   1311 	/* Make certain there are enough descriptors */
   1312 	if (map->dm_nsegs > txr->tx_avail - 2) {
   1313 		txr->no_desc_avail.ev_count++;
   1314 		/* XXX s/ixgbe/ixv/ */
   1315 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1316 		return EAGAIN;
   1317 	}
   1318 
   1319 	/*
   1320 	** Set up the appropriate offload context;
   1321 	** this becomes the first descriptor of
   1322 	** a packet.
   1323 	*/
   1324 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1325 		if (ixv_tso_setup(txr, m_head, &paylen)) {
   1326 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1327 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1328 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1329 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1330 			++adapter->tso_tx.ev_count;
   1331 		} else {
   1332 			++adapter->tso_err.ev_count;
   1333 			/* XXX unload DMA map! --dyoung -> easy? --msaitoh */
   1334 			return (ENXIO);
   1335 		}
   1336 	} else
   1337 		olinfo_status |= ixv_tx_ctx_setup(txr, m_head);
   1338 
   1339         /* Record payload length */
   1340 	if (paylen == 0)
   1341         	olinfo_status |= m_head->m_pkthdr.len <<
   1342 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1343 
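        	/*
        	 * Fill one advanced TX descriptor per DMA segment; 'last' tracks
        	 * the final descriptor of the frame so that its index can be
        	 * recorded in the first buffer's eop_index for completion handling.
        	 */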
   1344 	i = txr->next_avail_desc;
   1345 	for (j = 0; j < map->dm_nsegs; j++) {
   1346 		bus_size_t seglen;
   1347 		bus_addr_t segaddr;
   1348 
   1349 		txbuf = &txr->tx_buffers[i];
   1350 		txd = &txr->tx_base[i];
   1351 		seglen = map->dm_segs[j].ds_len;
   1352 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1353 
   1354 		txd->read.buffer_addr = segaddr;
   1355 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1356 		    cmd_type_len |seglen);
   1357 		txd->read.olinfo_status = htole32(olinfo_status);
   1358 		last = i; /* descriptor that will get completion IRQ */
   1359 
   1360 		if (++i == adapter->num_tx_desc)
   1361 			i = 0;
   1362 
   1363 		txbuf->m_head = NULL;
   1364 		txbuf->eop_index = -1;
   1365 	}
   1366 
   1367 	txd->read.cmd_type_len |=
   1368 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1369 	txr->tx_avail -= map->dm_nsegs;
   1370 	txr->next_avail_desc = i;
   1371 
   1372 	txbuf->m_head = m_head;
   1373 	/* Swap the dma map between the first and last descriptor */
   1374 	txr->tx_buffers[first].map = txbuf->map;
   1375 	txbuf->map = map;
   1376 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1377 	    BUS_DMASYNC_PREWRITE);
   1378 
   1379         /* Set the index of the descriptor that will be marked done */
   1380         txbuf = &txr->tx_buffers[first];
   1381 	txbuf->eop_index = last;
   1382 
   1383 	/* XXX s/ixgbe/ixg/ */
   1384         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1385             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1386 	/*
   1387 	 * Advance the Transmit Descriptor Tail (TDT); this tells the
   1388 	 * hardware that this frame is available to transmit.
   1389 	 */
   1390 	++txr->total_packets.ev_count;
   1391 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
   1392 
   1393 	return 0;
   1394 }
   1395 
   1396 
   1397 /*********************************************************************
   1398  *  Multicast Update
   1399  *
   1400  *  This routine is called whenever the multicast address list is updated.
   1401  *
   1402  **********************************************************************/
   1403 #define IXGBE_RAR_ENTRIES 16
   1404 
   1405 static void
   1406 ixv_set_multi(struct adapter *adapter)
   1407 {
   1408 	struct ether_multi *enm;
   1409 	struct ether_multistep step;
   1410 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1411 	u8	*update_ptr;
   1412 	int	mcnt = 0;
   1413 	struct ethercom *ec = &adapter->osdep.ec;
   1414 
   1415 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1416 
   1417 	ETHER_FIRST_MULTI(step, ec, enm);
   1418 	while (enm != NULL) {
   1419 		bcopy(enm->enm_addrlo,
   1420 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1421 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1422 		mcnt++;
   1423 		/* XXX This might be required --msaitoh */
   1424 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1425 			break;
   1426 		ETHER_NEXT_MULTI(step, enm);
   1427 	}
   1428 
   1429 	update_ptr = mta;
   1430 
   1431 	ixgbe_update_mc_addr_list(&adapter->hw,
   1432 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1433 
   1434 	return;
   1435 }
   1436 
   1437 /*
   1438  * This is an iterator function needed by the multicast
   1439  * shared code. It simply feeds the shared code routine the
   1440  * addresses from the array built in ixv_set_multi(), one by one.
   1441  */
   1442 static u8 *
   1443 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1444 {
   1445 	u8 *addr = *update_ptr;
   1446 	u8 *newptr;
   1447 	*vmdq = 0;
   1448 
   1449 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1450 	*update_ptr = newptr;
   1451 	return addr;
   1452 }
   1453 
   1454 /*********************************************************************
   1455  *  Timer routine
   1456  *
   1457  *  This routine checks for link status, updates statistics,
   1458  *  and runs the watchdog check.
   1459  *
   1460  **********************************************************************/
   1461 
   1462 static void
   1463 ixv_local_timer1(void *arg)
   1464 {
   1465 	struct adapter	*adapter = arg;
   1466 	device_t	dev = adapter->dev;
   1467 	struct tx_ring	*txr = adapter->tx_rings;
   1468 	int		i;
   1469 	struct timeval now, elapsed;
   1470 
   1471 	KASSERT(mutex_owned(&adapter->core_mtx));
   1472 
   1473 	ixv_update_link_status(adapter);
   1474 
   1475 	/* Stats Update */
   1476 	ixv_update_stats(adapter);
   1477 
   1478 	/*
   1479 	 * If the interface has been paused
   1480 	 * then don't do the watchdog check
   1481 	 */
   1482 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   1483 		goto out;
   1484 	/*
   1485 	** Check for time since any descriptor was cleaned
   1486 	*/
   1487         for (i = 0; i < adapter->num_queues; i++, txr++) {
   1488 		IXV_TX_LOCK(txr);
   1489 		if (txr->watchdog_check == FALSE) {
   1490 			IXV_TX_UNLOCK(txr);
   1491 			continue;
   1492 		}
   1493 		getmicrotime(&now);
   1494 		timersub(&now, &txr->watchdog_time, &elapsed);
   1495 		if (tvtohz(&elapsed) > IXV_WATCHDOG)
   1496 			goto hung;
   1497 		IXV_TX_UNLOCK(txr);
   1498 	}
   1499 out:
   1500        	ixv_rearm_queues(adapter, adapter->que_mask);
   1501 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1502 	return;
   1503 
   1504 hung:
   1505 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1506 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
   1507 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
   1508 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
   1509 	device_printf(dev,"TX(%d) desc avail = %d,"
   1510 	    " Next TX to Clean = %d\n",
   1511 	    txr->me, txr->tx_avail, txr->next_to_clean);
   1512 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1513 	adapter->watchdog_events.ev_count++;
   1514 	IXV_TX_UNLOCK(txr);
   1515 	ixv_init_locked(adapter);
   1516 }
   1517 
   1518 static void
   1519 ixv_local_timer(void *arg)
   1520 {
   1521 	struct adapter *adapter = arg;
   1522 
   1523 	IXV_CORE_LOCK(adapter);
   1524 	ixv_local_timer1(adapter);
   1525 	IXV_CORE_UNLOCK(adapter);
   1526 }
   1527 
   1528 /*
   1529 ** Note: this routine updates the OS on the link state;
   1530 **	the real check of the hardware only happens with
   1531 **	a link interrupt.
   1532 */
   1533 static void
   1534 ixv_update_link_status(struct adapter *adapter)
   1535 {
   1536 	struct ifnet	*ifp = adapter->ifp;
   1537 	struct tx_ring *txr = adapter->tx_rings;
   1538 	device_t dev = adapter->dev;
   1539 
   1540 
   1541 	if (adapter->link_up){
   1542 		if (adapter->link_active == FALSE) {
   1543 			if (bootverbose)
   1544 				device_printf(dev,"Link is up %d Gbps %s \n",
   1545 				    ((adapter->link_speed == 128)? 10:1),
   1546 				    "Full Duplex");
   1547 			adapter->link_active = TRUE;
   1548 			if_link_state_change(ifp, LINK_STATE_UP);
   1549 		}
   1550 	} else { /* Link down */
   1551 		if (adapter->link_active == TRUE) {
   1552 			if (bootverbose)
   1553 				device_printf(dev,"Link is Down\n");
   1554 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1555 			adapter->link_active = FALSE;
   1556 			for (int i = 0; i < adapter->num_queues;
   1557 			    i++, txr++)
   1558 				txr->watchdog_check = FALSE;
   1559 		}
   1560 	}
   1561 
   1562 	return;
   1563 }
   1564 
   1565 
   1566 static void
   1567 ixv_ifstop(struct ifnet *ifp, int disable)
   1568 {
   1569 	struct adapter *adapter = ifp->if_softc;
   1570 
   1571 	IXV_CORE_LOCK(adapter);
   1572 	ixv_stop(adapter);
   1573 	IXV_CORE_UNLOCK(adapter);
   1574 }
   1575 
   1576 /*********************************************************************
   1577  *
   1578  *  This routine disables all traffic on the adapter by issuing a
   1579  *  global reset on the MAC and deallocates TX/RX buffers.
   1580  *
   1581  **********************************************************************/
   1582 
   1583 static void
   1584 ixv_stop(void *arg)
   1585 {
   1586 	struct ifnet   *ifp;
   1587 	struct adapter *adapter = arg;
   1588 	struct ixgbe_hw *hw = &adapter->hw;
   1589 	ifp = adapter->ifp;
   1590 
   1591 	KASSERT(mutex_owned(&adapter->core_mtx));
   1592 
   1593 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1594 	ixv_disable_intr(adapter);
   1595 
   1596 	/* Tell the stack that the interface is no longer active */
   1597 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1598 
   1599 	ixgbe_reset_hw(hw);
   1600 	adapter->hw.adapter_stopped = FALSE;
   1601 	ixgbe_stop_adapter(hw);
   1602 	callout_stop(&adapter->timer);
   1603 
   1604 	/* reprogram the RAR[0] in case user changed it. */
   1605 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1606 
   1607 	return;
   1608 }
   1609 
   1610 
   1611 /*********************************************************************
   1612  *
   1613  *  Determine hardware revision.
   1614  *
   1615  **********************************************************************/
   1616 static void
   1617 ixv_identify_hardware(struct adapter *adapter)
   1618 {
   1619 	u16		pci_cmd_word;
   1620 	pcitag_t tag;
   1621 	pci_chipset_tag_t pc;
   1622 	pcireg_t subid, id;
   1623 	struct ixgbe_hw *hw = &adapter->hw;
   1624 
   1625 	pc = adapter->osdep.pc;
   1626 	tag = adapter->osdep.tag;
   1627 
   1628 	/*
    1629 	** Make sure BUSMASTER is set; on a VM under
    1630 	** KVM it may not be, and that will break things.
   1631 	*/
   1632 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1633 	if (!((pci_cmd_word & PCI_COMMAND_MASTER_ENABLE) &&
   1634 	    (pci_cmd_word & PCI_COMMAND_MEM_ENABLE))) {
   1635 		INIT_DEBUGOUT("Memory Access and/or Bus Master "
   1636 		    "bits were not set!\n");
   1637 		pci_cmd_word |=
   1638 		    (PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE);
   1639 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1640 	}
   1641 
   1642 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1643 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1644 
   1645 	/* Save off the information about this board */
   1646 	hw->vendor_id = PCI_VENDOR(id);
   1647 	hw->device_id = PCI_PRODUCT(id);
   1648 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1649 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1650 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1651 
   1652 	return;
   1653 }
   1654 
   1655 /*********************************************************************
   1656  *
   1657  *  Setup MSIX Interrupt resources and handlers
   1658  *
   1659  **********************************************************************/
   1660 static int
   1661 ixv_allocate_msix(struct adapter *adapter)
   1662 {
   1663 #if !defined(NETBSD_MSI_OR_MSIX)
   1664 	return 0;
   1665 #else
   1666 	device_t        dev = adapter->dev;
   1667 	struct 		ix_queue *que = adapter->queues;
   1668 	int 		error, rid, vector = 0;
   1669 	pcitag_t tag;
   1670 	pci_chipset_tag_t pc;
   1671 
   1672 	pc = adapter->osdep.pc;
   1673 	tag = adapter->osdep.tag;
   1674 
   1675 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   1676 		rid = vector + 1;
   1677 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
   1678 		    RF_SHAREABLE | RF_ACTIVE);
   1679 		if (que->res == NULL) {
   1680 			aprint_error_dev(dev,"Unable to allocate"
   1681 		    	    " bus resource: que interrupt [%d]\n", vector);
   1682 			return (ENXIO);
   1683 		}
   1684 		/* Set the handler function */
   1685 		error = bus_setup_intr(dev, que->res,
   1686 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   1687 		    ixv_msix_que, que, &que->tag);
   1688 		if (error) {
   1689 			que->res = NULL;
   1690 			aprint_error_dev(dev,
    1691 			    "Failed to register QUE handler\n");
   1692 			return (error);
   1693 		}
   1694 #if __FreeBSD_version >= 800504
   1695 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
   1696 #endif
   1697 		que->msix = vector;
   1698         	adapter->que_mask |= (u64)(1 << que->msix);
   1699 		/*
   1700 		** Bind the msix vector, and thus the
   1701 		** ring to the corresponding cpu.
   1702 		*/
   1703 		if (adapter->num_queues > 1)
   1704 			bus_bind_intr(dev, que->res, i);
   1705 
   1706 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1707 		    que);
   1708 	}
   1709 
   1710 	/* and Mailbox */
   1711 	rid = vector + 1;
   1712 	adapter->res = bus_alloc_resource_any(dev,
   1713     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
   1714 	if (!adapter->res) {
   1715 		aprint_error_dev(dev,"Unable to allocate"
   1716     	    " bus resource: MBX interrupt [%d]\n", rid);
   1717 		return (ENXIO);
   1718 	}
   1719 	/* Set the mbx handler function */
   1720 	error = bus_setup_intr(dev, adapter->res,
   1721 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   1722 	    ixv_msix_mbx, adapter, &adapter->tag);
   1723 	if (error) {
   1724 		adapter->res = NULL;
    1725 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1726 		return (error);
   1727 	}
   1728 #if __FreeBSD_version >= 800504
   1729 	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
   1730 #endif
   1731 	adapter->mbxvec = vector;
   1732 	/* Tasklets for Mailbox */
   1733 	adapter->mbx_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1734 	    adapter);
   1735 	/*
   1736 	** Due to a broken design QEMU will fail to properly
   1737 	** enable the guest for MSIX unless the vectors in
   1738 	** the table are all set up, so we must rewrite the
   1739 	** ENABLE in the MSIX control register again at this
   1740 	** point to cause it to successfully initialize us.
   1741 	*/
    1742 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
    1743 		pcireg_t msix_ctrl;
    1744 		(void)pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
    1745 		rid += PCI_MSIX_CTL;
    1746 		msix_ctrl = pci_conf_read(pc, tag, rid);
    1747 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
    1748 		pci_conf_write(pc, tag, rid, msix_ctrl);
    1749 	}
   1750 
   1751 	return (0);
   1752 #endif
   1753 }
   1754 
   1755 /*
    1756  * Set up MSI-X resources; note that the VF
    1757  * device MUST use MSI-X, there is no fallback.
   1758  */
   1759 static int
   1760 ixv_setup_msix(struct adapter *adapter)
   1761 {
   1762 #if !defined(NETBSD_MSI_OR_MSIX)
   1763 	return 0;
   1764 #else
   1765 	device_t dev = adapter->dev;
   1766 	int rid, vectors, want = 2;
   1767 
   1768 
   1769 	/* First try MSI/X */
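	/* The MSI-X table for these VFs is mapped through BAR 3 */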
   1770 	rid = PCIR_BAR(3);
   1771 	adapter->msix_mem = bus_alloc_resource_any(dev,
   1772 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   1773        	if (!adapter->msix_mem) {
   1774 		device_printf(adapter->dev,
   1775 		    "Unable to map MSIX table \n");
   1776 		goto out;
   1777 	}
   1778 
   1779 	vectors = pci_msix_count(dev);
   1780 	if (vectors < 2) {
   1781 		bus_release_resource(dev, SYS_RES_MEMORY,
   1782 		    rid, adapter->msix_mem);
   1783 		adapter->msix_mem = NULL;
   1784 		goto out;
   1785 	}
   1786 
   1787 	/*
   1788 	** Want two vectors: one for a queue,
   1789 	** plus an additional for mailbox.
   1790 	*/
   1791 	if (pci_alloc_msix(dev, &want) == 0) {
   1792                	device_printf(adapter->dev,
   1793 		    "Using MSIX interrupts with %d vectors\n", want);
   1794 		return (want);
   1795 	}
   1796 out:
   1797 	device_printf(adapter->dev,"MSIX config error\n");
   1798 	return (ENXIO);
   1799 #endif
   1800 }
   1801 
   1802 
   1803 static int
   1804 ixv_allocate_pci_resources(struct adapter *adapter,
   1805     const struct pci_attach_args *pa)
   1806 {
   1807 	pcireg_t	memtype;
   1808 	device_t        dev = adapter->dev;
   1809 	bus_addr_t addr;
   1810 	int flags;
   1811 
   1812 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1813 
   1814 	switch (memtype) {
   1815 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1816 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1817 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1818 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1819 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1820 			goto map_err;
   1821 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1822 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1823 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1824 		}
   1825 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1826 		     adapter->osdep.mem_size, flags,
   1827 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1828 map_err:
   1829 			adapter->osdep.mem_size = 0;
   1830 			aprint_error_dev(dev, "unable to map BAR0\n");
   1831 			return ENXIO;
   1832 		}
   1833 		break;
   1834 	default:
   1835 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1836 		return ENXIO;
   1837 	}
   1838 
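	/* This driver operates the VF with a single queue pair */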
   1839 	adapter->num_queues = 1;
   1840 	adapter->hw.back = &adapter->osdep;
   1841 
   1842 	/*
    1843 	** Now set up MSI-X; this should
    1844 	** return the number of
    1845 	** configured vectors.
   1846 	*/
   1847 	adapter->msix = ixv_setup_msix(adapter);
   1848 	if (adapter->msix == ENXIO)
   1849 		return (ENXIO);
   1850 	else
   1851 		return (0);
   1852 }
   1853 
   1854 static void
   1855 ixv_free_pci_resources(struct adapter * adapter)
   1856 {
   1857 #if defined(NETBSD_MSI_OR_MSIX)
   1858 	struct 		ix_queue *que = adapter->queues;
   1859 	device_t	dev = adapter->dev;
   1860 	int		rid, memrid;
   1861 
   1862 	memrid = PCI_BAR(MSIX_BAR);
   1863 
   1864 	/*
   1865 	** There is a slight possibility of a failure mode
   1866 	** in attach that will result in entering this function
   1867 	** before interrupt resources have been initialized, and
    1868 	** in that case we do not want to execute the loops below.
    1869 	** We can detect this reliably by the state of the adapter's
    1870 	** res pointer.
   1871 	*/
   1872 	if (adapter->res == NULL)
   1873 		goto mem;
   1874 
   1875 	/*
   1876 	**  Release all msix queue resources:
   1877 	*/
   1878 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1879 		rid = que->msix + 1;
   1880 		if (que->tag != NULL) {
   1881 			bus_teardown_intr(dev, que->res, que->tag);
   1882 			que->tag = NULL;
   1883 		}
   1884 		if (que->res != NULL)
   1885 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
   1886 	}
   1887 
   1888 
   1889 	/* Clean the Legacy or Link interrupt last */
   1890 	if (adapter->mbxvec) /* we are doing MSIX */
   1891 		rid = adapter->mbxvec + 1;
   1892 	else
   1893 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
   1894 
   1895 	if (adapter->tag != NULL) {
   1896 		bus_teardown_intr(dev, adapter->res, adapter->tag);
   1897 		adapter->tag = NULL;
   1898 	}
   1899 	if (adapter->res != NULL)
   1900 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
   1901 
   1902 mem:
   1903 	if (adapter->msix)
   1904 		pci_release_msi(dev);
   1905 
   1906 	if (adapter->msix_mem != NULL)
   1907 		bus_release_resource(dev, SYS_RES_MEMORY,
   1908 		    memrid, adapter->msix_mem);
   1909 
   1910 	if (adapter->pci_mem != NULL)
   1911 		bus_release_resource(dev, SYS_RES_MEMORY,
   1912 		    PCIR_BAR(0), adapter->pci_mem);
   1913 
   1914 #endif
   1915 	return;
   1916 }
   1917 
   1918 /*********************************************************************
   1919  *
   1920  *  Setup networking device structure and register an interface.
   1921  *
   1922  **********************************************************************/
   1923 static void
   1924 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1925 {
   1926 	struct ethercom *ec = &adapter->osdep.ec;
   1927 	struct ifnet   *ifp;
   1928 
   1929 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1930 
   1931 	ifp = adapter->ifp = &ec->ec_if;
   1932 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1933 	ifp->if_baudrate = 1000000000;
   1934 	ifp->if_init = ixv_init;
   1935 	ifp->if_stop = ixv_ifstop;
   1936 	ifp->if_softc = adapter;
   1937 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1938 	ifp->if_ioctl = ixv_ioctl;
   1939 #if __FreeBSD_version >= 800000
   1940 	ifp->if_transmit = ixv_mq_start;
   1941 	ifp->if_qflush = ixv_qflush;
   1942 #else
   1943 	ifp->if_start = ixv_start;
   1944 #endif
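	/* Size the software send queue to the TX ring, less a little slack */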
   1945 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1946 
   1947 	if_attach(ifp);
   1948 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1949 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1950 
   1951 	adapter->max_frame_size =
   1952 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1953 
   1954 	/*
   1955 	 * Tell the upper layer(s) we support long frames.
   1956 	 */
   1957 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1958 
   1959 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1960 	ifp->if_capenable = 0;
   1961 
   1962 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1963 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1964 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1965 	    		| ETHERCAP_VLAN_MTU;
   1966 	ec->ec_capenable = ec->ec_capabilities;
   1967 
   1968 	/* Don't enable LRO by default */
   1969 	ifp->if_capabilities |= IFCAP_LRO;
   1970 
   1971 	/*
    1972 	** Don't turn this on by default: if vlans are
    1973 	** created on another pseudo device (e.g. lagg)
    1974 	** then vlan events are not passed through, breaking
    1975 	** operation, but with HW FILTER off it works. If
    1976 	** using vlans directly on this driver you can
    1977 	** enable this and get full hardware tag filtering.
   1978 	*/
   1979 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1980 
   1981 	/*
   1982 	 * Specify the media types supported by this adapter and register
   1983 	 * callbacks to update media and link information
   1984 	 */
   1985 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1986 		     ixv_media_status);
   1987 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
   1988 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1989 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1990 
   1991 	return;
   1992 }
   1993 
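/*
** Check the current link state via the VF check_link hook and, if a
** setup_link hook is provided, request link setup.  Link speed and
** duplex are owned by the PF, so there is little for the VF to do here.
*/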
   1994 static void
   1995 ixv_config_link(struct adapter *adapter)
   1996 {
   1997 	struct ixgbe_hw *hw = &adapter->hw;
   1998 	u32	autoneg, err = 0;
   1999 	bool	negotiate = TRUE;
   2000 
   2001 	if (hw->mac.ops.check_link)
   2002 		err = hw->mac.ops.check_link(hw, &autoneg,
   2003 		    &adapter->link_up, FALSE);
   2004 	if (err)
   2005 		goto out;
   2006 
   2007 	if (hw->mac.ops.setup_link)
   2008                	err = hw->mac.ops.setup_link(hw, autoneg,
   2009 		    negotiate, adapter->link_up);
   2010 out:
   2011 	return;
   2012 }
   2013 
   2014 /********************************************************************
   2015  * Manage DMA'able memory.
   2016  *******************************************************************/
   2017 
   2018 static int
   2019 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
   2020 		struct ixv_dma_alloc *dma, int mapflags)
   2021 {
   2022 	device_t dev = adapter->dev;
   2023 	int             r, rsegs;
   2024 
   2025 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2026 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2027 			       size,	/* maxsize */
   2028 			       1,	/* nsegments */
   2029 			       size,	/* maxsegsize */
   2030 			       BUS_DMA_ALLOCNOW,	/* flags */
   2031 			       &dma->dma_tag);
   2032 	if (r != 0) {
   2033 		aprint_error_dev(dev,
   2034 		    "ixv_dma_malloc: bus_dma_tag_create failed; error %u\n", r);
   2035 		goto fail_0;
   2036 	}
   2037 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2038 		size,
   2039 		dma->dma_tag->dt_alignment,
   2040 		dma->dma_tag->dt_boundary,
   2041 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2042 	if (r != 0) {
   2043 		aprint_error_dev(dev,
   2044 		    "%s: bus_dmamem_alloc failed; error %u\n", __func__, r);
   2045 		goto fail_1;
   2046 	}
   2047 
   2048 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2049 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2050 	if (r != 0) {
   2051 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2052 		    __func__, r);
   2053 		goto fail_2;
   2054 	}
   2055 
   2056 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2057 	if (r != 0) {
    2058 		aprint_error_dev(dev, "%s: bus_dmamap_create failed; error %d\n",
   2059 		    __func__, r);
   2060 		goto fail_3;
   2061 	}
   2062 
   2063 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2064 			    size,
   2065 			    NULL,
   2066 			    mapflags | BUS_DMA_NOWAIT);
   2067 	if (r != 0) {
   2068 		aprint_error_dev(dev,"%s: bus_dmamap_load failed; error %u\n",
   2069 		    __func__, r);
   2070 		goto fail_4;
   2071 	}
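	/* Record the bus address that will be programmed into the hardware */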
   2072 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2073 	dma->dma_size = size;
   2074 	return 0;
   2075 fail_4:
   2076 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2077 fail_3:
   2078 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2079 fail_2:
   2080 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2081 fail_1:
   2082 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2083 fail_0:
   2084 	dma->dma_map = NULL;
   2085 	dma->dma_tag = NULL;
   2086 	return (r);
   2087 }
   2088 
   2089 static void
   2090 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
   2091 {
   2092 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2093 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2094 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2095 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2096 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2097 }
   2098 
   2099 
   2100 /*********************************************************************
   2101  *
   2102  *  Allocate memory for the transmit and receive rings, and then
   2103  *  the descriptors associated with each, called only once at attach.
   2104  *
   2105  **********************************************************************/
   2106 static int
   2107 ixv_allocate_queues(struct adapter *adapter)
   2108 {
   2109 	device_t	dev = adapter->dev;
   2110 	struct ix_queue	*que;
   2111 	struct tx_ring	*txr;
   2112 	struct rx_ring	*rxr;
   2113 	int rsize, tsize, error = 0;
   2114 	int txconf = 0, rxconf = 0;
   2115 
   2116         /* First allocate the top level queue structs */
   2117         if (!(adapter->queues =
   2118             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2119             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2120                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2121                 error = ENOMEM;
   2122                 goto fail;
   2123         }
   2124 
    2125 	/* Next allocate the TX ring struct memory */
   2126 	if (!(adapter->tx_rings =
   2127 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2128 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2129 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2130 		error = ENOMEM;
   2131 		goto tx_fail;
   2132 	}
   2133 
   2134 	/* Next allocate the RX */
   2135 	if (!(adapter->rx_rings =
   2136 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2137 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2138 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2139 		error = ENOMEM;
   2140 		goto rx_fail;
   2141 	}
   2142 
    2143 	/* Size of the TX descriptor ring itself */
   2144 	tsize = roundup2(adapter->num_tx_desc *
   2145 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2146 
   2147 	/*
    2148 	 * Now set up the TX queues; txconf is needed to handle the
    2149 	 * possibility that things fail midcourse and we need to
    2150 	 * undo the allocated memory gracefully.
   2151 	 */
   2152 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2153 		/* Set up some basics */
   2154 		txr = &adapter->tx_rings[i];
   2155 		txr->adapter = adapter;
   2156 		txr->me = i;
   2157 
   2158 		/* Initialize the TX side lock */
   2159 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2160 		    device_xname(dev), txr->me);
   2161 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2162 
   2163 		if (ixv_dma_malloc(adapter, tsize,
   2164 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2165 			aprint_error_dev(dev,
   2166 			    "Unable to allocate TX Descriptor memory\n");
   2167 			error = ENOMEM;
   2168 			goto err_tx_desc;
   2169 		}
   2170 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2171 		bzero((void *)txr->tx_base, tsize);
   2172 
   2173         	/* Now allocate transmit buffers for the ring */
   2174         	if (ixv_allocate_transmit_buffers(txr)) {
   2175 			aprint_error_dev(dev,
   2176 			    "Critical Failure setting up transmit buffers\n");
   2177 			error = ENOMEM;
   2178 			goto err_tx_desc;
   2179         	}
   2180 #if __FreeBSD_version >= 800000
   2181 		/* Allocate a buf ring */
   2182 		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
   2183 		    M_WAITOK, &txr->tx_mtx);
   2184 		if (txr->br == NULL) {
   2185 			aprint_error_dev(dev,
   2186 			    "Critical Failure setting up buf ring\n");
   2187 			error = ENOMEM;
   2188 			goto err_tx_desc;
   2189 		}
   2190 #endif
   2191 	}
   2192 
   2193 	/*
   2194 	 * Next the RX queues...
   2195 	 */
   2196 	rsize = roundup2(adapter->num_rx_desc *
   2197 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2198 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2199 		rxr = &adapter->rx_rings[i];
   2200 		/* Set up some basics */
   2201 		rxr->adapter = adapter;
   2202 		rxr->me = i;
   2203 
   2204 		/* Initialize the RX side lock */
   2205 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2206 		    device_xname(dev), rxr->me);
   2207 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2208 
   2209 		if (ixv_dma_malloc(adapter, rsize,
   2210 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2211 			aprint_error_dev(dev,
   2212 			    "Unable to allocate RxDescriptor memory\n");
   2213 			error = ENOMEM;
   2214 			goto err_rx_desc;
   2215 		}
   2216 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2217 		bzero((void *)rxr->rx_base, rsize);
   2218 
   2219         	/* Allocate receive buffers for the ring*/
   2220 		if (ixv_allocate_receive_buffers(rxr)) {
   2221 			aprint_error_dev(dev,
   2222 			    "Critical Failure setting up receive buffers\n");
   2223 			error = ENOMEM;
   2224 			goto err_rx_desc;
   2225 		}
   2226 	}
   2227 
   2228 	/*
   2229 	** Finally set up the queue holding structs
   2230 	*/
   2231 	for (int i = 0; i < adapter->num_queues; i++) {
   2232 		que = &adapter->queues[i];
   2233 		que->adapter = adapter;
   2234 		que->txr = &adapter->tx_rings[i];
   2235 		que->rxr = &adapter->rx_rings[i];
   2236 	}
   2237 
   2238 	return (0);
   2239 
   2240 err_rx_desc:
   2241 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2242 		ixv_dma_free(adapter, &rxr->rxdma);
   2243 err_tx_desc:
   2244 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2245 		ixv_dma_free(adapter, &txr->txdma);
   2246 	free(adapter->rx_rings, M_DEVBUF);
   2247 rx_fail:
   2248 	free(adapter->tx_rings, M_DEVBUF);
   2249 tx_fail:
   2250 	free(adapter->queues, M_DEVBUF);
   2251 fail:
   2252 	return (error);
   2253 }
   2254 
   2255 
   2256 /*********************************************************************
   2257  *
   2258  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2259  *  the information needed to transmit a packet on the wire. This is
    2260  *  called only once at attach; setup is done on every reset.
   2261  *
   2262  **********************************************************************/
   2263 static int
   2264 ixv_allocate_transmit_buffers(struct tx_ring *txr)
   2265 {
   2266 	struct adapter *adapter = txr->adapter;
   2267 	device_t dev = adapter->dev;
   2268 	struct ixv_tx_buf *txbuf;
   2269 	int error, i;
   2270 
   2271 	/*
   2272 	 * Setup DMA descriptor areas.
   2273 	 */
   2274 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2275 			       1, 0,		/* alignment, bounds */
   2276 			       IXV_TSO_SIZE,		/* maxsize */
   2277 			       32,			/* nsegments */
   2278 			       PAGE_SIZE,		/* maxsegsize */
   2279 			       0,			/* flags */
   2280 			       &txr->txtag))) {
   2281 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   2282 		goto fail;
   2283 	}
   2284 
   2285 	if (!(txr->tx_buffers =
   2286 	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
   2287 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2288 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   2289 		error = ENOMEM;
   2290 		goto fail;
   2291 	}
   2292 
   2293         /* Create the descriptor buffer dma maps */
   2294 	txbuf = txr->tx_buffers;
   2295 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2296 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   2297 		if (error != 0) {
   2298 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   2299 			goto fail;
   2300 		}
   2301 	}
   2302 
   2303 	return 0;
   2304 fail:
   2305 	/* We free all, it handles case where we are in the middle */
   2306 	ixv_free_transmit_structures(adapter);
   2307 	return (error);
   2308 }
   2309 
   2310 /*********************************************************************
   2311  *
   2312  *  Initialize a transmit ring.
   2313  *
   2314  **********************************************************************/
   2315 static void
   2316 ixv_setup_transmit_ring(struct tx_ring *txr)
   2317 {
   2318 	struct adapter *adapter = txr->adapter;
   2319 	struct ixv_tx_buf *txbuf;
   2320 	int i;
   2321 
   2322 	/* Clear the old ring contents */
   2323 	IXV_TX_LOCK(txr);
   2324 	bzero((void *)txr->tx_base,
   2325 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   2326 	/* Reset indices */
   2327 	txr->next_avail_desc = 0;
   2328 	txr->next_to_clean = 0;
   2329 
   2330 	/* Free any existing tx buffers. */
   2331         txbuf = txr->tx_buffers;
   2332 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2333 		if (txbuf->m_head != NULL) {
   2334 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   2335 			    0, txbuf->m_head->m_pkthdr.len,
   2336 			    BUS_DMASYNC_POSTWRITE);
   2337 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   2338 			m_freem(txbuf->m_head);
   2339 			txbuf->m_head = NULL;
   2340 		}
   2341 		/* Clear the EOP index */
   2342 		txbuf->eop_index = -1;
   2343         }
   2344 
   2345 	/* Set number of descriptors available */
   2346 	txr->tx_avail = adapter->num_tx_desc;
   2347 
   2348 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2349 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2350 	IXV_TX_UNLOCK(txr);
   2351 }
   2352 
   2353 /*********************************************************************
   2354  *
   2355  *  Initialize all transmit rings.
   2356  *
   2357  **********************************************************************/
   2358 static int
   2359 ixv_setup_transmit_structures(struct adapter *adapter)
   2360 {
   2361 	struct tx_ring *txr = adapter->tx_rings;
   2362 
   2363 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   2364 		ixv_setup_transmit_ring(txr);
   2365 
   2366 	return (0);
   2367 }
   2368 
   2369 /*********************************************************************
   2370  *
   2371  *  Enable transmit unit.
   2372  *
   2373  **********************************************************************/
   2374 static void
   2375 ixv_initialize_transmit_units(struct adapter *adapter)
   2376 {
   2377 	struct tx_ring	*txr = adapter->tx_rings;
   2378 	struct ixgbe_hw	*hw = &adapter->hw;
   2379 
   2380 
   2381 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2382 		u64	tdba = txr->txdma.dma_paddr;
   2383 		u32	txctrl, txdctl;
   2384 
   2385 		/* Set WTHRESH to 8, burst writeback */
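		/* (WTHRESH lives in bits 22:16 of TXDCTL, hence the << 16) */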
   2386 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2387 		txdctl |= (8 << 16);
   2388 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2389 		/* Now enable */
   2390 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2391 		txdctl |= IXGBE_TXDCTL_ENABLE;
   2392 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2393 
   2394 		/* Set the HW Tx Head and Tail indices */
   2395 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   2396 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   2397 
   2398 		/* Setup Transmit Descriptor Cmd Settings */
   2399 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   2400 		txr->watchdog_check = FALSE;
   2401 
   2402 		/* Set Ring parameters */
   2403 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   2404 		       (tdba & 0x00000000ffffffffULL));
   2405 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   2406 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   2407 		    adapter->num_tx_desc *
   2408 		    sizeof(struct ixgbe_legacy_tx_desc));
   2409 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   2410 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   2411 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
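		/*
		** Only the first ring is programmed before this break;
		** that is harmless here since this VF driver uses a
		** single queue.
		*/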
   2412 		break;
   2413 	}
   2414 
   2415 	return;
   2416 }
   2417 
   2418 /*********************************************************************
   2419  *
   2420  *  Free all transmit rings.
   2421  *
   2422  **********************************************************************/
   2423 static void
   2424 ixv_free_transmit_structures(struct adapter *adapter)
   2425 {
   2426 	struct tx_ring *txr = adapter->tx_rings;
   2427 
   2428 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2429 		ixv_free_transmit_buffers(txr);
   2430 		ixv_dma_free(adapter, &txr->txdma);
   2431 		IXV_TX_LOCK_DESTROY(txr);
   2432 	}
   2433 	free(adapter->tx_rings, M_DEVBUF);
   2434 }
   2435 
   2436 /*********************************************************************
   2437  *
   2438  *  Free transmit ring related data structures.
   2439  *
   2440  **********************************************************************/
   2441 static void
   2442 ixv_free_transmit_buffers(struct tx_ring *txr)
   2443 {
   2444 	struct adapter *adapter = txr->adapter;
   2445 	struct ixv_tx_buf *tx_buffer;
   2446 	int             i;
   2447 
   2448 	INIT_DEBUGOUT("free_transmit_ring: begin");
   2449 
   2450 	if (txr->tx_buffers == NULL)
   2451 		return;
   2452 
   2453 	tx_buffer = txr->tx_buffers;
   2454 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   2455 		if (tx_buffer->m_head != NULL) {
   2456 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   2457 			    0, tx_buffer->m_head->m_pkthdr.len,
   2458 			    BUS_DMASYNC_POSTWRITE);
   2459 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2460 			m_freem(tx_buffer->m_head);
   2461 			tx_buffer->m_head = NULL;
   2462 			if (tx_buffer->map != NULL) {
   2463 				ixgbe_dmamap_destroy(txr->txtag,
   2464 				    tx_buffer->map);
   2465 				tx_buffer->map = NULL;
   2466 			}
   2467 		} else if (tx_buffer->map != NULL) {
   2468 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2469 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   2470 			tx_buffer->map = NULL;
   2471 		}
   2472 	}
   2473 #if __FreeBSD_version >= 800000
   2474 	if (txr->br != NULL)
   2475 		buf_ring_free(txr->br, M_DEVBUF);
   2476 #endif
   2477 	if (txr->tx_buffers != NULL) {
   2478 		free(txr->tx_buffers, M_DEVBUF);
   2479 		txr->tx_buffers = NULL;
   2480 	}
   2481 	if (txr->txtag != NULL) {
   2482 		ixgbe_dma_tag_destroy(txr->txtag);
   2483 		txr->txtag = NULL;
   2484 	}
   2485 	return;
   2486 }
   2487 
   2488 /*********************************************************************
   2489  *
   2490  *  Advanced Context Descriptor setup for VLAN or L4 CSUM
   2491  *
   2492  **********************************************************************/
   2493 
   2494 static u32
   2495 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   2496 {
   2497 	struct m_tag *mtag;
   2498 	struct adapter *adapter = txr->adapter;
   2499 	struct ethercom *ec = &adapter->osdep.ec;
   2500 	struct ixgbe_adv_tx_context_desc *TXD;
   2501 	struct ixv_tx_buf        *tx_buffer;
   2502 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2503 	struct ether_vlan_header *eh;
   2504 	struct ip ip;
   2505 	struct ip6_hdr ip6;
   2506 	int  ehdrlen, ip_hlen = 0;
   2507 	u16	etype;
   2508 	u8	ipproto = 0;
   2509 	bool	offload;
   2510 	int ctxd = txr->next_avail_desc;
   2511 	u16 vtag = 0;
   2512 
   2513 
   2514 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   2515 
   2516 	tx_buffer = &txr->tx_buffers[ctxd];
   2517 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2518 
   2519 	/*
   2520 	** In advanced descriptors the vlan tag must
   2521 	** be placed into the descriptor itself.
   2522 	*/
   2523 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2524 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2525 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2526 	} else if (!offload)
   2527 		return 0;
   2528 
   2529 	/*
   2530 	 * Determine where frame payload starts.
   2531 	 * Jump over vlan headers if already present,
   2532 	 * helpful for QinQ too.
   2533 	 */
   2534 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   2535 	eh = mtod(mp, struct ether_vlan_header *);
   2536 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   2537 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   2538 		etype = ntohs(eh->evl_proto);
   2539 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2540 	} else {
   2541 		etype = ntohs(eh->evl_encap_proto);
   2542 		ehdrlen = ETHER_HDR_LEN;
   2543 	}
   2544 
   2545 	/* Set the ether header length */
   2546 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2547 
   2548 	switch (etype) {
   2549 	case ETHERTYPE_IP:
   2550 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   2551 		ip_hlen = ip.ip_hl << 2;
   2552 		ipproto = ip.ip_p;
   2553 #if 0
   2554 		ip.ip_sum = 0;
   2555 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   2556 #else
   2557 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   2558 		    ip.ip_sum == 0);
   2559 #endif
   2560 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2561 		break;
   2562 	case ETHERTYPE_IPV6:
   2563 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   2564 		ip_hlen = sizeof(ip6);
   2565 		ipproto = ip6.ip6_nxt;
   2566 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   2567 		break;
   2568 	default:
   2569 		break;
   2570 	}
   2571 
   2572 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   2573 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   2574 
   2575 	vlan_macip_lens |= ip_hlen;
   2576 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2577 
   2578 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   2579 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2580 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2581 		KASSERT(ipproto == IPPROTO_TCP);
   2582 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   2583 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   2584 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2585 		KASSERT(ipproto == IPPROTO_UDP);
   2586 	}
   2587 
   2588 	/* Now copy bits into descriptor */
   2589 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2590 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2591 	TXD->seqnum_seed = htole32(0);
   2592 	TXD->mss_l4len_idx = htole32(0);
   2593 
   2594 	tx_buffer->m_head = NULL;
   2595 	tx_buffer->eop_index = -1;
   2596 
   2597 	/* We've consumed the first desc, adjust counters */
   2598 	if (++ctxd == adapter->num_tx_desc)
   2599 		ctxd = 0;
   2600 	txr->next_avail_desc = ctxd;
   2601 	--txr->tx_avail;
   2602 
   2603         return olinfo;
   2604 }
   2605 
   2606 /**********************************************************************
   2607  *
   2608  *  Setup work for hardware segmentation offload (TSO) on
   2609  *  adapters using advanced tx descriptors
   2610  *
   2611  **********************************************************************/
   2612 static bool
   2613 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   2614 {
   2615 	struct m_tag *mtag;
   2616 	struct adapter *adapter = txr->adapter;
   2617 	struct ethercom *ec = &adapter->osdep.ec;
   2618 	struct ixgbe_adv_tx_context_desc *TXD;
   2619 	struct ixv_tx_buf        *tx_buffer;
   2620 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2621 	u32 mss_l4len_idx = 0;
   2622 	u16 vtag = 0;
   2623 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
   2624 	struct ether_vlan_header *eh;
   2625 	struct ip *ip;
   2626 	struct tcphdr *th;
   2627 
   2628 
   2629 	/*
   2630 	 * Determine where frame payload starts.
   2631 	 * Jump over vlan headers if already present
   2632 	 */
   2633 	eh = mtod(mp, struct ether_vlan_header *);
   2634 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   2635 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2636 	else
   2637 		ehdrlen = ETHER_HDR_LEN;
   2638 
   2639         /* Ensure we have at least the IP+TCP header in the first mbuf. */
   2640         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   2641 		return FALSE;
   2642 
   2643 	ctxd = txr->next_avail_desc;
   2644 	tx_buffer = &txr->tx_buffers[ctxd];
   2645 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2646 
   2647 	ip = (struct ip *)(mp->m_data + ehdrlen);
   2648 	if (ip->ip_p != IPPROTO_TCP)
   2649 		return FALSE;   /* 0 */
   2650 	ip->ip_sum = 0;
   2651 	ip_hlen = ip->ip_hl << 2;
   2652 	th = (struct tcphdr *)((char *)ip + ip_hlen);
   2653 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   2654 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   2655 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   2656 	tcp_hlen = th->th_off << 2;
   2657 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   2658 
   2659 	/* This is used in the transmit desc in encap */
   2660 	*paylen = mp->m_pkthdr.len - hdrlen;
   2661 
   2662 	/* VLAN MACLEN IPLEN */
   2663 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2664 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2665                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2666 	}
   2667 
   2668 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2669 	vlan_macip_lens |= ip_hlen;
   2670 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2671 
   2672 	/* ADV DTYPE TUCMD */
   2673 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2674 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2675 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2676 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2677 
   2678 
   2679 	/* MSS L4LEN IDX */
   2680 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   2681 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   2682 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   2683 
   2684 	TXD->seqnum_seed = htole32(0);
   2685 	tx_buffer->m_head = NULL;
   2686 	tx_buffer->eop_index = -1;
   2687 
   2688 	if (++ctxd == adapter->num_tx_desc)
   2689 		ctxd = 0;
   2690 
   2691 	txr->tx_avail--;
   2692 	txr->next_avail_desc = ctxd;
   2693 	return TRUE;
   2694 }
   2695 
   2696 
   2697 /**********************************************************************
   2698  *
   2699  *  Examine each tx_buffer in the used queue. If the hardware is done
   2700  *  processing the packet then free associated resources. The
   2701  *  tx_buffer is put back on the free queue.
   2702  *
   2703  **********************************************************************/
   2704 static bool
   2705 ixv_txeof(struct tx_ring *txr)
   2706 {
   2707 	struct adapter	*adapter = txr->adapter;
   2708 	struct ifnet	*ifp = adapter->ifp;
   2709 	u32	first, last, done;
   2710 	struct ixv_tx_buf *tx_buffer;
   2711 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   2712 
   2713 	KASSERT(mutex_owned(&txr->tx_mtx));
   2714 
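	/* If every descriptor is still free there is nothing to clean */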
   2715 	if (txr->tx_avail == adapter->num_tx_desc)
   2716 		return false;
   2717 
   2718 	first = txr->next_to_clean;
   2719 	tx_buffer = &txr->tx_buffers[first];
   2720 	/* For cleanup we just use legacy struct */
   2721 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2722 	last = tx_buffer->eop_index;
   2723 	if (last == -1)
   2724 		return false;
   2725 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2726 
   2727 	/*
   2728 	** Get the index of the first descriptor
   2729 	** BEYOND the EOP and call that 'done'.
   2730 	** I do this so the comparison in the
   2731 	** inner while loop below can be simple
   2732 	*/
   2733 	if (++last == adapter->num_tx_desc) last = 0;
   2734 	done = last;
   2735 
   2736         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2737             BUS_DMASYNC_POSTREAD);
   2738 	/*
   2739 	** Only the EOP descriptor of a packet now has the DD
   2740 	** bit set, this is what we look for...
   2741 	*/
   2742 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   2743 		/* We clean the range of the packet */
   2744 		while (first != done) {
   2745 			tx_desc->upper.data = 0;
   2746 			tx_desc->lower.data = 0;
   2747 			tx_desc->buffer_addr = 0;
   2748 			++txr->tx_avail;
   2749 
   2750 			if (tx_buffer->m_head) {
   2751 				bus_dmamap_sync(txr->txtag->dt_dmat,
   2752 				    tx_buffer->map,
   2753 				    0, tx_buffer->m_head->m_pkthdr.len,
   2754 				    BUS_DMASYNC_POSTWRITE);
   2755 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2756 				m_freem(tx_buffer->m_head);
   2757 				tx_buffer->m_head = NULL;
   2758 				tx_buffer->map = NULL;
   2759 			}
   2760 			tx_buffer->eop_index = -1;
   2761 			getmicrotime(&txr->watchdog_time);
   2762 
   2763 			if (++first == adapter->num_tx_desc)
   2764 				first = 0;
   2765 
   2766 			tx_buffer = &txr->tx_buffers[first];
   2767 			tx_desc =
   2768 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2769 		}
   2770 		++ifp->if_opackets;
   2771 		/* See if there is more work now */
   2772 		last = tx_buffer->eop_index;
   2773 		if (last != -1) {
   2774 			eop_desc =
   2775 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2776 			/* Get next done point */
   2777 			if (++last == adapter->num_tx_desc) last = 0;
   2778 			done = last;
   2779 		} else
   2780 			break;
   2781 	}
   2782 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2783 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2784 
   2785 	txr->next_to_clean = first;
   2786 
   2787 	/*
   2788 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   2789 	 * it is OK to send packets. If there are no pending descriptors,
   2790 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   2791 	 * restart the timeout.
   2792 	 */
   2793 	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
   2794 		ifp->if_flags &= ~IFF_OACTIVE;
   2795 		if (txr->tx_avail == adapter->num_tx_desc) {
   2796 			txr->watchdog_check = FALSE;
   2797 			return false;
   2798 		}
   2799 	}
   2800 
   2801 	return true;
   2802 }
   2803 
   2804 /*********************************************************************
   2805  *
   2806  *  Refresh mbuf buffers for RX descriptor rings
   2807  *   - now keeps its own state so discards due to resource
    2808  *     exhaustion are unnecessary; if an mbuf cannot be obtained
    2809  *     it just returns, keeping its placeholder, so it can simply
    2810  *     be called again later to retry.
   2811  *
   2812  **********************************************************************/
   2813 static void
   2814 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
   2815 {
   2816 	struct adapter		*adapter = rxr->adapter;
   2817 	struct ixv_rx_buf	*rxbuf;
   2818 	struct mbuf		*mh, *mp;
   2819 	int			i, j, error;
   2820 	bool			refreshed = false;
   2821 
   2822 	i = j = rxr->next_to_refresh;
   2823         /* Get the control variable, one beyond refresh point */
   2824 	if (++j == adapter->num_rx_desc)
   2825 		j = 0;
   2826 	while (j != limit) {
   2827 		rxbuf = &rxr->rx_buffers[i];
   2828 		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
   2829 			mh = m_gethdr(M_DONTWAIT, MT_DATA);
   2830 			if (mh == NULL)
   2831 				goto update;
    2832 			mh->m_pkthdr.len = mh->m_len = MHLEN;
    2834 			mh->m_flags |= M_PKTHDR;
   2835 			m_adj(mh, ETHER_ALIGN);
   2836 			/* Get the memory mapping */
   2837 			error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   2838 			    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   2839 			if (error != 0) {
   2840 				printf("GET BUF: dmamap load"
   2841 				    " failure - %d\n", error);
   2842 				m_free(mh);
   2843 				goto update;
   2844 			}
   2845 			rxbuf->m_head = mh;
   2846 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2847 			    BUS_DMASYNC_PREREAD);
   2848 			rxr->rx_base[i].read.hdr_addr =
   2849 			    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   2850 		}
   2851 
   2852 		if (rxbuf->m_pack == NULL) {
   2853 			mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   2854 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
    2855 			if (mp == NULL) {
    2856 				rxr->no_jmbuf.ev_count++;
    2857 				goto update;
    2858 			}
   2860 
   2861 			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   2862 			/* Get the memory mapping */
   2863 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   2864 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   2865 			if (error != 0) {
   2866 				printf("GET BUF: dmamap load"
   2867 				    " failure - %d\n", error);
   2868 				m_free(mp);
   2869 				rxbuf->m_pack = NULL;
   2870 				goto update;
   2871 			}
   2872 			rxbuf->m_pack = mp;
   2873 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2874 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   2875 			rxr->rx_base[i].read.pkt_addr =
   2876 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   2877 		}
   2878 
   2879 		refreshed = true;
   2880 		rxr->next_to_refresh = i = j;
   2881 		/* Calculate next index */
   2882 		if (++j == adapter->num_rx_desc)
   2883 			j = 0;
   2884 	}
   2885 update:
   2886 	if (refreshed) /* update tail index */
   2887 		IXGBE_WRITE_REG(&adapter->hw,
   2888 		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
   2889 	return;
   2890 }
   2891 
   2892 /*********************************************************************
   2893  *
   2894  *  Allocate memory for rx_buffer structures. Since we use one
   2895  *  rx_buffer per received packet, the maximum number of rx_buffer's
   2896  *  that we'll need is equal to the number of receive descriptors
   2897  *  that we've allocated.
   2898  *
   2899  **********************************************************************/
   2900 static int
   2901 ixv_allocate_receive_buffers(struct rx_ring *rxr)
   2902 {
   2903 	struct	adapter 	*adapter = rxr->adapter;
   2904 	device_t 		dev = adapter->dev;
   2905 	struct ixv_rx_buf 	*rxbuf;
   2906 	int             	i, bsize, error;
   2907 
   2908 	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
   2909 	if (!(rxr->rx_buffers =
   2910 	    (struct ixv_rx_buf *) malloc(bsize,
   2911 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2912 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   2913 		error = ENOMEM;
   2914 		goto fail;
   2915 	}
   2916 
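	/* DMA tag for the (header-split) header mbufs: single MSIZE segments */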
   2917 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2918 				   1, 0,	/* alignment, bounds */
   2919 				   MSIZE,		/* maxsize */
   2920 				   1,			/* nsegments */
   2921 				   MSIZE,		/* maxsegsize */
   2922 				   0,			/* flags */
   2923 				   &rxr->htag))) {
    2924 		aprint_error_dev(dev, "Unable to create RX header DMA tag\n");
   2925 		goto fail;
   2926 	}
   2927 
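	/* DMA tag for the payload clusters: single jumbo-page sized segments */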
   2928 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2929 				   1, 0,	/* alignment, bounds */
   2930 				   MJUMPAGESIZE,	/* maxsize */
   2931 				   1,			/* nsegments */
   2932 				   MJUMPAGESIZE,	/* maxsegsize */
   2933 				   0,			/* flags */
   2934 				   &rxr->ptag))) {
    2935 		aprint_error_dev(dev, "Unable to create RX payload DMA tag\n");
   2936 		goto fail;
   2937 	}
   2938 
   2939 	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
   2940 		rxbuf = &rxr->rx_buffers[i];
   2941 		error = ixgbe_dmamap_create(rxr->htag,
   2942 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   2943 		if (error) {
   2944 			aprint_error_dev(dev, "Unable to create RX head map\n");
   2945 			goto fail;
   2946 		}
   2947 		error = ixgbe_dmamap_create(rxr->ptag,
   2948 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   2949 		if (error) {
   2950 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   2951 			goto fail;
   2952 		}
   2953 	}
   2954 
   2955 	return (0);
   2956 
   2957 fail:
   2958 	/* Frees all, but can handle partial completion */
   2959 	ixv_free_receive_structures(adapter);
   2960 	return (error);
   2961 }
   2962 
   2963 static void
   2964 ixv_free_receive_ring(struct rx_ring *rxr)
   2965 {
   2966 	struct  adapter         *adapter;
   2967 	struct ixv_rx_buf       *rxbuf;
   2968 	int i;
   2969 
   2970 	adapter = rxr->adapter;
   2971 	for (i = 0; i < adapter->num_rx_desc; i++) {
   2972 		rxbuf = &rxr->rx_buffers[i];
   2973 		if (rxbuf->m_head != NULL) {
   2974 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2975 			    BUS_DMASYNC_POSTREAD);
   2976 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   2977 			rxbuf->m_head->m_flags |= M_PKTHDR;
   2978 			m_freem(rxbuf->m_head);
   2979 		}
   2980 		if (rxbuf->m_pack != NULL) {
   2981 			/* XXX not ixgbe_ ? */
   2982 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2983 			    0, rxbuf->m_pack->m_pkthdr.len,
   2984 			    BUS_DMASYNC_POSTREAD);
   2985 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   2986 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   2987 			m_freem(rxbuf->m_pack);
   2988 		}
   2989 		rxbuf->m_head = NULL;
   2990 		rxbuf->m_pack = NULL;
   2991 	}
   2992 }
   2993 
   2994 
   2995 /*********************************************************************
   2996  *
   2997  *  Initialize a receive ring and its buffers.
   2998  *
   2999  **********************************************************************/
   3000 static int
   3001 ixv_setup_receive_ring(struct rx_ring *rxr)
   3002 {
   3003 	struct	adapter 	*adapter;
   3004 	struct ixv_rx_buf	*rxbuf;
   3005 #ifdef LRO
   3006 	struct ifnet		*ifp;
   3007 	struct lro_ctrl		*lro = &rxr->lro;
   3008 #endif /* LRO */
   3009 	int			rsize, error = 0;
   3010 
   3011 	adapter = rxr->adapter;
   3012 #ifdef LRO
   3013 	ifp = adapter->ifp;
   3014 #endif /* LRO */
   3015 
   3016 	/* Clear the ring contents */
   3017 	IXV_RX_LOCK(rxr);
   3018 	rsize = roundup2(adapter->num_rx_desc *
   3019 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3020 	bzero((void *)rxr->rx_base, rsize);
   3021 
   3022 	/* Free current RX buffer structs and their mbufs */
   3023 	ixv_free_receive_ring(rxr);
   3024 
   3025 	IXV_RX_UNLOCK(rxr);
   3026 
   3027 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3028 	 * or size of jumbo mbufs may have changed.
   3029 	 */
   3030 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3031 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   3032 
   3033 	IXV_RX_LOCK(rxr);
   3034 
   3035 	/* Configure header split? */
   3036 	if (ixv_header_split)
   3037 		rxr->hdr_split = TRUE;
   3038 
   3039 	/* Now replenish the mbufs */
   3040 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3041 		struct mbuf	*mh, *mp;
   3042 
   3043 		rxbuf = &rxr->rx_buffers[j];
   3044 		/*
    3045 		** Don't allocate mbufs if not
    3046 		** doing header split; it's wasteful.
   3047 		*/
   3048 		if (rxr->hdr_split == FALSE)
   3049 			goto skip_head;
   3050 
   3051 		/* First the header */
   3052 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3053 		if (rxbuf->m_head == NULL) {
   3054 			error = ENOBUFS;
   3055 			goto fail;
   3056 		}
   3057 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3058 		mh = rxbuf->m_head;
   3059 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3060 		mh->m_flags |= M_PKTHDR;
   3061 		/* Get the memory mapping */
   3062 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3063 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3064 		if (error != 0) /* Nothing elegant to do here */
   3065 			goto fail;
   3066 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3067 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3068 		/* Update descriptor */
   3069 		rxr->rx_base[j].read.hdr_addr =
   3070 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3071 
   3072 skip_head:
   3073 		/* Now the payload cluster */
   3074 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3075 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3076 		if (rxbuf->m_pack == NULL) {
   3077 			error = ENOBUFS;
   3078                         goto fail;
   3079 		}
   3080 		mp = rxbuf->m_pack;
   3081 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3082 		/* Get the memory mapping */
   3083 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3084 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3085 		if (error != 0)
   3086                         goto fail;
   3087 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3088 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3089 		/* Update descriptor */
   3090 		rxr->rx_base[j].read.pkt_addr =
   3091 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3092 	}
   3093 
   3094 
   3095 	/* Setup our descriptor indices */
   3096 	rxr->next_to_check = 0;
   3097 	rxr->next_to_refresh = 0;
   3098 	rxr->lro_enabled = FALSE;
   3099 	rxr->rx_split_packets.ev_count = 0;
   3100 	rxr->rx_bytes.ev_count = 0;
   3101 	rxr->discard = FALSE;
   3102 
   3103 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3104 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3105 
   3106 #ifdef LRO
   3107 	/*
   3108 	** Now set up the LRO interface:
   3109 	*/
   3110 	if (ifp->if_capenable & IFCAP_LRO) {
   3111 		device_t dev = adapter->dev;
   3112 		int err = tcp_lro_init(lro);
   3113 		if (err) {
   3114 			device_printf(dev, "LRO Initialization failed!\n");
   3115 			goto fail;
   3116 		}
   3117 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   3118 		rxr->lro_enabled = TRUE;
   3119 		lro->ifp = adapter->ifp;
   3120 	}
   3121 #endif /* LRO */
   3122 
   3123 	IXV_RX_UNLOCK(rxr);
   3124 	return (0);
   3125 
   3126 fail:
   3127 	ixv_free_receive_ring(rxr);
   3128 	IXV_RX_UNLOCK(rxr);
   3129 	return (error);
   3130 }
   3131 
   3132 /*********************************************************************
   3133  *
   3134  *  Initialize all receive rings.
   3135  *
   3136  **********************************************************************/
   3137 static int
   3138 ixv_setup_receive_structures(struct adapter *adapter)
   3139 {
   3140 	struct rx_ring *rxr = adapter->rx_rings;
   3141 	int j;
   3142 
   3143 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   3144 		if (ixv_setup_receive_ring(rxr))
   3145 			goto fail;
   3146 
   3147 	return (0);
   3148 fail:
   3149 	/*
    3150 	 * Free RX buffers allocated so far; we will only handle
    3151 	 * the rings that completed, since the failing case will have
    3152 	 * cleaned up after itself. 'j' failed, so it's the terminus.
   3153 	 */
   3154 	for (int i = 0; i < j; ++i) {
   3155 		rxr = &adapter->rx_rings[i];
   3156 		ixv_free_receive_ring(rxr);
   3157 	}
   3158 
   3159 	return (ENOBUFS);
   3160 }
   3161 
   3162 /*********************************************************************
   3163  *
   3164  *  Setup receive registers and features.
   3165  *
   3166  **********************************************************************/
   3167 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   3168 
   3169 static void
   3170 ixv_initialize_receive_units(struct adapter *adapter)
   3171 {
   3172 	int i;
   3173 	struct	rx_ring	*rxr = adapter->rx_rings;
   3174 	struct ixgbe_hw	*hw = &adapter->hw;
   3175 	struct ifnet   *ifp = adapter->ifp;
   3176 	u32		bufsz, fctrl, rxcsum, hlreg;
   3177 
   3178 
   3179 	/* Enable broadcasts */
   3180 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   3181 	fctrl |= IXGBE_FCTRL_BAM;
   3182 	fctrl |= IXGBE_FCTRL_DPF;
   3183 	fctrl |= IXGBE_FCTRL_PMCF;
   3184 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   3185 
   3186 	/* Set for Jumbo Frames? */
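	/* bufsz below is the SRRCTL BSIZEPKT encoding, in 1 KB units */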
   3187 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   3188 	if (ifp->if_mtu > ETHERMTU) {
   3189 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   3190 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3191 	} else {
   3192 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   3193 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3194 	}
   3195 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   3196 
   3197 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   3198 		u64 rdba = rxr->rxdma.dma_paddr;
   3199 		u32 reg, rxdctl;
   3200 
   3201 		/* Do the queue enabling first */
   3202 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3203 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3204 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   3205 		for (int k = 0; k < 10; k++) {
   3206 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   3207 			    IXGBE_RXDCTL_ENABLE)
   3208 				break;
   3209 			else
   3210 				msec_delay(1);
   3211 		}
   3212 		wmb();
   3213 
   3214 		/* Setup the Base and Length of the Rx Descriptor Ring */
   3215 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   3216 		    (rdba & 0x00000000ffffffffULL));
   3217 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   3218 		    (rdba >> 32));
   3219 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   3220 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   3221 
   3222 		/* Set up the SRRCTL register */
   3223 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   3224 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   3225 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   3226 		reg |= bufsz;
   3227 		if (rxr->hdr_split) {
   3228 			/* Use a standard mbuf for the header */
   3229 			reg |= ((IXV_RX_HDR <<
   3230 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   3231 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   3232 			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   3233 		} else
   3234 			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   3235 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   3236 
   3237 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   3238 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   3239 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   3240 		    adapter->num_rx_desc - 1);
   3241 	}
   3242 
   3243 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   3244 
   3245 	if (ifp->if_capenable & IFCAP_RXCSUM)
   3246 		rxcsum |= IXGBE_RXCSUM_PCSD;
   3247 
   3248 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   3249 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   3250 
   3251 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   3252 
   3253 	return;
   3254 }
   3255 
   3256 /*********************************************************************
   3257  *
   3258  *  Free all receive rings.
   3259  *
   3260  **********************************************************************/
   3261 static void
   3262 ixv_free_receive_structures(struct adapter *adapter)
   3263 {
   3264 	struct rx_ring *rxr = adapter->rx_rings;
   3265 
   3266 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   3267 #ifdef LRO
   3268 		struct lro_ctrl		*lro = &rxr->lro;
   3269 #endif /* LRO */
   3270 		ixv_free_receive_buffers(rxr);
   3271 #ifdef LRO
   3272 		/* Free LRO memory */
   3273 		tcp_lro_free(lro);
   3274 #endif /* LRO */
   3275 		/* Free the ring memory as well */
   3276 		ixv_dma_free(adapter, &rxr->rxdma);
   3277 		IXV_RX_LOCK_DESTROY(rxr);
   3278 	}
   3279 
   3280 	free(adapter->rx_rings, M_DEVBUF);
   3281 }
   3282 
   3283 
   3284 /*********************************************************************
   3285  *
   3286  *  Free receive ring data structures
   3287  *
   3288  **********************************************************************/
   3289 static void
   3290 ixv_free_receive_buffers(struct rx_ring *rxr)
   3291 {
   3292 	struct adapter		*adapter = rxr->adapter;
   3293 	struct ixv_rx_buf	*rxbuf;
   3294 
   3295 	INIT_DEBUGOUT("free_receive_structures: begin");
   3296 
   3297 	/* Cleanup any existing buffers */
   3298 	if (rxr->rx_buffers != NULL) {
   3299 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   3300 			rxbuf = &rxr->rx_buffers[i];
   3301 			if (rxbuf->m_head != NULL) {
   3302 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3303 				    BUS_DMASYNC_POSTREAD);
   3304 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3305 				rxbuf->m_head->m_flags |= M_PKTHDR;
   3306 				m_freem(rxbuf->m_head);
   3307 			}
   3308 			if (rxbuf->m_pack != NULL) {
   3309 				/* XXX not ixgbe_* ? */
   3310 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3311 				    0, rxbuf->m_pack->m_pkthdr.len,
   3312 				    BUS_DMASYNC_POSTREAD);
   3313 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3314 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   3315 				m_freem(rxbuf->m_pack);
   3316 			}
   3317 			rxbuf->m_head = NULL;
   3318 			rxbuf->m_pack = NULL;
   3319 			if (rxbuf->hmap != NULL) {
   3320 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   3321 				rxbuf->hmap = NULL;
   3322 			}
   3323 			if (rxbuf->pmap != NULL) {
   3324 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   3325 				rxbuf->pmap = NULL;
   3326 			}
   3327 		}
   3328 		if (rxr->rx_buffers != NULL) {
   3329 			free(rxr->rx_buffers, M_DEVBUF);
   3330 			rxr->rx_buffers = NULL;
   3331 		}
   3332 	}
   3333 
   3334 	if (rxr->htag != NULL) {
   3335 		ixgbe_dma_tag_destroy(rxr->htag);
   3336 		rxr->htag = NULL;
   3337 	}
   3338 	if (rxr->ptag != NULL) {
   3339 		ixgbe_dma_tag_destroy(rxr->ptag);
   3340 		rxr->ptag = NULL;
   3341 	}
   3342 
   3343 	return;
   3344 }
   3345 
   3346 static __inline void
   3347 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   3348 {
   3349 	int s;
   3350 
   3351 #ifdef LRO
   3352 	struct adapter	*adapter = ifp->if_softc;
   3353 	struct ethercom *ec = &adapter->osdep.ec;
   3354 
    3355 	/*
    3356 	 * At the moment LRO is done only for IPv4/TCP packets whose TCP
    3357 	 * checksum has already been verified by hardware and which carry
    3358 	 * no VLAN tag in the Ethernet header.
    3359 	 */
    3360 	if (rxr->lro_enabled &&
    3361 	    (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
    3362 	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
    3363 	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
    3364 	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
    3365 	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
    3366 	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
    3367 		/*
    3368 		 * Send to the stack only if:
    3369 		 *  - there are no LRO resources, or
    3370 		 *  - the LRO enqueue fails;
    3371 		 * otherwise the mbuf is consumed here.
    3372 		 */
    3373 		if (rxr->lro.lro_cnt != 0)
    3374 			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
    3375 				return;
    3376 	}
   3377 #endif /* LRO */
   3378 
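         	/*
         	 * Note (added commentary): the RX ring lock is dropped while
         	 * the mbuf is handed to the stack below; if_input() may call
         	 * back into the driver or take other locks, so holding the
         	 * ring lock across it would risk recursion or deadlock.  The
         	 * lock is re-taken before returning to the caller.
         	 */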
   3379 	IXV_RX_UNLOCK(rxr);
   3380 
   3381 	s = splnet();
   3382 	/* Pass this up to any BPF listeners. */
   3383 	bpf_mtap(ifp, m);
    3384 	(*ifp->if_input)(ifp, m);
   3385 	splx(s);
   3386 
   3387 	IXV_RX_LOCK(rxr);
   3388 }
   3389 
   3390 static __inline void
   3391 ixv_rx_discard(struct rx_ring *rxr, int i)
   3392 {
   3393 	struct ixv_rx_buf	*rbuf;
   3394 
   3395 	rbuf = &rxr->rx_buffers[i];
    3396 	if (rbuf->fmp != NULL) {	/* Partial chain? */
   3397 		rbuf->fmp->m_flags |= M_PKTHDR;
   3398 		m_freem(rbuf->fmp);
   3399 		rbuf->fmp = NULL;
   3400 	}
   3401 
   3402 	/*
   3403 	** With advanced descriptors the writeback
    3404 	** clobbers the buffer addresses, so it's easier
    3405 	** to just free the existing mbufs and take
    3406 	** the normal refresh path to get new buffers
    3407 	** and mappings.
   3408 	*/
   3409 	if (rbuf->m_head) {
   3410 		m_free(rbuf->m_head);
   3411 		rbuf->m_head = NULL;
   3412 	}
   3413 
   3414 	if (rbuf->m_pack) {
   3415 		m_free(rbuf->m_pack);
   3416 		rbuf->m_pack = NULL;
   3417 	}
   3418 
   3419 	return;
   3420 }
   3421 
   3422 
   3423 /*********************************************************************
   3424  *
   3425  *  This routine executes in interrupt context. It replenishes
   3426  *  the mbufs in the descriptor and sends data which has been
   3427  *  dma'ed into host memory to upper layer.
   3428  *
   3429  *  We loop at most count times if count is > 0, or until done if
   3430  *  count < 0.
   3431  *
   3432  *  Return TRUE for more work, FALSE for all clean.
   3433  *********************************************************************/
   3434 static bool
   3435 ixv_rxeof(struct ix_queue *que, int count)
   3436 {
   3437 	struct adapter		*adapter = que->adapter;
   3438 	struct rx_ring		*rxr = que->rxr;
   3439 	struct ifnet		*ifp = adapter->ifp;
   3440 #ifdef LRO
   3441 	struct lro_ctrl		*lro = &rxr->lro;
   3442 	struct lro_entry	*queued;
   3443 #endif /* LRO */
   3444 	int			i, nextp, processed = 0;
   3445 	u32			staterr = 0;
   3446 	union ixgbe_adv_rx_desc	*cur;
   3447 	struct ixv_rx_buf	*rbuf, *nbuf;
   3448 
   3449 	IXV_RX_LOCK(rxr);
   3450 
   3451 	for (i = rxr->next_to_check; count != 0;) {
   3452 		struct mbuf	*sendmp, *mh, *mp;
   3453 		u32		ptype;
   3454 		u16		hlen, plen, hdr, vtag;
   3455 		bool		eop;
   3456 
   3457 		/* Sync the ring. */
   3458 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3459 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   3460 
   3461 		cur = &rxr->rx_base[i];
   3462 		staterr = le32toh(cur->wb.upper.status_error);
   3463 
   3464 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   3465 			break;
   3466 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   3467 			break;
   3468 
   3469 		count--;
   3470 		sendmp = NULL;
   3471 		nbuf = NULL;
   3472 		cur->wb.upper.status_error = 0;
   3473 		rbuf = &rxr->rx_buffers[i];
   3474 		mh = rbuf->m_head;
   3475 		mp = rbuf->m_pack;
   3476 
   3477 		plen = le16toh(cur->wb.upper.length);
   3478 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   3479 		    IXGBE_RXDADV_PKTTYPE_MASK;
   3480 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   3481 		vtag = le16toh(cur->wb.upper.vlan);
   3482 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   3483 
   3484 		/* Make sure all parts of a bad packet are discarded */
   3485 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   3486 		    (rxr->discard)) {
   3487 			ifp->if_ierrors++;
   3488 			rxr->rx_discarded.ev_count++;
   3489 			if (!eop)
   3490 				rxr->discard = TRUE;
   3491 			else
   3492 				rxr->discard = FALSE;
   3493 			ixv_rx_discard(rxr, i);
   3494 			goto next_desc;
   3495 		}
   3496 
   3497 		if (!eop) {
   3498 			nextp = i + 1;
   3499 			if (nextp == adapter->num_rx_desc)
   3500 				nextp = 0;
   3501 			nbuf = &rxr->rx_buffers[nextp];
   3502 			prefetch(nbuf);
   3503 		}
   3504 		/*
   3505 		** The header mbuf is ONLY used when header
   3506 		** split is enabled, otherwise we get normal
    3507 		** behavior, i.e., both header and payload
   3508 		** are DMA'd into the payload buffer.
   3509 		**
   3510 		** Rather than using the fmp/lmp global pointers
   3511 		** we now keep the head of a packet chain in the
   3512 		** buffer struct and pass this along from one
   3513 		** descriptor to the next, until we get EOP.
   3514 		*/
   3515 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   3516 			/* This must be an initial descriptor */
   3517 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   3518 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   3519 			if (hlen > IXV_RX_HDR)
   3520 				hlen = IXV_RX_HDR;
   3521 			mh->m_len = hlen;
   3522 			mh->m_flags |= M_PKTHDR;
   3523 			mh->m_next = NULL;
   3524 			mh->m_pkthdr.len = mh->m_len;
   3525 			/* Null buf pointer so it is refreshed */
   3526 			rbuf->m_head = NULL;
   3527 			/*
    3528 			** Check the payload length; this
    3529 			** could be zero if it's a small
    3530 			** packet.
   3531 			*/
   3532 			if (plen > 0) {
   3533 				mp->m_len = plen;
   3534 				mp->m_next = NULL;
   3535 				mp->m_flags &= ~M_PKTHDR;
   3536 				mh->m_next = mp;
   3537 				mh->m_pkthdr.len += mp->m_len;
   3538 				/* Null buf pointer so it is refreshed */
   3539 				rbuf->m_pack = NULL;
   3540 				rxr->rx_split_packets.ev_count++;
   3541 			}
    3542 			/*
    3543 			** Now create the forward
    3544 			** chain so when complete
    3545 			** we won't have to.
    3546 			*/
    3547 			if (eop == 0) {
    3548 				/* stash the chain head */
    3549 				nbuf->fmp = mh;
    3550 				/* Make forward chain */
    3551 				if (plen)
    3552 					mp->m_next = nbuf->m_pack;
    3553 				else
    3554 					mh->m_next = nbuf->m_pack;
    3555 			} else {
    3556 				/* Singlet, prepare to send */
    3557 				sendmp = mh;
    3558 				if (VLAN_ATTACHED(&adapter->osdep.ec) &&
    3559 				    (staterr & IXGBE_RXD_STAT_VP)) {
    3560 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
    3561 					    printf("%s: could not apply VLAN "
    3562 					        "tag", __func__));
    3563 				}
    3564 			}
   3565 		} else {
   3566 			/*
   3567 			** Either no header split, or a
   3568 			** secondary piece of a fragmented
   3569 			** split packet.
   3570 			*/
   3571 			mp->m_len = plen;
   3572 			/*
    3573 			** See if there is a stored chain head
    3574 			** that tells us what this piece belongs to.
   3575 			*/
   3576 			sendmp = rbuf->fmp;
   3577 			rbuf->m_pack = rbuf->fmp = NULL;
   3578 
   3579 			if (sendmp != NULL) /* secondary frag */
   3580 				sendmp->m_pkthdr.len += mp->m_len;
   3581 			else {
   3582 				/* first desc of a non-ps chain */
   3583 				sendmp = mp;
   3584 				sendmp->m_flags |= M_PKTHDR;
   3585 				sendmp->m_pkthdr.len = mp->m_len;
   3586 				if (staterr & IXGBE_RXD_STAT_VP) {
   3587 					/* XXX Do something reasonable on
   3588 					 * error.
   3589 					 */
   3590 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3591 					    printf("%s: could not apply VLAN "
   3592 					        "tag", __func__));
   3593 				}
    3594 			}
   3595 			/* Pass the head pointer on */
   3596 			if (eop == 0) {
   3597 				nbuf->fmp = sendmp;
   3598 				sendmp = NULL;
   3599 				mp->m_next = nbuf->m_pack;
   3600 			}
   3601 		}
   3602 		++processed;
   3603 		/* Sending this frame? */
   3604 		if (eop) {
   3605 			sendmp->m_pkthdr.rcvif = ifp;
   3606 			ifp->if_ipackets++;
   3607 			rxr->rx_packets.ev_count++;
   3608 			/* capture data for AIM */
   3609 			rxr->bytes += sendmp->m_pkthdr.len;
   3610 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   3611 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   3612 				ixv_rx_checksum(staterr, sendmp, ptype,
   3613 				   &adapter->stats);
   3614 			}
   3615 #if __FreeBSD_version >= 800000
   3616 			sendmp->m_pkthdr.flowid = que->msix;
   3617 			sendmp->m_flags |= M_FLOWID;
   3618 #endif
   3619 		}
   3620 next_desc:
   3621 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3622 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3623 
   3624 		/* Advance our pointers to the next descriptor. */
   3625 		if (++i == adapter->num_rx_desc)
   3626 			i = 0;
   3627 
   3628 		/* Now send to the stack or do LRO */
   3629 		if (sendmp != NULL)
   3630 			ixv_rx_input(rxr, ifp, sendmp, ptype);
   3631 
    3632 		/* Refresh the mbufs every eight descriptors */
   3633 		if (processed == 8) {
   3634 			ixv_refresh_mbufs(rxr, i);
   3635 			processed = 0;
   3636 		}
   3637 	}
   3638 
   3639 	/* Refresh any remaining buf structs */
   3640 	if (ixv_rx_unrefreshed(rxr))
   3641 		ixv_refresh_mbufs(rxr, i);
   3642 
   3643 	rxr->next_to_check = i;
   3644 
   3645 #ifdef LRO
   3646 	/*
   3647 	 * Flush any outstanding LRO work
   3648 	 */
   3649 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   3650 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   3651 		tcp_lro_flush(lro, queued);
   3652 	}
   3653 #endif /* LRO */
   3654 
   3655 	IXV_RX_UNLOCK(rxr);
   3656 
   3657 	/*
    3658 	** Is there still cleaning to do?
    3659 	** If so, schedule another interrupt.
   3660 	*/
   3661 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   3662 		ixv_rearm_queues(adapter, (u64)(1ULL << que->msix));
   3663 		return true;
   3664 	}
   3665 
   3666 	return false;
   3667 }
   3668 
   3669 
   3670 /*********************************************************************
   3671  *
   3672  *  Verify that the hardware indicated that the checksum is valid.
   3673  *  Inform the stack about the status of checksum so that stack
   3674  *  doesn't spend time verifying the checksum.
   3675  *
   3676  *********************************************************************/
   3677 static void
   3678 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   3679     struct ixgbevf_hw_stats *stats)
   3680 {
   3681 	u16	status = (u16) staterr;
   3682 	u8	errors = (u8) (staterr >> 24);
   3683 #if 0
   3684 	bool	sctp = FALSE;
   3685 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3686 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   3687 		sctp = TRUE;
   3688 #endif
   3689 	if (status & IXGBE_RXD_STAT_IPCS) {
   3690 		stats->ipcs.ev_count++;
   3691 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   3692 			/* IP Checksum Good */
   3693 			mp->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   3694 
   3695 		} else {
   3696 			stats->ipcs_bad.ev_count++;
   3697 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   3698 		}
   3699 	}
   3700 	if (status & IXGBE_RXD_STAT_L4CS) {
   3701 		stats->l4cs.ev_count++;
   3702 		u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   3703 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   3704 			mp->m_pkthdr.csum_flags |= type;
   3705 		} else {
   3706 			stats->l4cs_bad.ev_count++;
   3707 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   3708 		}
   3709 	}
   3710 	return;
   3711 }
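         /*
          * Illustrative example (added commentary): for a TCP/IPv4 packet that
          * passes both hardware checks above, IPCS and L4CS are set with no
          * error bits, so the mbuf leaves here with M_CSUM_IPv4 plus all four
          * M_CSUM_{TCP,UDP}v{4,6} bits; the stack is expected to consult only
          * the bits matching the protocol actually present in the packet.
          */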
   3712 
   3713 static void
   3714 ixv_setup_vlan_support(struct adapter *adapter)
   3715 {
   3716 	struct ixgbe_hw *hw = &adapter->hw;
   3717 	u32		ctrl, vid, vfta, retry;
   3718 
   3719 
   3720 	/*
    3721 	** We get here through init_locked, meaning
    3722 	** a soft reset has already cleared the VFTA
    3723 	** and other state, so if no VLANs have been
    3724 	** registered there is nothing to do.
   3725 	*/
   3726 	if (adapter->num_vlans == 0)
   3727 		return;
   3728 
   3729 	/* Enable the queues */
   3730 	for (int i = 0; i < adapter->num_queues; i++) {
   3731 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3732 		ctrl |= IXGBE_RXDCTL_VME;
   3733 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   3734 	}
   3735 
   3736 	/*
    3737 	** A soft reset zeroes out the VFTA, so
   3738 	** we need to repopulate it now.
   3739 	*/
   3740 	for (int i = 0; i < VFTA_SIZE; i++) {
   3741 		if (ixv_shadow_vfta[i] == 0)
   3742 			continue;
   3743 		vfta = ixv_shadow_vfta[i];
   3744 		/*
    3745 		** Reconstruct the VLAN IDs from
    3746 		** the bits set in each 32-bit
    3747 		** word of the shadow array.
    3748 		*/
    3749 		for (int j = 0; j < 32; j++) {
   3750 			retry = 0;
   3751 			if ((vfta & (1 << j)) == 0)
   3752 				continue;
   3753 			vid = (i * 32) + j;
   3754 			/* Call the shared code mailbox routine */
   3755 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   3756 				if (++retry > 5)
   3757 					break;
   3758 			}
   3759 		}
   3760 	}
   3761 }
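         /*
          * Illustrative example (added commentary): the shadow VFTA keeps one
          * bit per VLAN ID, 32 IDs per array word.  VLAN 100, for instance, is
          * recorded as bit 100 % 32 = 4 of word 100 / 32 = 3, and the loop
          * above reconstructs it as vid = 3 * 32 + 4 = 100 before handing it
          * to the PF via the ixgbe_set_vfta() mailbox call.
          */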
   3762 
   3763 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   3764 /*
    3765 ** This routine is run via a vlan config EVENT;
    3766 ** it enables use of the HW filter table since
    3767 ** we can get the vlan id. It just creates the
    3768 ** entry in the soft version of the VFTA; init
    3769 ** will repopulate the real table.
   3770 */
   3771 static void
   3772 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3773 {
   3774 	struct adapter	*adapter = ifp->if_softc;
   3775 	u16		index, bit;
   3776 
   3777 	if (ifp->if_softc !=  arg)   /* Not our event */
   3778 		return;
   3779 
   3780 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3781 		return;
   3782 
   3783 	IXV_CORE_LOCK(adapter);
   3784 	index = (vtag >> 5) & 0x7F;
   3785 	bit = vtag & 0x1F;
   3786 	ixv_shadow_vfta[index] |= (1 << bit);
   3787 	/* Re-init to load the changes */
   3788 	ixv_init_locked(adapter);
   3789 	IXV_CORE_UNLOCK(adapter);
   3790 }
   3791 
   3792 /*
    3793 ** This routine is run via a vlan
    3794 ** unconfig EVENT; it removes our entry
    3795 ** from the soft VFTA.
   3796 */
   3797 static void
   3798 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3799 {
   3800 	struct adapter	*adapter = ifp->if_softc;
   3801 	u16		index, bit;
   3802 
   3803 	if (ifp->if_softc !=  arg)
   3804 		return;
   3805 
   3806 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3807 		return;
   3808 
   3809 	IXV_CORE_LOCK(adapter);
   3810 	index = (vtag >> 5) & 0x7F;
   3811 	bit = vtag & 0x1F;
   3812 	ixv_shadow_vfta[index] &= ~(1 << bit);
   3813 	/* Re-init to load the changes */
   3814 	ixv_init_locked(adapter);
   3815 	IXV_CORE_UNLOCK(adapter);
   3816 }
   3817 #endif
   3818 
   3819 static void
   3820 ixv_enable_intr(struct adapter *adapter)
   3821 {
   3822 	struct ixgbe_hw *hw = &adapter->hw;
   3823 	struct ix_queue *que = adapter->queues;
   3824 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   3825 
   3826 
   3827 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   3828 
   3829 	mask = IXGBE_EIMS_ENABLE_MASK;
   3830 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   3831 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   3832 
    3833 	for (int i = 0; i < adapter->num_queues; i++, que++)
   3834 		ixv_enable_queue(adapter, que->msix);
   3835 
   3836 	IXGBE_WRITE_FLUSH(hw);
   3837 
   3838 	return;
   3839 }
   3840 
   3841 static void
   3842 ixv_disable_intr(struct adapter *adapter)
   3843 {
   3844 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   3845 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   3846 	IXGBE_WRITE_FLUSH(&adapter->hw);
   3847 	return;
   3848 }
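         /*
          * Note (added commentary, based on the usual 82599 VF register
          * semantics): VTEIMS sets bits in the interrupt mask (enabling those
          * causes), VTEIMC clears them (writing ~0 masks everything, as done
          * here), and VTEIAC selects which causes are auto-cleared when an
          * interrupt asserts, as programmed in ixv_enable_intr() above.
          */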
   3849 
   3850 /*
   3851 ** Setup the correct IVAR register for a particular MSIX interrupt
   3852 **  - entry is the register array entry
   3853 **  - vector is the MSIX vector for this queue
   3854 **  - type is RX/TX/MISC
   3855 */
   3856 static void
   3857 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3858 {
   3859 	struct ixgbe_hw *hw = &adapter->hw;
   3860 	u32 ivar, index;
   3861 
   3862 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3863 
   3864 	if (type == -1) { /* MISC IVAR */
   3865 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   3866 		ivar &= ~0xFF;
   3867 		ivar |= vector;
   3868 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   3869 	} else {	/* RX/TX IVARS */
   3870 		index = (16 * (entry & 1)) + (8 * type);
   3871 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   3872 		ivar &= ~(0xFF << index);
   3873 		ivar |= (vector << index);
   3874 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   3875 	}
   3876 }
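         /*
          * Illustrative example (added commentary): each VTIVAR register packs
          * four 8-bit entries, two queues per register with the RX and TX
          * bytes interleaved.  For entry 3, type 1 (TX), the code above
          * selects register VTIVAR(3 >> 1) = VTIVAR(1) and bit offset
          * 16 * (3 & 1) + 8 * 1 = 24, i.e. the top byte of that register.
          */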
   3877 
   3878 static void
   3879 ixv_configure_ivars(struct adapter *adapter)
   3880 {
    3881 	struct ix_queue *que = adapter->queues;
    3882 
    3883 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    3884 		/* First the RX queue entry */
    3885 		ixv_set_ivar(adapter, i, que->msix, 0);
    3886 		/* ... and the TX */
    3887 		ixv_set_ivar(adapter, i, que->msix, 1);
    3888 		/* Set an initial value in EITR */
    3889 		IXGBE_WRITE_REG(&adapter->hw,
    3890 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
    3891 	}
    3892 
    3893 	/* For the Link interrupt */
    3894 	ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
   3895 }
   3896 
   3897 
   3898 /*
   3899 ** Tasklet handler for MSIX MBX interrupts
   3900 **  - do outside interrupt since it might sleep
   3901 */
   3902 static void
   3903 ixv_handle_mbx(void *context)
   3904 {
   3905 	struct adapter  *adapter = context;
   3906 
   3907 	ixgbe_check_link(&adapter->hw,
   3908 	    &adapter->link_speed, &adapter->link_up, 0);
   3909 	ixv_update_link_status(adapter);
   3910 }
   3911 
   3912 /*
    3913 ** The VF stats registers never have a truly virgin
    3914 ** starting point, so this routine tries to make an
    3915 ** artificial one, marking ground zero on attach, as
    3916 ** it were.
   3917 */
   3918 static void
   3919 ixv_save_stats(struct adapter *adapter)
   3920 {
   3921 	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
   3922 		adapter->stats.saved_reset_vfgprc +=
   3923 		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
   3924 		adapter->stats.saved_reset_vfgptc +=
   3925 		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
   3926 		adapter->stats.saved_reset_vfgorc +=
   3927 		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
   3928 		adapter->stats.saved_reset_vfgotc +=
   3929 		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
   3930 		adapter->stats.saved_reset_vfmprc +=
   3931 		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
   3932 	}
   3933 }
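         /*
          * Note (added commentary): together with ixv_init_stats() below, this
          * implements the baseline scheme described above: base_* records the
          * register values seen at attach, and each time stats are saved the
          * delta (current - base) is folded into saved_reset_*, so counts that
          * predate this driver instance are not lost across resets.
          */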
   3934 
   3935 static void
   3936 ixv_init_stats(struct adapter *adapter)
   3937 {
   3938 	struct ixgbe_hw *hw = &adapter->hw;
   3939 
   3940 	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   3941 	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   3942 	adapter->stats.last_vfgorc |=
   3943 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   3944 
   3945 	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   3946 	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   3947 	adapter->stats.last_vfgotc |=
   3948 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   3949 
   3950 	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   3951 
   3952 	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
   3953 	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
   3954 	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
   3955 	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
   3956 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
   3957 }
   3958 
   3959 #define UPDATE_STAT_32(reg, last, count)		\
   3960 {							\
   3961 	u32 current = IXGBE_READ_REG(hw, reg);		\
   3962 	if (current < last)				\
   3963 		count += 0x100000000LL;			\
   3964 	last = current;					\
   3965 	count &= 0xFFFFFFFF00000000LL;			\
   3966 	count |= current;				\
   3967 }
   3968 
   3969 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   3970 {							\
   3971 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   3972 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   3973 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   3974 	if (current < last)				\
   3975 		count += 0x1000000000LL;		\
   3976 	last = current;					\
   3977 	count &= 0xFFFFFFF000000000LL;			\
   3978 	count |= current;				\
   3979 }
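         /*
          * Illustrative note (added commentary): the VF octet counters are
          * 36 bits wide, split across LSB/MSB registers.  When the newly read
          * combined value is smaller than the previous one the counter has
          * wrapped, so 2^36 (0x1000000000) is added to the running count, and
          * the low 36 bits are then replaced with the fresh reading.
          */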
   3980 
   3981 /*
   3982 ** ixv_update_stats - Update the board statistics counters.
   3983 */
   3984 void
   3985 ixv_update_stats(struct adapter *adapter)
   3986 {
    3987 	struct ixgbe_hw *hw = &adapter->hw;
    3988 
    3989 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
    3990 	    adapter->stats.vfgprc);
    3991 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
    3992 	    adapter->stats.vfgptc);
    3993 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
    3994 	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
    3995 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
    3996 	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
    3997 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
    3998 	    adapter->stats.vfmprc);
   3999 }
   4000 
   4001 /**********************************************************************
   4002  *
    4003  *  This routine is called when the stats sysctl (ixv_sysctl_stats) is triggered.
   4004  *  This routine provides a way to take a look at important statistics
   4005  *  maintained by the driver and hardware.
   4006  *
   4007  **********************************************************************/
   4008 static void
   4009 ixv_print_hw_stats(struct adapter * adapter)
   4010 {
    4011 	device_t dev = adapter->dev;
    4012 
    4013 	device_printf(dev, "Std Mbuf Failed = %llu\n",
    4014 	    (long long)adapter->mbuf_defrag_failed.ev_count);
    4015 	device_printf(dev, "Driver dropped packets = %llu\n",
    4016 	    (long long)adapter->dropped_pkts.ev_count);
    4017 	device_printf(dev, "watchdog timeouts = %llu\n",
    4018 	    (long long)adapter->watchdog_events.ev_count);
    4019 
    4020 	device_printf(dev, "Good Packets Rcvd = %llu\n",
    4021 	    (long long)adapter->stats.vfgprc);
    4022 	device_printf(dev, "Good Packets Xmtd = %llu\n",
    4023 	    (long long)adapter->stats.vfgptc);
    4024 	device_printf(dev, "TSO Transmissions = %llu\n",
    4025 	    (long long)adapter->tso_tx.ev_count);
   4026 
   4027 }
   4028 
   4029 /**********************************************************************
   4030  *
    4031  *  This routine is called when the debug sysctl (ixv_sysctl_debug) is triggered.
   4032  *  This routine provides a way to take a look at important statistics
   4033  *  maintained by the driver and hardware.
   4034  *
   4035  **********************************************************************/
   4036 static void
   4037 ixv_print_debug_info(struct adapter *adapter)
   4038 {
    4039 	device_t dev = adapter->dev;
    4040 	struct ixgbe_hw		*hw = &adapter->hw;
    4041 	struct ix_queue		*que = adapter->queues;
    4042 	struct rx_ring		*rxr;
    4043 	struct tx_ring		*txr;
    4044 #ifdef LRO
    4045 	struct lro_ctrl		*lro;
    4046 #endif /* LRO */
    4047 
    4048 	device_printf(dev, "Error Byte Count = %u\n",
    4049 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
    4050 
    4051 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    4052 		txr = que->txr;
    4053 		rxr = que->rxr;
    4054 #ifdef LRO
    4055 		lro = &rxr->lro;
    4056 #endif /* LRO */
    4057 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
    4058 		    que->msix, (long)que->irqs);
    4059 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
    4060 		    rxr->me, (long long)rxr->rx_packets.ev_count);
    4061 		device_printf(dev, "RX(%d) Split RX Packets: %lld\n",
    4062 		    rxr->me, (long long)rxr->rx_split_packets.ev_count);
    4063 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
    4064 		    rxr->me, (long)rxr->rx_bytes.ev_count);
    4065 #ifdef LRO
    4066 		device_printf(dev, "RX(%d) LRO Queued= %d\n",
    4067 		    rxr->me, lro->lro_queued);
    4068 		device_printf(dev, "RX(%d) LRO Flushed= %d\n",
    4069 		    rxr->me, lro->lro_flushed);
    4070 #endif /* LRO */
    4071 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
    4072 		    txr->me, (long)txr->total_packets.ev_count);
    4073 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
    4074 		    txr->me, (long)txr->no_desc_avail.ev_count);
    4075 	}
    4076 
    4077 	device_printf(dev, "MBX IRQ Handled: %lu\n",
    4078 	    (long)adapter->mbx_irq.ev_count);
    4079 	return;
   4080 }
   4081 
   4082 static int
   4083 ixv_sysctl_stats(SYSCTLFN_ARGS)
   4084 {
   4085 	struct sysctlnode node;
   4086 	int             error;
   4087 	int		result;
   4088 	struct adapter *adapter;
   4089 
   4090 	node = *rnode;
   4091 	adapter = (struct adapter *)node.sysctl_data;
   4092 	node.sysctl_data = &result;
   4093 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4094 	if (error != 0)
   4095 		return error;
   4096 
   4097 	if (result == 1)
   4098 		ixv_print_hw_stats(adapter);
   4099 
   4100 	return 0;
   4101 }
   4102 
   4103 static int
   4104 ixv_sysctl_debug(SYSCTLFN_ARGS)
   4105 {
   4106 	struct sysctlnode node;
   4107 	int error, result;
   4108 	struct adapter *adapter;
   4109 
   4110 	node = *rnode;
   4111 	adapter = (struct adapter *)node.sysctl_data;
   4112 	node.sysctl_data = &result;
   4113 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4114 
   4115 	if (error)
   4116 		return error;
   4117 
   4118 	if (result == 1)
   4119 		ixv_print_debug_info(adapter);
   4120 
   4121 	return 0;
   4122 }
   4123 
   4124 /*
   4125 ** Set flow control using sysctl:
   4126 ** Flow control values:
   4127 ** 	0 - off
   4128 **	1 - rx pause
   4129 **	2 - tx pause
   4130 **	3 - full
   4131 */
   4132 static int
   4133 ixv_set_flowcntl(SYSCTLFN_ARGS)
   4134 {
   4135 	struct sysctlnode node;
   4136 	int error;
   4137 	struct adapter *adapter;
   4138 
   4139 	node = *rnode;
   4140 	adapter = (struct adapter *)node.sysctl_data;
   4141 	node.sysctl_data = &ixv_flow_control;
   4142 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4143 
   4144 	if (error)
   4145 		return (error);
   4146 
   4147 	switch (ixv_flow_control) {
   4148 		case ixgbe_fc_rx_pause:
   4149 		case ixgbe_fc_tx_pause:
   4150 		case ixgbe_fc_full:
   4151 			adapter->hw.fc.requested_mode = ixv_flow_control;
   4152 			break;
   4153 		case ixgbe_fc_none:
   4154 		default:
   4155 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4156 	}
   4157 
   4158 	ixgbe_fc_enable(&adapter->hw);
   4159 	return error;
   4160 }
   4161 
   4162 const struct sysctlnode *
   4163 ixv_sysctl_instance(struct adapter *adapter)
   4164 {
   4165 	const char *dvname;
   4166 	struct sysctllog **log;
   4167 	int rc;
   4168 	const struct sysctlnode *rnode;
   4169 
   4170 	log = &adapter->sysctllog;
   4171 	dvname = device_xname(adapter->dev);
   4172 
   4173 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   4174 	    0, CTLTYPE_NODE, dvname,
   4175 	    SYSCTL_DESCR("ixv information and settings"),
   4176 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   4177 		goto err;
   4178 
   4179 	return rnode;
   4180 err:
   4181 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   4182 	return NULL;
   4183 }
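         /*
          * Usage sketch (added commentary, hypothetical): a child leaf hanging
          * off the node returned above would typically be created like this,
          * e.g. to hook up ixv_sysctl_debug:
          *
          *	const struct sysctlnode *rnode, *cnode;
          *
          *	if ((rnode = ixv_sysctl_instance(adapter)) != NULL)
          *		sysctl_createv(&adapter->sysctllog, 0, &rnode, &cnode,
          *		    CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
          *		    SYSCTL_DESCR("Debug Info"),
          *		    ixv_sysctl_debug, 0, (void *)adapter, 0,
          *		    CTL_CREATE, CTL_EOL);
          */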
   4184 
   4185 static void
   4186 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
   4187         const char *description, int *limit, int value)
   4188 {
   4189 	const struct sysctlnode *rnode, *cnode;
   4190 	struct sysctllog **log = &adapter->sysctllog;
   4191 
    4192 	*limit = value;
   4193 
   4194 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL)
   4195 		aprint_error_dev(adapter->dev,
   4196 		    "could not create sysctl root\n");
   4197 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   4198 	    CTLFLAG_READWRITE,
   4199 	    CTLTYPE_INT,
   4200 	    name, SYSCTL_DESCR(description),
   4201 	    NULL, 0, limit, 0,
   4202 	    CTL_CREATE, CTL_EOL) != 0) {
   4203 		aprint_error_dev(adapter->dev, "%s: could not create sysctl",
   4204 		    __func__);
   4205 	}
   4206 }
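         /*
          * Usage sketch (added commentary; the names shown are illustrative
          * assumptions, not necessarily this file's identifiers): attach code
          * would typically register the RX processing limit like this:
          *
          *	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
          *	    "max number of rx packets to process",
          *	    &adapter->rx_process_limit, ixv_rx_process_limit);
          */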
   4207 
   4208