ixv.c revision 1.5
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2012, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: src/sys/dev/ixgbe/ixv.c,v 1.2 2011/03/23 13:10:15 jhb Exp $*/
     34 /*$NetBSD: ixv.c,v 1.5 2015/03/27 05:57:28 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixv.h"
     40 
     41 /*********************************************************************
     42  *  Driver version
     43  *********************************************************************/
     44 char ixv_driver_version[] = "1.1.2";
     45 
     46 /*********************************************************************
     47  *  PCI Device ID Table
     48  *
     49  *  Used by probe to select devices to load on
     50  *  Last field stores an index into ixv_strings
     51  *  Last entry must be all 0s
     52  *
     53  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     54  *********************************************************************/
     55 
     56 static ixv_vendor_info_t ixv_vendor_info_array[] =
     57 {
     58 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     60 	/* required last entry */
     61 	{0, 0, 0, 0, 0}
     62 };
     63 
     64 /*********************************************************************
     65  *  Table of branding strings
     66  *********************************************************************/
     67 
     68 static const char    *ixv_strings[] = {
     69 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     70 };
     71 
     72 /*********************************************************************
     73  *  Function prototypes
     74  *********************************************************************/
     75 static int      ixv_probe(device_t, cfdata_t, void *);
     76 static void      ixv_attach(device_t, device_t, void *);
     77 static int      ixv_detach(device_t, int);
     78 #if 0
     79 static int      ixv_shutdown(device_t);
     80 #endif
     81 #if __FreeBSD_version < 800000
     82 static void     ixv_start(struct ifnet *);
     83 static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
     84 #else
     85 static int	ixv_mq_start(struct ifnet *, struct mbuf *);
     86 static int	ixv_mq_start_locked(struct ifnet *,
     87 		    struct tx_ring *, struct mbuf *);
     88 static void	ixv_qflush(struct ifnet *);
     89 #endif
     90 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     91 static int	ixv_init(struct ifnet *);
     92 static void	ixv_init_locked(struct adapter *);
     93 static void     ixv_stop(void *);
     94 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     95 static int      ixv_media_change(struct ifnet *);
     96 static void     ixv_identify_hardware(struct adapter *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *);
    100 static int	ixv_allocate_queues(struct adapter *);
    101 static int	ixv_setup_msix(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_setup_interface(device_t, struct adapter *);
    105 static void     ixv_config_link(struct adapter *);
    106 
    107 static int      ixv_allocate_transmit_buffers(struct tx_ring *);
    108 static int	ixv_setup_transmit_structures(struct adapter *);
    109 static void	ixv_setup_transmit_ring(struct tx_ring *);
    110 static void     ixv_initialize_transmit_units(struct adapter *);
    111 static void     ixv_free_transmit_structures(struct adapter *);
    112 static void     ixv_free_transmit_buffers(struct tx_ring *);
    113 
    114 static int      ixv_allocate_receive_buffers(struct rx_ring *);
    115 static int      ixv_setup_receive_structures(struct adapter *);
    116 static int	ixv_setup_receive_ring(struct rx_ring *);
    117 static void     ixv_initialize_receive_units(struct adapter *);
    118 static void     ixv_free_receive_structures(struct adapter *);
    119 static void     ixv_free_receive_buffers(struct rx_ring *);
    120 
    121 static void     ixv_enable_intr(struct adapter *);
    122 static void     ixv_disable_intr(struct adapter *);
    123 static bool	ixv_txeof(struct tx_ring *);
    124 static bool	ixv_rxeof(struct ix_queue *, int);
    125 static void	ixv_rx_checksum(u32, struct mbuf *, u32,
    126 		    struct ixgbevf_hw_stats *);
    127 static void     ixv_set_multi(struct adapter *);
    128 static void     ixv_update_link_status(struct adapter *);
    129 static void	ixv_refresh_mbufs(struct rx_ring *, int);
    130 static int      ixv_xmit(struct tx_ring *, struct mbuf *);
    131 static int	ixv_sysctl_stats(SYSCTLFN_PROTO);
    132 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    133 static int	ixv_set_flowcntl(SYSCTLFN_PROTO);
    134 static int	ixv_dma_malloc(struct adapter *, bus_size_t,
    135 		    struct ixv_dma_alloc *, int);
    136 static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
    137 static void	ixv_add_rx_process_limit(struct adapter *, const char *,
    138 		    const char *, int *, int);
    139 static u32	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
    140 static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
    141 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    142 static void	ixv_configure_ivars(struct adapter *);
    143 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    144 
    145 static void	ixv_setup_vlan_support(struct adapter *);
    146 #if 0
    147 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    148 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    149 #endif
    150 
    151 static void	ixv_save_stats(struct adapter *);
    152 static void	ixv_init_stats(struct adapter *);
    153 static void	ixv_update_stats(struct adapter *);
    154 
    155 static __inline void ixv_rx_discard(struct rx_ring *, int);
    156 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
    157 		    struct mbuf *, u32);
    158 
    159 /* The MSI/X Interrupt handlers */
    160 static void	ixv_msix_que(void *);
    161 static void	ixv_msix_mbx(void *);
    162 
    163 /* Deferred interrupt tasklets */
    164 static void	ixv_handle_que(void *);
    165 static void	ixv_handle_mbx(void *);
    166 
    167 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    168 static ixv_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    169 
    170 /*********************************************************************
    171  *  FreeBSD Device Interface Entry Points
    172  *********************************************************************/
    173 
    174 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    175     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    176     DVF_DETACH_SHUTDOWN);
    177 
    178 # if 0
    179 static device_method_t ixv_methods[] = {
    180 	/* Device interface */
    181 	DEVMETHOD(device_probe, ixv_probe),
    182 	DEVMETHOD(device_attach, ixv_attach),
    183 	DEVMETHOD(device_detach, ixv_detach),
    184 	DEVMETHOD(device_shutdown, ixv_shutdown),
    185 	{0, 0}
    186 };
    187 #endif
    188 
    189 #if 0
    190 static driver_t ixv_driver = {
    191 	"ix", ixv_methods, sizeof(struct adapter),
    192 };
    193 
    194 extern devclass_t ixgbe_devclass;
    195 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
    196 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    197 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    198 #endif
    199 
    200 /*
    201 ** TUNEABLE PARAMETERS:
    202 */
    203 
    204 /*
    205 ** AIM: Adaptive Interrupt Moderation
    206 ** which means that the interrupt rate
    207 ** is varied over time based on the
    208 ** traffic for that interrupt vector
    209 */
    210 static int ixv_enable_aim = FALSE;
    211 #define	TUNABLE_INT(__x, __y)
    212 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    213 
    214 /* How many packets rxeof tries to clean at a time */
    215 static int ixv_rx_process_limit = 128;
    216 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    217 
    218 /* Flow control setting, default to full */
    219 static int ixv_flow_control = ixgbe_fc_full;
    220 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
    221 
    222 /*
    223  * Header split: this causes the hardware to DMA
     224  * the header into a separate mbuf from the payload.
     225  * It can be a performance win in some workloads, but
     226  * in others it actually hurts; it is off by default.
    227  */
    228 static int ixv_header_split = FALSE;
    229 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
    230 
    231 /*
    232 ** Number of TX descriptors per ring,
    233 ** setting higher than RX as this seems
    234 ** the better performing choice.
    235 */
    236 static int ixv_txd = DEFAULT_TXD;
    237 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    238 
    239 /* Number of RX descriptors per ring */
    240 static int ixv_rxd = DEFAULT_RXD;
    241 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    242 
    243 /*
     244 ** Shadow VFTA table; this is needed because
    245 ** the real filter table gets cleared during
    246 ** a soft reset and we need to repopulate it.
    247 */
    248 static u32 ixv_shadow_vfta[VFTA_SIZE];
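/*
** Illustrative sketch (not compiled): repopulating the hardware table
** from the shadow copy amounts to replaying each non-zero word into the
** VFTA registers, roughly as ixv_setup_vlan_support() does later in
** this file after a reset:
*/
#if 0
	for (int i = 0; i < VFTA_SIZE; i++)
		if (ixv_shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    ixv_shadow_vfta[i]);
#endif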
    249 
    250 /* Keep running tab on them for sanity check */
    251 static int ixv_total_ports;
    252 
    253 /*********************************************************************
    254  *  Device identification routine
    255  *
    256  *  ixv_probe determines if the driver should be loaded on
     257  *  the adapter based on the PCI vendor/device id of the adapter.
    258  *
    259  *  return 1 on success, 0 on failure
    260  *********************************************************************/
    261 
    262 static int
    263 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    264 {
    265 	const struct pci_attach_args *pa = aux;
    266 
    267 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    268 }
    269 
    270 static ixv_vendor_info_t *
    271 ixv_lookup(const struct pci_attach_args *pa)
    272 {
    273 	pcireg_t subid;
    274 	ixv_vendor_info_t *ent;
    275 
    276 	INIT_DEBUGOUT("ixv_probe: begin");
    277 
    278 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    279 		return NULL;
    280 
    281 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    282 
    283 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    284 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    285 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    286 
    287 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    288 		     (ent->subvendor_id == 0)) &&
    289 
    290 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    291 		     (ent->subdevice_id == 0))) {
    292 			++ixv_total_ports;
    293 			return ent;
    294 		}
    295 	}
    296 	return NULL;
    297 }
    298 
    299 
    300 static void
    301 ixv_sysctl_attach(struct adapter *adapter)
    302 {
    303 	struct sysctllog **log;
    304 	const struct sysctlnode *rnode, *cnode;
    305 	device_t dev;
    306 
    307 	dev = adapter->dev;
    308 	log = &adapter->sysctllog;
    309 
    310 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    311 		aprint_error_dev(dev, "could not create sysctl root\n");
    312 		return;
    313 	}
    314 
    315 	if (sysctl_createv(log, 0, &rnode, &cnode,
    316 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    317 	    "stats", SYSCTL_DESCR("Statistics"),
    318 	    ixv_sysctl_stats, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    319 		aprint_error_dev(dev, "could not create sysctl\n");
    320 
    321 	if (sysctl_createv(log, 0, &rnode, &cnode,
    322 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    323 	    "debug", SYSCTL_DESCR("Debug Info"),
    324 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    325 		aprint_error_dev(dev, "could not create sysctl\n");
    326 
    327 	if (sysctl_createv(log, 0, &rnode, &cnode,
    328 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    329 	    "flow_control", SYSCTL_DESCR("Flow Control"),
    330 	    ixv_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    331 		aprint_error_dev(dev, "could not create sysctl\n");
    332 
    333 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    334 	 * XXX It's that way in the FreeBSD driver that this derives from.
    335 	 */
    336 	if (sysctl_createv(log, 0, &rnode, &cnode,
    337 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    338 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    339 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    340 		aprint_error_dev(dev, "could not create sysctl\n");
    341 }
    342 
    343 /*********************************************************************
    344  *  Device initialization routine
    345  *
    346  *  The attach entry point is called when the driver is being loaded.
    347  *  This routine identifies the type of hardware, allocates all resources
    348  *  and initializes the hardware.
    349  *
    350  *  return 0 on success, positive on failure
    351  *********************************************************************/
    352 
    353 static void
    354 ixv_attach(device_t parent, device_t dev, void *aux)
    355 {
    356 	struct adapter *adapter;
    357 	struct ixgbe_hw *hw;
    358 	int             error = 0;
    359 	ixv_vendor_info_t *ent;
    360 	const struct pci_attach_args *pa = aux;
    361 
    362 	INIT_DEBUGOUT("ixv_attach: begin");
    363 
    364 	/* Allocate, clear, and link in our adapter structure */
    365 	adapter = device_private(dev);
    366 	adapter->dev = adapter->osdep.dev = dev;
    367 	hw = &adapter->hw;
    368 
    369 	ent = ixv_lookup(pa);
    370 
    371 	KASSERT(ent != NULL);
    372 
    373 	aprint_normal(": %s, Version - %s\n",
    374 	    ixv_strings[ent->index], ixv_driver_version);
    375 
    376 	/* Core Lock Init*/
    377 	IXV_CORE_LOCK_INIT(adapter, device_xname(dev));
    378 
    379 	/* SYSCTL APIs */
    380 	ixv_sysctl_attach(adapter);
    381 
    382 	/* Set up the timer callout */
    383 	callout_init(&adapter->timer, 0);
    384 
    385 	/* Determine hardware revision */
    386 	ixv_identify_hardware(adapter);
    387 
    388 	/* Do base PCI setup - map BAR0 */
    389 	if (ixv_allocate_pci_resources(adapter, pa)) {
    390 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    391 		error = ENXIO;
    392 		goto err_out;
    393 	}
    394 
    395 	/* Do descriptor calc and sanity checks */
    396 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    397 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    398 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    399 		adapter->num_tx_desc = DEFAULT_TXD;
    400 	} else
    401 		adapter->num_tx_desc = ixv_txd;
    402 
    403 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
     404 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    405 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    406 		adapter->num_rx_desc = DEFAULT_RXD;
    407 	} else
    408 		adapter->num_rx_desc = ixv_rxd;
    409 
    410 	/* Allocate our TX/RX Queues */
    411 	if (ixv_allocate_queues(adapter)) {
    412 		error = ENOMEM;
    413 		goto err_out;
    414 	}
    415 
    416 	/*
     417 	** Initialize the shared code: it is
     418 	** at this point that the mac type is set.
    419 	*/
    420 	error = ixgbe_init_shared_code(hw);
    421 	if (error) {
    422 		aprint_error_dev(dev,"Shared Code Initialization Failure\n");
    423 		error = EIO;
    424 		goto err_late;
    425 	}
    426 
    427 	/* Setup the mailbox */
    428 	ixgbe_init_mbx_params_vf(hw);
    429 
    430 	ixgbe_reset_hw(hw);
    431 
    432 	/* Get Hardware Flow Control setting */
    433 	hw->fc.requested_mode = ixgbe_fc_full;
    434 	hw->fc.pause_time = IXV_FC_PAUSE;
    435 	hw->fc.low_water = IXV_FC_LO;
    436 	hw->fc.high_water[0] = IXV_FC_HI;
    437 	hw->fc.send_xon = TRUE;
    438 
    439 	error = ixgbe_init_hw(hw);
    440 	if (error) {
    441 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    442 		error = EIO;
    443 		goto err_late;
    444 	}
    445 
    446 	error = ixv_allocate_msix(adapter);
    447 	if (error)
    448 		goto err_late;
    449 
    450 	/* Setup OS specific network interface */
    451 	ixv_setup_interface(dev, adapter);
    452 
    453 	/* Sysctl for limiting the amount of work done in the taskqueue */
    454 	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
    455 	    "max number of rx packets to process", &adapter->rx_process_limit,
    456 	    ixv_rx_process_limit);
    457 
    458 	/* Do the stats setup */
    459 	ixv_save_stats(adapter);
    460 	ixv_init_stats(adapter);
    461 
    462 	/* Register for VLAN events */
    463 #if 0 /* XXX msaitoh delete after write? */
    464 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    465 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    466 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    467 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    468 #endif
    469 
    470 	INIT_DEBUGOUT("ixv_attach: end");
    471 	return;
    472 
    473 err_late:
    474 	ixv_free_transmit_structures(adapter);
    475 	ixv_free_receive_structures(adapter);
    476 err_out:
    477 	ixv_free_pci_resources(adapter);
    478 	return;
    479 
    480 }
    481 
    482 /*********************************************************************
    483  *  Device removal routine
    484  *
    485  *  The detach entry point is called when the driver is being removed.
    486  *  This routine stops the adapter and deallocates all the resources
    487  *  that were allocated for driver operation.
    488  *
    489  *  return 0 on success, positive on failure
    490  *********************************************************************/
    491 
    492 static int
    493 ixv_detach(device_t dev, int flags)
    494 {
    495 	struct adapter *adapter = device_private(dev);
    496 	struct ix_queue *que = adapter->queues;
    497 
    498 	INIT_DEBUGOUT("ixv_detach: begin");
    499 
    500 	/* Make sure VLANS are not using driver */
    501 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    502 		;	/* nothing to do: no VLANs */
    503 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    504 		vlan_ifdetach(adapter->ifp);
    505 	else {
    506 		aprint_error_dev(dev, "VLANs in use\n");
    507 		return EBUSY;
    508 	}
    509 
    510 	IXV_CORE_LOCK(adapter);
    511 	ixv_stop(adapter);
    512 	IXV_CORE_UNLOCK(adapter);
    513 
    514 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    515 		softint_disestablish(que->que_si);
    516 	}
    517 
    518 	/* Drain the Link queue */
    519 	softint_disestablish(adapter->mbx_si);
    520 
    521 	/* Unregister VLAN events */
    522 #if 0 /* XXX msaitoh delete after write? */
    523 	if (adapter->vlan_attach != NULL)
    524 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    525 	if (adapter->vlan_detach != NULL)
    526 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    527 #endif
    528 
    529 	ether_ifdetach(adapter->ifp);
    530 	callout_halt(&adapter->timer, NULL);
    531 	ixv_free_pci_resources(adapter);
    532 #if 0 /* XXX the NetBSD port is probably missing something here */
    533 	bus_generic_detach(dev);
    534 #endif
    535 	if_detach(adapter->ifp);
    536 
    537 	ixv_free_transmit_structures(adapter);
    538 	ixv_free_receive_structures(adapter);
    539 
    540 	IXV_CORE_LOCK_DESTROY(adapter);
    541 	return (0);
    542 }
    543 
    544 /*********************************************************************
    545  *
    546  *  Shutdown entry point
    547  *
    548  **********************************************************************/
    549 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    550 static int
    551 ixv_shutdown(device_t dev)
    552 {
    553 	struct adapter *adapter = device_private(dev);
    554 	IXV_CORE_LOCK(adapter);
    555 	ixv_stop(adapter);
    556 	IXV_CORE_UNLOCK(adapter);
    557 	return (0);
    558 }
    559 #endif
    560 
    561 #if __FreeBSD_version < 800000
    562 /*********************************************************************
    563  *  Transmit entry point
    564  *
    565  *  ixv_start is called by the stack to initiate a transmit.
    566  *  The driver will remain in this routine as long as there are
    567  *  packets to transmit and transmit resources are available.
     568  *  In case resources are not available the stack is notified and
    569  *  the packet is requeued.
    570  **********************************************************************/
    571 static void
    572 ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
    573 {
    574 	int rc;
    575 	struct mbuf    *m_head;
    576 	struct adapter *adapter = txr->adapter;
    577 
    578 	IXV_TX_LOCK_ASSERT(txr);
    579 
    580 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
    581 	    IFF_RUNNING)
    582 		return;
    583 	if (!adapter->link_active)
    584 		return;
    585 
    586 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    587 
    588 		IFQ_POLL(&ifp->if_snd, m_head);
    589 		if (m_head == NULL)
    590 			break;
    591 
     592 		if ((rc = ixv_xmit(txr, m_head)) == EAGAIN) {
    593 			ifp->if_flags |= IFF_OACTIVE;
    594 			break;
    595 		}
    596 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
    597 		if (rc == EFBIG) {
    598 			struct mbuf *mtmp;
    599 
    600 			if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) {
    601 				m_head = mtmp;
    602 				rc = ixv_xmit(txr, m_head);
    603 				if (rc != 0)
    604 					adapter->efbig2_tx_dma_setup.ev_count++;
    605 			} else
    606 				adapter->m_defrag_failed.ev_count++;
    607 		}
    608 		if (rc != 0) {
    609 			m_freem(m_head);
    610 			continue;
    611 		}
    612 		/* Send a copy of the frame to the BPF listener */
    613 		bpf_mtap(ifp, m_head);
    614 
    615 		/* Set watchdog on */
    616 		txr->watchdog_check = TRUE;
    617 		getmicrotime(&txr->watchdog_time);
    618 	}
    619 	return;
    620 }
    621 
    622 /*
    623  * Legacy TX start - called by the stack, this
    624  * always uses the first tx ring, and should
    625  * not be used with multiqueue tx enabled.
    626  */
    627 static void
    628 ixv_start(struct ifnet *ifp)
    629 {
    630 	struct adapter *adapter = ifp->if_softc;
    631 	struct tx_ring	*txr = adapter->tx_rings;
    632 
    633 	if (ifp->if_flags & IFF_RUNNING) {
    634 		IXV_TX_LOCK(txr);
    635 		ixv_start_locked(txr, ifp);
    636 		IXV_TX_UNLOCK(txr);
    637 	}
    638 	return;
    639 }
    640 
    641 #else
    642 
    643 /*
    644 ** Multiqueue Transmit driver
    645 **
    646 */
    647 static int
    648 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
    649 {
    650 	struct adapter	*adapter = ifp->if_softc;
    651 	struct ix_queue	*que;
    652 	struct tx_ring	*txr;
    653 	int 		i = 0, err = 0;
    654 
    655 	/* Which queue to use */
    656 	if ((m->m_flags & M_FLOWID) != 0)
    657 		i = m->m_pkthdr.flowid % adapter->num_queues;
    658 
    659 	txr = &adapter->tx_rings[i];
    660 	que = &adapter->queues[i];
    661 
    662 	if (IXV_TX_TRYLOCK(txr)) {
    663 		err = ixv_mq_start_locked(ifp, txr, m);
    664 		IXV_TX_UNLOCK(txr);
    665 	} else {
    666 		err = drbr_enqueue(ifp, txr->br, m);
    667 		softint_schedule(que->que_si);
    668 	}
    669 
    670 	return (err);
    671 }
    672 
    673 static int
    674 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
    675 {
    676 	struct adapter  *adapter = txr->adapter;
    677         struct mbuf     *next;
    678         int             enqueued, err = 0;
    679 
    680 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
    681 	    IFF_RUNNING || adapter->link_active == 0) {
    682 		if (m != NULL)
    683 			err = drbr_enqueue(ifp, txr->br, m);
    684 		return (err);
    685 	}
    686 
    687 	/* Do a clean if descriptors are low */
    688 	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
    689 		ixv_txeof(txr);
    690 
    691 	enqueued = 0;
    692 	if (m == NULL) {
    693 		next = drbr_dequeue(ifp, txr->br);
    694 	} else if (drbr_needs_enqueue(ifp, txr->br)) {
    695 		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
    696 			return (err);
    697 		next = drbr_dequeue(ifp, txr->br);
    698 	} else
    699 		next = m;
    700 
    701 	/* Process the queue */
    702 	while (next != NULL) {
    703 		if ((err = ixv_xmit(txr, next)) != 0) {
    704 			if (next != NULL)
    705 				err = drbr_enqueue(ifp, txr->br, next);
    706 			break;
    707 		}
    708 		enqueued++;
    709 		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
    710 		/* Send a copy of the frame to the BPF listener */
    711 		ETHER_BPF_MTAP(ifp, next);
    712 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    713 			break;
    714 		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
    715 			ifp->if_flags |= IFF_OACTIVE;
    716 			break;
    717 		}
    718 		next = drbr_dequeue(ifp, txr->br);
    719 	}
    720 
    721 	if (enqueued > 0) {
    722 		/* Set watchdog on */
    723 		txr->watchdog_check = TRUE;
    724 		getmicrotime(&txr->watchdog_time);
    725 	}
    726 
    727 	return (err);
    728 }
    729 
    730 /*
    731 ** Flush all ring buffers
    732 */
    733 static void
    734 ixv_qflush(struct ifnet *ifp)
    735 {
    736 	struct adapter  *adapter = ifp->if_softc;
    737 	struct tx_ring  *txr = adapter->tx_rings;
    738 	struct mbuf     *m;
    739 
    740 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    741 		IXV_TX_LOCK(txr);
    742 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
    743 			m_freem(m);
    744 		IXV_TX_UNLOCK(txr);
    745 	}
    746 	if_qflush(ifp);
    747 }
    748 
    749 #endif
    750 
    751 static int
    752 ixv_ifflags_cb(struct ethercom *ec)
    753 {
    754 	struct ifnet *ifp = &ec->ec_if;
    755 	struct adapter *adapter = ifp->if_softc;
    756 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    757 
    758 	IXV_CORE_LOCK(adapter);
    759 
    760 	if (change != 0)
    761 		adapter->if_flags = ifp->if_flags;
    762 
    763 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    764 		rc = ENETRESET;
    765 
    766 	IXV_CORE_UNLOCK(adapter);
    767 
    768 	return rc;
    769 }
    770 
    771 /*********************************************************************
    772  *  Ioctl entry point
    773  *
    774  *  ixv_ioctl is called when the user wants to configure the
    775  *  interface.
    776  *
    777  *  return 0 on success, positive on failure
    778  **********************************************************************/
    779 
    780 static int
    781 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    782 {
    783 	struct adapter	*adapter = ifp->if_softc;
    784 	struct ifcapreq *ifcr = data;
    785 	struct ifreq	*ifr = (struct ifreq *) data;
    786 	int             error = 0;
    787 	int l4csum_en;
    788 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    789 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    790 
    791 	switch (command) {
    792 	case SIOCSIFFLAGS:
    793 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    794 		break;
    795 	case SIOCADDMULTI:
    796 	case SIOCDELMULTI:
    797 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    798 		break;
    799 	case SIOCSIFMEDIA:
    800 	case SIOCGIFMEDIA:
    801 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    802 		break;
    803 	case SIOCSIFCAP:
    804 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    805 		break;
    806 	case SIOCSIFMTU:
    807 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    808 		break;
    809 	default:
    810 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
    811 		break;
    812 	}
    813 
    814 	switch (command) {
    815 	case SIOCSIFMEDIA:
    816 	case SIOCGIFMEDIA:
    817 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    818 	case SIOCSIFCAP:
    819 		/* Layer-4 Rx checksum offload has to be turned on and
    820 		 * off as a unit.
    821 		 */
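		/* For example (illustrative): a request that enables only
		 * IFCAP_CSUM_TCPv4_Rx while leaving the other three Rx
		 * checksum flags clear fails the check below with EINVAL;
		 * either all four bits are set or none of them is.
		 */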
    822 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    823 		if (l4csum_en != l4csum && l4csum_en != 0)
    824 			return EINVAL;
    825 		/*FALLTHROUGH*/
    826 	case SIOCADDMULTI:
    827 	case SIOCDELMULTI:
    828 	case SIOCSIFFLAGS:
    829 	case SIOCSIFMTU:
    830 	default:
    831 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    832 			return error;
    833 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    834 			;
    835 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    836 			IXV_CORE_LOCK(adapter);
    837 			ixv_init_locked(adapter);
    838 			IXV_CORE_UNLOCK(adapter);
    839 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    840 			/*
    841 			 * Multicast list has changed; set the hardware filter
    842 			 * accordingly.
    843 			 */
    844 			IXV_CORE_LOCK(adapter);
    845 			ixv_disable_intr(adapter);
    846 			ixv_set_multi(adapter);
    847 			ixv_enable_intr(adapter);
    848 			IXV_CORE_UNLOCK(adapter);
    849 		}
    850 		return 0;
    851 	}
    852 }
    853 
    854 /*********************************************************************
    855  *  Init entry point
    856  *
    857  *  This routine is used in two ways. It is used by the stack as
    858  *  init entry point in network interface structure. It is also used
    859  *  by the driver as a hw/sw initialization routine to get to a
    860  *  consistent state.
    861  *
    862  *  return 0 on success, positive on failure
    863  **********************************************************************/
    864 #define IXGBE_MHADD_MFS_SHIFT 16
    865 
    866 static void
    867 ixv_init_locked(struct adapter *adapter)
    868 {
    869 	struct ifnet	*ifp = adapter->ifp;
    870 	device_t 	dev = adapter->dev;
    871 	struct ixgbe_hw *hw = &adapter->hw;
    872 	u32		mhadd, gpie;
    873 
    874 	INIT_DEBUGOUT("ixv_init: begin");
    875 	KASSERT(mutex_owned(&adapter->core_mtx));
    876 	hw->adapter_stopped = FALSE;
    877 	ixgbe_stop_adapter(hw);
    878         callout_stop(&adapter->timer);
    879 
    880         /* reprogram the RAR[0] in case user changed it. */
    881         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    882 
    883 	/* Get the latest mac address, User can use a LAA */
    884 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    885 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    886         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    887 	hw->addr_ctrl.rar_used_count = 1;
    888 
    889 	/* Prepare transmit descriptors and buffers */
    890 	if (ixv_setup_transmit_structures(adapter)) {
    891 		aprint_error_dev(dev,"Could not setup transmit structures\n");
    892 		ixv_stop(adapter);
    893 		return;
    894 	}
    895 
    896 	ixgbe_reset_hw(hw);
    897 	ixv_initialize_transmit_units(adapter);
    898 
    899 	/* Setup Multicast table */
    900 	ixv_set_multi(adapter);
    901 
    902 	/*
    903 	** Determine the correct mbuf pool
    904 	** for doing jumbo/headersplit
    905 	*/
    906 	if (ifp->if_mtu > ETHERMTU)
    907 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    908 	else
    909 		adapter->rx_mbuf_sz = MCLBYTES;
    910 
    911 	/* Prepare receive descriptors and buffers */
    912 	if (ixv_setup_receive_structures(adapter)) {
    913 		device_printf(dev,"Could not setup receive structures\n");
    914 		ixv_stop(adapter);
    915 		return;
    916 	}
    917 
    918 	/* Configure RX settings */
    919 	ixv_initialize_receive_units(adapter);
    920 
    921 	/* Enable Enhanced MSIX mode */
    922 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    923 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    924 	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    925         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
    926 
    927 #if 0 /* XXX isn't it required? -- msaitoh  */
    928 	/* Set the various hardware offload abilities */
    929 	ifp->if_hwassist = 0;
    930 	if (ifp->if_capenable & IFCAP_TSO4)
    931 		ifp->if_hwassist |= CSUM_TSO;
    932 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    933 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    934 #if __FreeBSD_version >= 800000
    935 		ifp->if_hwassist |= CSUM_SCTP;
    936 #endif
    937 	}
    938 #endif
    939 
    940 	/* Set MTU size */
    941 	if (ifp->if_mtu > ETHERMTU) {
    942 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
    943 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
    944 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
    945 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    946 	}
    947 
    948 	/* Set up VLAN offload and filter */
    949 	ixv_setup_vlan_support(adapter);
    950 
    951 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    952 
    953 	/* Set up MSI/X routing */
    954 	ixv_configure_ivars(adapter);
    955 
    956 	/* Set up auto-mask */
    957 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    958 
    959         /* Set moderation on the Link interrupt */
    960         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
    961 
    962 	/* Stats init */
    963 	ixv_init_stats(adapter);
    964 
    965 	/* Config/Enable Link */
    966 	ixv_config_link(adapter);
    967 
    968 	/* And now turn on interrupts */
    969 	ixv_enable_intr(adapter);
    970 
    971 	/* Now inform the stack we're ready */
    972 	ifp->if_flags |= IFF_RUNNING;
    973 	ifp->if_flags &= ~IFF_OACTIVE;
    974 
    975 	return;
    976 }
    977 
    978 static int
    979 ixv_init(struct ifnet *ifp)
    980 {
    981 	struct adapter *adapter = ifp->if_softc;
    982 
    983 	IXV_CORE_LOCK(adapter);
    984 	ixv_init_locked(adapter);
    985 	IXV_CORE_UNLOCK(adapter);
    986 	return 0;
    987 }
    988 
    989 
    990 /*
    991 **
    992 ** MSIX Interrupt Handlers and Tasklets
    993 **
    994 */
    995 
    996 static inline void
    997 ixv_enable_queue(struct adapter *adapter, u32 vector)
    998 {
    999 	struct ixgbe_hw *hw = &adapter->hw;
   1000 	u32	queue = 1 << vector;
   1001 	u32	mask;
   1002 
   1003 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1004 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1005 }
   1006 
   1007 static inline void
   1008 ixv_disable_queue(struct adapter *adapter, u32 vector)
   1009 {
   1010 	struct ixgbe_hw *hw = &adapter->hw;
   1011 	u64	queue = (u64)(1 << vector);
   1012 	u32	mask;
   1013 
   1014 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1015 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
   1016 }
   1017 
   1018 static inline void
   1019 ixv_rearm_queues(struct adapter *adapter, u64 queues)
   1020 {
   1021 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   1022 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
   1023 }
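/*
** Note (added for clarity): the three helpers above drive the VF
** interrupt mask registers.  ixv_enable_queue() writes a vector's bit to
** VTEIMS (unmask), ixv_disable_queue() writes it to VTEIMC (mask), and
** ixv_rearm_queues() writes it to VTEICS, which asks the hardware to
** raise the interrupt again.  For example, vector 1 maps to bit 0x2, and
** the bit is always filtered through IXGBE_EIMS_RTX_QUEUE so only valid
** queue bits reach the register.
*/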
   1024 
   1025 
   1026 static void
   1027 ixv_handle_que(void *context)
   1028 {
   1029 	struct ix_queue *que = context;
   1030 	struct adapter  *adapter = que->adapter;
   1031 	struct tx_ring  *txr = que->txr;
   1032 	struct ifnet    *ifp = adapter->ifp;
   1033 	bool		more;
   1034 
   1035 	if (ifp->if_flags & IFF_RUNNING) {
   1036 		more = ixv_rxeof(que, adapter->rx_process_limit);
   1037 		IXV_TX_LOCK(txr);
   1038 		ixv_txeof(txr);
   1039 #if __FreeBSD_version >= 800000
   1040 		if (!drbr_empty(ifp, txr->br))
   1041 			ixv_mq_start_locked(ifp, txr, NULL);
   1042 #else
   1043 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1044 			ixv_start_locked(txr, ifp);
   1045 #endif
   1046 		IXV_TX_UNLOCK(txr);
   1047 		if (more) {
   1048 			adapter->req.ev_count++;
   1049 			softint_schedule(que->que_si);
   1050 			return;
   1051 		}
   1052 	}
   1053 
   1054 	/* Reenable this interrupt */
   1055 	ixv_enable_queue(adapter, que->msix);
   1056 	return;
   1057 }
   1058 
   1059 /*********************************************************************
   1060  *
    1061  *  MSIX Queue Interrupt Service routine
   1062  *
   1063  **********************************************************************/
    1064 static void
   1065 ixv_msix_que(void *arg)
   1066 {
   1067 	struct ix_queue	*que = arg;
   1068 	struct adapter  *adapter = que->adapter;
   1069 	struct tx_ring	*txr = que->txr;
   1070 	struct rx_ring	*rxr = que->rxr;
   1071 	bool		more_tx, more_rx;
   1072 	u32		newitr = 0;
   1073 
   1074 	ixv_disable_queue(adapter, que->msix);
   1075 	++que->irqs;
   1076 
   1077 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1078 
   1079 	IXV_TX_LOCK(txr);
   1080 	more_tx = ixv_txeof(txr);
   1081 	/*
   1082 	** Make certain that if the stack
   1083 	** has anything queued the task gets
   1084 	** scheduled to handle it.
   1085 	*/
   1086 #if __FreeBSD_version < 800000
   1087 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
   1088 #else
   1089 	if (!drbr_empty(adapter->ifp, txr->br))
   1090 #endif
   1091                 more_tx = 1;
   1092 	IXV_TX_UNLOCK(txr);
   1093 
   1094 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1095 
   1096 	/* Do AIM now? */
   1097 
   1098 	if (ixv_enable_aim == FALSE)
   1099 		goto no_calc;
   1100 	/*
   1101 	** Do Adaptive Interrupt Moderation:
   1102         **  - Write out last calculated setting
   1103 	**  - Calculate based on average size over
   1104 	**    the last interval.
   1105 	*/
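	/*
	** Worked example (illustrative): if the last interval moved
	** 96000 rx bytes in 64 packets, the average frame is 1500 bytes;
	** adding 24 for the hardware framing/CRC gives 1524, which is
	** under the 3000 cap but above the 300-1200 mid range, so it is
	** halved to 762 and mirrored into the upper 16 bits before being
	** written to VTEITR on the next pass through this handler.
	*/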
   1106         if (que->eitr_setting)
   1107                 IXGBE_WRITE_REG(&adapter->hw,
   1108                     IXGBE_VTEITR(que->msix),
   1109 		    que->eitr_setting);
   1110 
   1111         que->eitr_setting = 0;
   1112 
   1113         /* Idle, do nothing */
   1114         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1115                 goto no_calc;
   1116 
   1117 	if ((txr->bytes) && (txr->packets))
   1118                	newitr = txr->bytes/txr->packets;
   1119 	if ((rxr->bytes) && (rxr->packets))
   1120 		newitr = max(newitr,
   1121 		    (rxr->bytes / rxr->packets));
   1122 	newitr += 24; /* account for hardware frame, crc */
   1123 
   1124 	/* set an upper boundary */
   1125 	newitr = min(newitr, 3000);
   1126 
   1127 	/* Be nice to the mid range */
   1128 	if ((newitr > 300) && (newitr < 1200))
   1129 		newitr = (newitr / 3);
   1130 	else
   1131 		newitr = (newitr / 2);
   1132 
   1133 	newitr |= newitr << 16;
   1134 
   1135         /* save for next interrupt */
   1136         que->eitr_setting = newitr;
   1137 
   1138         /* Reset state */
   1139         txr->bytes = 0;
   1140         txr->packets = 0;
   1141         rxr->bytes = 0;
   1142         rxr->packets = 0;
   1143 
   1144 no_calc:
   1145 	if (more_tx || more_rx)
   1146 		softint_schedule(que->que_si);
   1147 	else /* Reenable this interrupt */
   1148 		ixv_enable_queue(adapter, que->msix);
   1149 	return;
   1150 }
   1151 
   1152 static void
   1153 ixv_msix_mbx(void *arg)
   1154 {
   1155 	struct adapter	*adapter = arg;
   1156 	struct ixgbe_hw *hw = &adapter->hw;
   1157 	u32		reg;
   1158 
   1159 	++adapter->mbx_irq.ev_count;
   1160 
   1161 	/* First get the cause */
   1162 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
   1163 	/* Clear interrupt with write */
   1164 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
   1165 
   1166 	/* Link status change */
   1167 	if (reg & IXGBE_EICR_LSC)
   1168 		softint_schedule(adapter->mbx_si);
   1169 
   1170 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1171 	return;
   1172 }
   1173 
   1174 /*********************************************************************
   1175  *
   1176  *  Media Ioctl callback
   1177  *
   1178  *  This routine is called whenever the user queries the status of
   1179  *  the interface using ifconfig.
   1180  *
   1181  **********************************************************************/
   1182 static void
   1183 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1184 {
   1185 	struct adapter *adapter = ifp->if_softc;
   1186 
   1187 	INIT_DEBUGOUT("ixv_media_status: begin");
   1188 	IXV_CORE_LOCK(adapter);
   1189 	ixv_update_link_status(adapter);
   1190 
   1191 	ifmr->ifm_status = IFM_AVALID;
   1192 	ifmr->ifm_active = IFM_ETHER;
   1193 
   1194 	if (!adapter->link_active) {
   1195 		IXV_CORE_UNLOCK(adapter);
   1196 		return;
   1197 	}
   1198 
   1199 	ifmr->ifm_status |= IFM_ACTIVE;
   1200 
   1201 	switch (adapter->link_speed) {
   1202 		case IXGBE_LINK_SPEED_1GB_FULL:
   1203 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1204 			break;
   1205 		case IXGBE_LINK_SPEED_10GB_FULL:
   1206 			ifmr->ifm_active |= IFM_FDX;
   1207 			break;
   1208 	}
   1209 
   1210 	IXV_CORE_UNLOCK(adapter);
   1211 
   1212 	return;
   1213 }
   1214 
   1215 /*********************************************************************
   1216  *
   1217  *  Media Ioctl callback
   1218  *
   1219  *  This routine is called when the user changes speed/duplex using
    1220  *  media/mediaopt option with ifconfig.
   1221  *
   1222  **********************************************************************/
   1223 static int
   1224 ixv_media_change(struct ifnet * ifp)
   1225 {
   1226 	struct adapter *adapter = ifp->if_softc;
   1227 	struct ifmedia *ifm = &adapter->media;
   1228 
   1229 	INIT_DEBUGOUT("ixv_media_change: begin");
   1230 
   1231 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1232 		return (EINVAL);
   1233 
   1234         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1235         case IFM_AUTO:
   1236                 break;
   1237         default:
   1238                 device_printf(adapter->dev, "Only auto media type\n");
   1239 		return (EINVAL);
   1240         }
   1241 
   1242 	return (0);
   1243 }
   1244 
   1245 /*********************************************************************
   1246  *
   1247  *  This routine maps the mbufs to tx descriptors, allowing the
   1248  *  TX engine to transmit the packets.
   1249  *  	- return 0 on success, positive on failure
   1250  *
   1251  **********************************************************************/
   1252 
   1253 static int
   1254 ixv_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1255 {
   1256 	struct m_tag *mtag;
   1257 	struct adapter  *adapter = txr->adapter;
   1258 	struct ethercom *ec = &adapter->osdep.ec;
   1259 	u32		olinfo_status = 0, cmd_type_len;
   1260 	u32		paylen = 0;
    1261 	int             i, j, error;
   1262 	int		first, last = 0;
   1263 	bus_dmamap_t	map;
   1264 	struct ixv_tx_buf *txbuf;
   1265 	union ixgbe_adv_tx_desc *txd = NULL;
   1266 
   1267 	/* Basic descriptor defines */
   1268         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1269 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1270 
   1271 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1272         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1273 
   1274         /*
   1275          * Important to capture the first descriptor
   1276          * used because it will contain the index of
   1277          * the one we tell the hardware to report back
   1278          */
   1279         first = txr->next_avail_desc;
   1280 	txbuf = &txr->tx_buffers[first];
   1281 	map = txbuf->map;
   1282 
   1283 	/*
   1284 	 * Map the packet for DMA.
   1285 	 */
   1286 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1287 	    m_head, BUS_DMA_NOWAIT);
   1288 
   1289 	switch (error) {
   1290 	case EAGAIN:
   1291 		adapter->eagain_tx_dma_setup.ev_count++;
   1292 		return EAGAIN;
   1293 	case ENOMEM:
   1294 		adapter->enomem_tx_dma_setup.ev_count++;
   1295 		return EAGAIN;
   1296 	case EFBIG:
   1297 		adapter->efbig_tx_dma_setup.ev_count++;
   1298 		return error;
   1299 	case EINVAL:
   1300 		adapter->einval_tx_dma_setup.ev_count++;
   1301 		return error;
   1302 	default:
   1303 		adapter->other_tx_dma_setup.ev_count++;
   1304 		return error;
   1305 	case 0:
   1306 		break;
   1307 	}
   1308 
   1309 	/* Make certain there are enough descriptors */
    1310 	if (map->dm_nsegs > txr->tx_avail - 2) {
   1311 		txr->no_desc_avail.ev_count++;
   1312 		/* XXX s/ixgbe/ixv/ */
   1313 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1314 		return EAGAIN;
   1315 	}
   1316 
   1317 	/*
   1318 	** Set up the appropriate offload context
   1319 	** this becomes the first descriptor of
   1320 	** a packet.
   1321 	*/
   1322 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1323 		if (ixv_tso_setup(txr, m_head, &paylen)) {
   1324 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1325 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1326 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1327 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1328 			++adapter->tso_tx.ev_count;
   1329 		} else {
   1330 			++adapter->tso_err.ev_count;
   1331 			/* XXX unload DMA map! --dyoung -> easy? --msaitoh */
   1332 			return (ENXIO);
   1333 		}
   1334 	} else
   1335 		olinfo_status |= ixv_tx_ctx_setup(txr, m_head);
   1336 
   1337         /* Record payload length */
   1338 	if (paylen == 0)
   1339         	olinfo_status |= m_head->m_pkthdr.len <<
   1340 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1341 
   1342 	i = txr->next_avail_desc;
   1343 	for (j = 0; j < map->dm_nsegs; j++) {
   1344 		bus_size_t seglen;
   1345 		bus_addr_t segaddr;
   1346 
   1347 		txbuf = &txr->tx_buffers[i];
   1348 		txd = &txr->tx_base[i];
   1349 		seglen = map->dm_segs[j].ds_len;
   1350 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1351 
   1352 		txd->read.buffer_addr = segaddr;
   1353 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1354 		    cmd_type_len |seglen);
   1355 		txd->read.olinfo_status = htole32(olinfo_status);
   1356 		last = i; /* descriptor that will get completion IRQ */
   1357 
   1358 		if (++i == adapter->num_tx_desc)
   1359 			i = 0;
   1360 
   1361 		txbuf->m_head = NULL;
   1362 		txbuf->eop_index = -1;
   1363 	}
   1364 
   1365 	txd->read.cmd_type_len |=
   1366 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1367 	txr->tx_avail -= map->dm_nsegs;
   1368 	txr->next_avail_desc = i;
   1369 
   1370 	txbuf->m_head = m_head;
   1371 	/* Swap the dma map between the first and last descriptor */
   1372 	txr->tx_buffers[first].map = txbuf->map;
   1373 	txbuf->map = map;
   1374 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1375 	    BUS_DMASYNC_PREWRITE);
   1376 
   1377         /* Set the index of the descriptor that will be marked done */
   1378         txbuf = &txr->tx_buffers[first];
   1379 	txbuf->eop_index = last;
   1380 
   1381 	/* XXX s/ixgbe/ixg/ */
   1382         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1383             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1384 	/*
   1385 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
   1386 	 * hardware that this frame is available to transmit.
   1387 	 */
   1388 	++txr->total_packets.ev_count;
   1389 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
   1390 
   1391 	return 0;
   1392 }
   1393 
   1394 
   1395 /*********************************************************************
   1396  *  Multicast Update
   1397  *
    1398  *  This routine is called whenever the multicast address list is updated.
   1399  *
   1400  **********************************************************************/
   1401 #define IXGBE_RAR_ENTRIES 16
   1402 
   1403 static void
   1404 ixv_set_multi(struct adapter *adapter)
   1405 {
   1406 	struct ether_multi *enm;
   1407 	struct ether_multistep step;
   1408 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1409 	u8	*update_ptr;
   1410 	int	mcnt = 0;
   1411 	struct ethercom *ec = &adapter->osdep.ec;
   1412 
   1413 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1414 
   1415 	ETHER_FIRST_MULTI(step, ec, enm);
   1416 	while (enm != NULL) {
   1417 		bcopy(enm->enm_addrlo,
   1418 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1419 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1420 		mcnt++;
   1421 		/* XXX This might be required --msaitoh */
   1422 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1423 			break;
   1424 		ETHER_NEXT_MULTI(step, enm);
   1425 	}
   1426 
   1427 	update_ptr = mta;
   1428 
   1429 	ixgbe_update_mc_addr_list(&adapter->hw,
   1430 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1431 
   1432 	return;
   1433 }
   1434 
   1435 /*
   1436  * This is an iterator function now needed by the multicast
   1437  * shared code. It simply feeds the shared code routine the
   1438  * addresses in the array of ixv_set_multi() one by one.
   1439  */
   1440 static u8 *
   1441 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1442 {
   1443 	u8 *addr = *update_ptr;
   1444 	u8 *newptr;
   1445 	*vmdq = 0;
   1446 
   1447 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1448 	*update_ptr = newptr;
   1449 	return addr;
   1450 }
   1451 
   1452 /*********************************************************************
   1453  *  Timer routine
   1454  *
    1455  *  This routine checks for link status, updates statistics,
   1456  *  and runs the watchdog check.
   1457  *
   1458  **********************************************************************/
   1459 
   1460 static void
   1461 ixv_local_timer1(void *arg)
   1462 {
   1463 	struct adapter	*adapter = arg;
   1464 	device_t	dev = adapter->dev;
   1465 	struct tx_ring	*txr = adapter->tx_rings;
   1466 	int		i;
   1467 	struct timeval now, elapsed;
   1468 
   1469 	KASSERT(mutex_owned(&adapter->core_mtx));
   1470 
   1471 	ixv_update_link_status(adapter);
   1472 
   1473 	/* Stats Update */
   1474 	ixv_update_stats(adapter);
   1475 
   1476 	/*
   1477 	 * If the interface has been paused
   1478 	 * then don't do the watchdog check
   1479 	 */
   1480 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   1481 		goto out;
   1482 	/*
   1483 	** Check for time since any descriptor was cleaned
   1484 	*/
   1485         for (i = 0; i < adapter->num_queues; i++, txr++) {
   1486 		IXV_TX_LOCK(txr);
   1487 		if (txr->watchdog_check == FALSE) {
   1488 			IXV_TX_UNLOCK(txr);
   1489 			continue;
   1490 		}
   1491 		getmicrotime(&now);
   1492 		timersub(&now, &txr->watchdog_time, &elapsed);
   1493 		if (tvtohz(&elapsed) > IXV_WATCHDOG)
   1494 			goto hung;
   1495 		IXV_TX_UNLOCK(txr);
   1496 	}
   1497 out:
   1498        	ixv_rearm_queues(adapter, adapter->que_mask);
   1499 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1500 	return;
   1501 
   1502 hung:
   1503 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1504 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
   1505 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
   1506 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
   1507 	device_printf(dev,"TX(%d) desc avail = %d,"
   1508 	    "Next TX to Clean = %d\n",
   1509 	    txr->me, txr->tx_avail, txr->next_to_clean);
   1510 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1511 	adapter->watchdog_events.ev_count++;
   1512 	IXV_TX_UNLOCK(txr);
   1513 	ixv_init_locked(adapter);
   1514 }
   1515 
   1516 static void
   1517 ixv_local_timer(void *arg)
   1518 {
   1519 	struct adapter *adapter = arg;
   1520 
   1521 	IXV_CORE_LOCK(adapter);
   1522 	ixv_local_timer1(adapter);
   1523 	IXV_CORE_UNLOCK(adapter);
   1524 }
   1525 
   1526 /*
   1527 ** Note: this routine updates the OS on the link state
   1528 **	the real check of the hardware only happens with
   1529 **	a link interrupt.
   1530 */
   1531 static void
   1532 ixv_update_link_status(struct adapter *adapter)
   1533 {
   1534 	struct ifnet	*ifp = adapter->ifp;
   1535 	struct tx_ring *txr = adapter->tx_rings;
   1536 	device_t dev = adapter->dev;
   1537 
   1538 
   1539 	if (adapter->link_up){
   1540 		if (adapter->link_active == FALSE) {
   1541 			if (bootverbose)
   1542 				device_printf(dev,"Link is up %d Gbps %s \n",
   1543 				    ((adapter->link_speed == 128)? 10:1),
   1544 				    "Full Duplex");
   1545 			adapter->link_active = TRUE;
   1546 			if_link_state_change(ifp, LINK_STATE_UP);
   1547 		}
   1548 	} else { /* Link down */
   1549 		if (adapter->link_active == TRUE) {
   1550 			if (bootverbose)
   1551 				device_printf(dev,"Link is Down\n");
   1552 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1553 			adapter->link_active = FALSE;
   1554 			for (int i = 0; i < adapter->num_queues;
   1555 			    i++, txr++)
   1556 				txr->watchdog_check = FALSE;
   1557 		}
   1558 	}
   1559 
   1560 	return;
   1561 }
   1562 
   1563 
   1564 static void
   1565 ixv_ifstop(struct ifnet *ifp, int disable)
   1566 {
   1567 	struct adapter *adapter = ifp->if_softc;
   1568 
   1569 	IXV_CORE_LOCK(adapter);
   1570 	ixv_stop(adapter);
   1571 	IXV_CORE_UNLOCK(adapter);
   1572 }
   1573 
   1574 /*********************************************************************
   1575  *
   1576  *  This routine disables all traffic on the adapter by issuing a
   1577  *  global reset on the MAC and deallocates TX/RX buffers.
   1578  *
   1579  **********************************************************************/
   1580 
   1581 static void
   1582 ixv_stop(void *arg)
   1583 {
   1584 	struct ifnet   *ifp;
   1585 	struct adapter *adapter = arg;
   1586 	struct ixgbe_hw *hw = &adapter->hw;
   1587 	ifp = adapter->ifp;
   1588 
   1589 	KASSERT(mutex_owned(&adapter->core_mtx));
   1590 
   1591 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1592 	ixv_disable_intr(adapter);
   1593 
   1594 	/* Tell the stack that the interface is no longer active */
   1595 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1596 
   1597 	ixgbe_reset_hw(hw);
   1598 	adapter->hw.adapter_stopped = FALSE;
   1599 	ixgbe_stop_adapter(hw);
   1600 	callout_stop(&adapter->timer);
   1601 
   1602 	/* reprogram the RAR[0] in case user changed it. */
   1603 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1604 
   1605 	return;
   1606 }
   1607 
   1608 
   1609 /*********************************************************************
   1610  *
   1611  *  Determine hardware revision.
   1612  *
   1613  **********************************************************************/
   1614 static void
   1615 ixv_identify_hardware(struct adapter *adapter)
   1616 {
   1617 	u16		pci_cmd_word;
   1618 	pcitag_t tag;
   1619 	pci_chipset_tag_t pc;
   1620 	pcireg_t subid, id;
   1621 	struct ixgbe_hw *hw = &adapter->hw;
   1622 
   1623 	pc = adapter->osdep.pc;
   1624 	tag = adapter->osdep.tag;
   1625 
   1626 	/*
   1627 	** Make sure BUSMASTER is set, on a VM under
   1628 	** KVM it may not be and will break things.
   1629 	*/
   1630 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1631 	if (!((pci_cmd_word & PCI_COMMAND_MASTER_ENABLE) &&
   1632 	    (pci_cmd_word & PCI_COMMAND_MEM_ENABLE))) {
   1633 		INIT_DEBUGOUT("Memory Access and/or Bus Master "
   1634 		    "bits were not set!\n");
   1635 		pci_cmd_word |=
   1636 		    (PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE);
   1637 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1638 	}
   1639 
   1640 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1641 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1642 
   1643 	/* Save off the information about this board */
   1644 	hw->vendor_id = PCI_VENDOR(id);
   1645 	hw->device_id = PCI_PRODUCT(id);
   1646 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1647 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1648 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1649 
   1650 	return;
   1651 }
   1652 
   1653 /*********************************************************************
   1654  *
   1655  *  Setup MSIX Interrupt resources and handlers
   1656  *
   1657  **********************************************************************/
   1658 static int
   1659 ixv_allocate_msix(struct adapter *adapter)
   1660 {
   1661 #if !defined(NETBSD_MSI_OR_MSIX)
   1662 	return 0;
   1663 #else
   1664 	device_t        dev = adapter->dev;
   1665 	struct 		ix_queue *que = adapter->queues;
   1666 	int 		error, rid, vector = 0;
   1667 	pcitag_t tag;
   1668 	pci_chipset_tag_t pc;
   1669 
   1670 	pc = adapter->osdep.pc;
   1671 	tag = adapter->osdep.tag;
   1672 
   1673 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   1674 		rid = vector + 1;
   1675 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
   1676 		    RF_SHAREABLE | RF_ACTIVE);
   1677 		if (que->res == NULL) {
   1678 			aprint_error_dev(dev,"Unable to allocate"
   1679 		    	    " bus resource: que interrupt [%d]\n", vector);
   1680 			return (ENXIO);
   1681 		}
   1682 		/* Set the handler function */
   1683 		error = bus_setup_intr(dev, que->res,
   1684 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   1685 		    ixv_msix_que, que, &que->tag);
   1686 		if (error) {
   1687 			que->res = NULL;
   1688 			aprint_error_dev(dev,
   1689 			    "Failed to register QUE handler");
   1690 			return (error);
   1691 		}
   1692 #if __FreeBSD_version >= 800504
   1693 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
   1694 #endif
   1695 		que->msix = vector;
    1696 		adapter->que_mask |= (u64)1 << que->msix;
   1697 		/*
   1698 		** Bind the msix vector, and thus the
   1699 		** ring to the corresponding cpu.
   1700 		*/
   1701 		if (adapter->num_queues > 1)
   1702 			bus_bind_intr(dev, que->res, i);
   1703 
   1704 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1705 		    que);
   1706 	}
   1707 
   1708 	/* and Mailbox */
   1709 	rid = vector + 1;
   1710 	adapter->res = bus_alloc_resource_any(dev,
   1711     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
   1712 	if (!adapter->res) {
   1713 		aprint_error_dev(dev,"Unable to allocate"
   1714     	    " bus resource: MBX interrupt [%d]\n", rid);
   1715 		return (ENXIO);
   1716 	}
   1717 	/* Set the mbx handler function */
   1718 	error = bus_setup_intr(dev, adapter->res,
   1719 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   1720 	    ixv_msix_mbx, adapter, &adapter->tag);
   1721 	if (error) {
   1722 		adapter->res = NULL;
   1723 		aprint_error_dev(dev, "Failed to register LINK handler");
   1724 		return (error);
   1725 	}
   1726 #if __FreeBSD_version >= 800504
   1727 	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
   1728 #endif
   1729 	adapter->mbxvec = vector;
   1730 	/* Tasklets for Mailbox */
   1731 	adapter->mbx_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1732 	    adapter);
   1733 	/*
   1734 	** Due to a broken design QEMU will fail to properly
   1735 	** enable the guest for MSIX unless the vectors in
   1736 	** the table are all set up, so we must rewrite the
   1737 	** ENABLE in the MSIX control register again at this
   1738 	** point to cause it to successfully initialize us.
   1739 	*/
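	/*
	 * The sequence below is: locate the MSI-X capability in config
	 * space, then OR the MSI-X Enable bit into its Message Control
	 * word so the (already programmed) vector table takes effect.
	 */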
    1740 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf &&
    1741 	    pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL) != 0) {
    1742 		pcireg_t msix_ctrl;
    1743 		rid += PCI_MSIX_CTL;
    1744 		msix_ctrl = pci_conf_read(pc, tag, rid);
    1745 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
    1746 		pci_conf_write(pc, tag, rid, msix_ctrl);
    1747 	}
   1748 
   1749 	return (0);
   1750 #endif
   1751 }
   1752 
   1753 /*
    1754  * Set up MSIX resources; note that the VF
    1755  * device MUST use MSIX, there is no fallback.
   1756  */
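/*
 * Vector layout assumed here: one MSI-X vector for the single RX/TX
 * queue pair plus one vector for the PF<->VF mailbox, hence the
 * "want = 2" below.
 */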
   1757 static int
   1758 ixv_setup_msix(struct adapter *adapter)
   1759 {
   1760 #if !defined(NETBSD_MSI_OR_MSIX)
   1761 	return 0;
   1762 #else
   1763 	device_t dev = adapter->dev;
   1764 	int rid, vectors, want = 2;
   1765 
   1766 
   1767 	/* First try MSI/X */
   1768 	rid = PCIR_BAR(3);
   1769 	adapter->msix_mem = bus_alloc_resource_any(dev,
   1770 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   1771        	if (!adapter->msix_mem) {
   1772 		device_printf(adapter->dev,
   1773 		    "Unable to map MSIX table \n");
   1774 		goto out;
   1775 	}
   1776 
   1777 	vectors = pci_msix_count(dev);
   1778 	if (vectors < 2) {
   1779 		bus_release_resource(dev, SYS_RES_MEMORY,
   1780 		    rid, adapter->msix_mem);
   1781 		adapter->msix_mem = NULL;
   1782 		goto out;
   1783 	}
   1784 
   1785 	/*
   1786 	** Want two vectors: one for a queue,
   1787 	** plus an additional for mailbox.
   1788 	*/
   1789 	if (pci_alloc_msix(dev, &want) == 0) {
   1790                	device_printf(adapter->dev,
   1791 		    "Using MSIX interrupts with %d vectors\n", want);
   1792 		return (want);
   1793 	}
   1794 out:
   1795 	device_printf(adapter->dev,"MSIX config error\n");
   1796 	return (ENXIO);
   1797 #endif
   1798 }
   1799 
   1800 
   1801 static int
   1802 ixv_allocate_pci_resources(struct adapter *adapter,
   1803     const struct pci_attach_args *pa)
   1804 {
   1805 	pcireg_t	memtype;
   1806 	device_t        dev = adapter->dev;
   1807 	bus_addr_t addr;
   1808 	int flags;
   1809 
   1810 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1811 
   1812 	switch (memtype) {
   1813 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1814 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1815 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1816 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1817 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1818 			goto map_err;
   1819 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1820 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1821 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1822 		}
   1823 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1824 		     adapter->osdep.mem_size, flags,
   1825 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1826 map_err:
   1827 			adapter->osdep.mem_size = 0;
   1828 			aprint_error_dev(dev, "unable to map BAR0\n");
   1829 			return ENXIO;
   1830 		}
   1831 		break;
   1832 	default:
   1833 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1834 		return ENXIO;
   1835 	}
   1836 
   1837 	adapter->num_queues = 1;
   1838 	adapter->hw.back = &adapter->osdep;
   1839 
   1840 	/*
   1841 	** Now setup MSI/X, should
   1842 	** return us the number of
   1843 	** configured vectors.
   1844 	*/
   1845 	adapter->msix = ixv_setup_msix(adapter);
   1846 	if (adapter->msix == ENXIO)
   1847 		return (ENXIO);
   1848 	else
   1849 		return (0);
   1850 }
   1851 
   1852 static void
   1853 ixv_free_pci_resources(struct adapter * adapter)
   1854 {
   1855 #if defined(NETBSD_MSI_OR_MSIX)
   1856 	struct 		ix_queue *que = adapter->queues;
   1857 	device_t	dev = adapter->dev;
   1858 	int		rid, memrid;
   1859 
   1860 	memrid = PCI_BAR(MSIX_BAR);
   1861 
   1862 	/*
   1863 	** There is a slight possibility of a failure mode
   1864 	** in attach that will result in entering this function
   1865 	** before interrupt resources have been initialized, and
    1866 	** in that case we do not want to execute the loops below.
    1867 	** We can detect this reliably by checking the state of the
    1868 	** adapter's res pointer.
   1869 	*/
   1870 	if (adapter->res == NULL)
   1871 		goto mem;
   1872 
   1873 	/*
   1874 	**  Release all msix queue resources:
   1875 	*/
   1876 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1877 		rid = que->msix + 1;
   1878 		if (que->tag != NULL) {
   1879 			bus_teardown_intr(dev, que->res, que->tag);
   1880 			que->tag = NULL;
   1881 		}
   1882 		if (que->res != NULL)
   1883 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
   1884 	}
   1885 
   1886 
   1887 	/* Clean the Legacy or Link interrupt last */
   1888 	if (adapter->mbxvec) /* we are doing MSIX */
   1889 		rid = adapter->mbxvec + 1;
   1890 	else
    1891 		rid = (adapter->msix != 0) ? 1 : 0;
   1892 
   1893 	if (adapter->tag != NULL) {
   1894 		bus_teardown_intr(dev, adapter->res, adapter->tag);
   1895 		adapter->tag = NULL;
   1896 	}
   1897 	if (adapter->res != NULL)
   1898 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
   1899 
   1900 mem:
   1901 	if (adapter->msix)
   1902 		pci_release_msi(dev);
   1903 
   1904 	if (adapter->msix_mem != NULL)
   1905 		bus_release_resource(dev, SYS_RES_MEMORY,
   1906 		    memrid, adapter->msix_mem);
   1907 
   1908 	if (adapter->pci_mem != NULL)
   1909 		bus_release_resource(dev, SYS_RES_MEMORY,
   1910 		    PCIR_BAR(0), adapter->pci_mem);
   1911 
   1912 #endif
   1913 	return;
   1914 }
   1915 
   1916 /*********************************************************************
   1917  *
   1918  *  Setup networking device structure and register an interface.
   1919  *
   1920  **********************************************************************/
   1921 static void
   1922 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1923 {
   1924 	struct ethercom *ec = &adapter->osdep.ec;
   1925 	struct ifnet   *ifp;
   1926 
   1927 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1928 
   1929 	ifp = adapter->ifp = &ec->ec_if;
   1930 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1931 	ifp->if_baudrate = 1000000000;
   1932 	ifp->if_init = ixv_init;
   1933 	ifp->if_stop = ixv_ifstop;
   1934 	ifp->if_softc = adapter;
   1935 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1936 	ifp->if_ioctl = ixv_ioctl;
   1937 #if __FreeBSD_version >= 800000
   1938 	ifp->if_transmit = ixv_mq_start;
   1939 	ifp->if_qflush = ixv_qflush;
   1940 #else
   1941 	ifp->if_start = ixv_start;
   1942 #endif
   1943 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1944 
   1945 	if_attach(ifp);
   1946 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1947 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1948 
   1949 	adapter->max_frame_size =
   1950 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1951 
   1952 	/*
   1953 	 * Tell the upper layer(s) we support long frames.
   1954 	 */
   1955 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1956 
   1957 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1958 	ifp->if_capenable = 0;
   1959 
   1960 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1961 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1962 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1963 	    		| ETHERCAP_VLAN_MTU;
   1964 	ec->ec_capenable = ec->ec_capabilities;
   1965 
   1966 	/* Don't enable LRO by default */
   1967 	ifp->if_capabilities |= IFCAP_LRO;
   1968 
   1969 	/*
    1970 	** Don't turn this on by default: if vlans are
    1971 	** created on another pseudo device (e.g. lagg)
    1972 	** then vlan events are not passed through, breaking
    1973 	** operation, but with HW FILTER off it works. If
    1974 	** using vlans directly on this driver you can
    1975 	** enable this and get full hardware tag filtering.
   1976 	*/
   1977 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1978 
   1979 	/*
   1980 	 * Specify the media types supported by this adapter and register
   1981 	 * callbacks to update media and link information
   1982 	 */
   1983 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1984 		     ixv_media_status);
   1985 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
   1986 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1987 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1988 
   1989 	return;
   1990 }
   1991 
   1992 static void
   1993 ixv_config_link(struct adapter *adapter)
   1994 {
   1995 	struct ixgbe_hw *hw = &adapter->hw;
   1996 	u32	autoneg, err = 0;
   1997 	bool	negotiate = TRUE;
   1998 
   1999 	if (hw->mac.ops.check_link)
   2000 		err = hw->mac.ops.check_link(hw, &autoneg,
   2001 		    &adapter->link_up, FALSE);
   2002 	if (err)
   2003 		goto out;
   2004 
   2005 	if (hw->mac.ops.setup_link)
   2006                	err = hw->mac.ops.setup_link(hw, autoneg,
   2007 		    negotiate, adapter->link_up);
   2008 out:
   2009 	return;
   2010 }
   2011 
   2012 /********************************************************************
   2013  * Manage DMA'able memory.
   2014  *******************************************************************/
   2015 
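/*
 * ixv_dma_malloc() below follows the usual bus_dma(9) sequence:
 *   1. create a DMA tag describing alignment and size constraints
 *   2. bus_dmamem_alloc()  - allocate the physical segment
 *   3. bus_dmamem_map()    - map it into kernel virtual address space
 *   4. create a map and bus_dmamap_load() it to learn the bus address
 * ixv_dma_free() undoes these steps in reverse order.
 */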
   2016 static int
   2017 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
   2018 		struct ixv_dma_alloc *dma, int mapflags)
   2019 {
   2020 	device_t dev = adapter->dev;
   2021 	int             r, rsegs;
   2022 
   2023 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2024 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2025 			       size,	/* maxsize */
   2026 			       1,	/* nsegments */
   2027 			       size,	/* maxsegsize */
   2028 			       BUS_DMA_ALLOCNOW,	/* flags */
   2029 			       &dma->dma_tag);
   2030 	if (r != 0) {
   2031 		aprint_error_dev(dev,
   2032 		    "ixv_dma_malloc: bus_dma_tag_create failed; error %u\n", r);
   2033 		goto fail_0;
   2034 	}
   2035 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2036 		size,
   2037 		dma->dma_tag->dt_alignment,
   2038 		dma->dma_tag->dt_boundary,
   2039 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2040 	if (r != 0) {
   2041 		aprint_error_dev(dev,
   2042 		    "%s: bus_dmamem_alloc failed; error %u\n", __func__, r);
   2043 		goto fail_1;
   2044 	}
   2045 
   2046 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2047 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2048 	if (r != 0) {
   2049 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2050 		    __func__, r);
   2051 		goto fail_2;
   2052 	}
   2053 
   2054 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2055 	if (r != 0) {
    2056 		aprint_error_dev(dev, "%s: bus_dmamap_create failed; error %d\n",
   2057 		    __func__, r);
   2058 		goto fail_3;
   2059 	}
   2060 
   2061 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2062 			    size,
   2063 			    NULL,
   2064 			    mapflags | BUS_DMA_NOWAIT);
   2065 	if (r != 0) {
   2066 		aprint_error_dev(dev,"%s: bus_dmamap_load failed; error %u\n",
   2067 		    __func__, r);
   2068 		goto fail_4;
   2069 	}
   2070 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2071 	dma->dma_size = size;
   2072 	return 0;
   2073 fail_4:
   2074 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2075 fail_3:
   2076 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2077 fail_2:
   2078 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2079 fail_1:
   2080 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2081 fail_0:
   2082 	dma->dma_map = NULL;
   2083 	dma->dma_tag = NULL;
   2084 	return (r);
   2085 }
   2086 
   2087 static void
   2088 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
   2089 {
   2090 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2091 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2092 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2093 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2094 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2095 }
   2096 
   2097 
   2098 /*********************************************************************
   2099  *
   2100  *  Allocate memory for the transmit and receive rings, and then
   2101  *  the descriptors associated with each, called only once at attach.
   2102  *
   2103  **********************************************************************/
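/*
 * The driver keeps three parallel arrays sized by num_queues: ix_queue
 * (the per-vector glue), tx_ring and rx_ring.  Each ring gets its own
 * mutex and its own DMA'd descriptor area via ixv_dma_malloc().
 */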
   2104 static int
   2105 ixv_allocate_queues(struct adapter *adapter)
   2106 {
   2107 	device_t	dev = adapter->dev;
   2108 	struct ix_queue	*que;
   2109 	struct tx_ring	*txr;
   2110 	struct rx_ring	*rxr;
   2111 	int rsize, tsize, error = 0;
   2112 	int txconf = 0, rxconf = 0;
   2113 
   2114         /* First allocate the top level queue structs */
   2115         if (!(adapter->queues =
   2116             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2117             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2118                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2119                 error = ENOMEM;
   2120                 goto fail;
   2121         }
   2122 
    2123 	/* Next allocate the TX ring struct memory */
   2124 	if (!(adapter->tx_rings =
   2125 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2126 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2127 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2128 		error = ENOMEM;
   2129 		goto tx_fail;
   2130 	}
   2131 
   2132 	/* Next allocate the RX */
   2133 	if (!(adapter->rx_rings =
   2134 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2135 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2136 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2137 		error = ENOMEM;
   2138 		goto rx_fail;
   2139 	}
   2140 
   2141 	/* For the ring itself */
   2142 	tsize = roundup2(adapter->num_tx_desc *
   2143 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2144 
   2145 	/*
    2146 	 * Now set up the TX queues; txconf is needed to handle the
    2147 	 * possibility that things fail midcourse and we need to
    2148 	 * undo the allocated memory gracefully.
   2149 	 */
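	/*
	 * On failure the goto labels below unwind in reverse order of
	 * allocation: err_tx_desc/err_rx_desc free the descriptor DMA
	 * areas for the rings completed so far (tracked by txconf and
	 * rxconf), then the ring and queue arrays themselves are freed.
	 */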
   2150 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2151 		/* Set up some basics */
   2152 		txr = &adapter->tx_rings[i];
   2153 		txr->adapter = adapter;
   2154 		txr->me = i;
   2155 
   2156 		/* Initialize the TX side lock */
   2157 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2158 		    device_xname(dev), txr->me);
   2159 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2160 
   2161 		if (ixv_dma_malloc(adapter, tsize,
   2162 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2163 			aprint_error_dev(dev,
   2164 			    "Unable to allocate TX Descriptor memory\n");
   2165 			error = ENOMEM;
   2166 			goto err_tx_desc;
   2167 		}
   2168 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2169 		bzero((void *)txr->tx_base, tsize);
   2170 
   2171         	/* Now allocate transmit buffers for the ring */
   2172         	if (ixv_allocate_transmit_buffers(txr)) {
   2173 			aprint_error_dev(dev,
   2174 			    "Critical Failure setting up transmit buffers\n");
   2175 			error = ENOMEM;
   2176 			goto err_tx_desc;
   2177         	}
   2178 #if __FreeBSD_version >= 800000
   2179 		/* Allocate a buf ring */
   2180 		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
   2181 		    M_WAITOK, &txr->tx_mtx);
   2182 		if (txr->br == NULL) {
   2183 			aprint_error_dev(dev,
   2184 			    "Critical Failure setting up buf ring\n");
   2185 			error = ENOMEM;
   2186 			goto err_tx_desc;
   2187 		}
   2188 #endif
   2189 	}
   2190 
   2191 	/*
   2192 	 * Next the RX queues...
   2193 	 */
   2194 	rsize = roundup2(adapter->num_rx_desc *
   2195 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2196 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2197 		rxr = &adapter->rx_rings[i];
   2198 		/* Set up some basics */
   2199 		rxr->adapter = adapter;
   2200 		rxr->me = i;
   2201 
   2202 		/* Initialize the RX side lock */
   2203 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2204 		    device_xname(dev), rxr->me);
   2205 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2206 
   2207 		if (ixv_dma_malloc(adapter, rsize,
   2208 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2209 			aprint_error_dev(dev,
   2210 			    "Unable to allocate RxDescriptor memory\n");
   2211 			error = ENOMEM;
   2212 			goto err_rx_desc;
   2213 		}
   2214 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2215 		bzero((void *)rxr->rx_base, rsize);
   2216 
    2217         	/* Allocate receive buffers for the ring */
   2218 		if (ixv_allocate_receive_buffers(rxr)) {
   2219 			aprint_error_dev(dev,
   2220 			    "Critical Failure setting up receive buffers\n");
   2221 			error = ENOMEM;
   2222 			goto err_rx_desc;
   2223 		}
   2224 	}
   2225 
   2226 	/*
   2227 	** Finally set up the queue holding structs
   2228 	*/
   2229 	for (int i = 0; i < adapter->num_queues; i++) {
   2230 		que = &adapter->queues[i];
   2231 		que->adapter = adapter;
   2232 		que->txr = &adapter->tx_rings[i];
   2233 		que->rxr = &adapter->rx_rings[i];
   2234 	}
   2235 
   2236 	return (0);
   2237 
   2238 err_rx_desc:
   2239 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2240 		ixv_dma_free(adapter, &rxr->rxdma);
   2241 err_tx_desc:
   2242 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2243 		ixv_dma_free(adapter, &txr->txdma);
   2244 	free(adapter->rx_rings, M_DEVBUF);
   2245 rx_fail:
   2246 	free(adapter->tx_rings, M_DEVBUF);
   2247 tx_fail:
   2248 	free(adapter->queues, M_DEVBUF);
   2249 fail:
   2250 	return (error);
   2251 }
   2252 
   2253 
   2254 /*********************************************************************
   2255  *
   2256  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2257  *  the information needed to transmit a packet on the wire. This is
   2258  *  called only once at attach, setup is done every reset.
   2259  *
   2260  **********************************************************************/
   2261 static int
   2262 ixv_allocate_transmit_buffers(struct tx_ring *txr)
   2263 {
   2264 	struct adapter *adapter = txr->adapter;
   2265 	device_t dev = adapter->dev;
   2266 	struct ixv_tx_buf *txbuf;
   2267 	int error, i;
   2268 
   2269 	/*
   2270 	 * Setup DMA descriptor areas.
   2271 	 */
   2272 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2273 			       1, 0,		/* alignment, bounds */
   2274 			       IXV_TSO_SIZE,		/* maxsize */
   2275 			       32,			/* nsegments */
   2276 			       PAGE_SIZE,		/* maxsegsize */
   2277 			       0,			/* flags */
   2278 			       &txr->txtag))) {
   2279 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   2280 		goto fail;
   2281 	}
   2282 
   2283 	if (!(txr->tx_buffers =
   2284 	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
   2285 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2286 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   2287 		error = ENOMEM;
   2288 		goto fail;
   2289 	}
   2290 
   2291         /* Create the descriptor buffer dma maps */
   2292 	txbuf = txr->tx_buffers;
   2293 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2294 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   2295 		if (error != 0) {
   2296 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   2297 			goto fail;
   2298 		}
   2299 	}
   2300 
   2301 	return 0;
   2302 fail:
    2303 	/* We free all; this handles the case where we failed partway through */
   2304 	ixv_free_transmit_structures(adapter);
   2305 	return (error);
   2306 }
   2307 
   2308 /*********************************************************************
   2309  *
   2310  *  Initialize a transmit ring.
   2311  *
   2312  **********************************************************************/
   2313 static void
   2314 ixv_setup_transmit_ring(struct tx_ring *txr)
   2315 {
   2316 	struct adapter *adapter = txr->adapter;
   2317 	struct ixv_tx_buf *txbuf;
   2318 	int i;
   2319 
   2320 	/* Clear the old ring contents */
   2321 	IXV_TX_LOCK(txr);
   2322 	bzero((void *)txr->tx_base,
   2323 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   2324 	/* Reset indices */
   2325 	txr->next_avail_desc = 0;
   2326 	txr->next_to_clean = 0;
   2327 
   2328 	/* Free any existing tx buffers. */
   2329         txbuf = txr->tx_buffers;
   2330 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2331 		if (txbuf->m_head != NULL) {
   2332 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   2333 			    0, txbuf->m_head->m_pkthdr.len,
   2334 			    BUS_DMASYNC_POSTWRITE);
   2335 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   2336 			m_freem(txbuf->m_head);
   2337 			txbuf->m_head = NULL;
   2338 		}
   2339 		/* Clear the EOP index */
   2340 		txbuf->eop_index = -1;
   2341         }
   2342 
   2343 	/* Set number of descriptors available */
   2344 	txr->tx_avail = adapter->num_tx_desc;
   2345 
   2346 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2347 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2348 	IXV_TX_UNLOCK(txr);
   2349 }
   2350 
   2351 /*********************************************************************
   2352  *
   2353  *  Initialize all transmit rings.
   2354  *
   2355  **********************************************************************/
   2356 static int
   2357 ixv_setup_transmit_structures(struct adapter *adapter)
   2358 {
   2359 	struct tx_ring *txr = adapter->tx_rings;
   2360 
   2361 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   2362 		ixv_setup_transmit_ring(txr);
   2363 
   2364 	return (0);
   2365 }
   2366 
   2367 /*********************************************************************
   2368  *
   2369  *  Enable transmit unit.
   2370  *
   2371  **********************************************************************/
   2372 static void
   2373 ixv_initialize_transmit_units(struct adapter *adapter)
   2374 {
   2375 	struct tx_ring	*txr = adapter->tx_rings;
   2376 	struct ixgbe_hw	*hw = &adapter->hw;
   2377 
   2378 
   2379 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2380 		u64	tdba = txr->txdma.dma_paddr;
   2381 		u32	txctrl, txdctl;
   2382 
   2383 		/* Set WTHRESH to 8, burst writeback */
   2384 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2385 		txdctl |= (8 << 16);
   2386 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2387 		/* Now enable */
   2388 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2389 		txdctl |= IXGBE_TXDCTL_ENABLE;
   2390 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2391 
   2392 		/* Set the HW Tx Head and Tail indices */
   2393 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   2394 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   2395 
   2396 		/* Setup Transmit Descriptor Cmd Settings */
   2397 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   2398 		txr->watchdog_check = FALSE;
   2399 
   2400 		/* Set Ring parameters */
   2401 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   2402 		       (tdba & 0x00000000ffffffffULL));
   2403 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   2404 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   2405 		    adapter->num_tx_desc *
   2406 		    sizeof(struct ixgbe_legacy_tx_desc));
   2407 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   2408 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
   2409 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   2411 	}
   2412 
   2413 	return;
   2414 }
   2415 
   2416 /*********************************************************************
   2417  *
   2418  *  Free all transmit rings.
   2419  *
   2420  **********************************************************************/
   2421 static void
   2422 ixv_free_transmit_structures(struct adapter *adapter)
   2423 {
   2424 	struct tx_ring *txr = adapter->tx_rings;
   2425 
   2426 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2427 		ixv_free_transmit_buffers(txr);
   2428 		ixv_dma_free(adapter, &txr->txdma);
   2429 		IXV_TX_LOCK_DESTROY(txr);
   2430 	}
   2431 	free(adapter->tx_rings, M_DEVBUF);
   2432 }
   2433 
   2434 /*********************************************************************
   2435  *
   2436  *  Free transmit ring related data structures.
   2437  *
   2438  **********************************************************************/
   2439 static void
   2440 ixv_free_transmit_buffers(struct tx_ring *txr)
   2441 {
   2442 	struct adapter *adapter = txr->adapter;
   2443 	struct ixv_tx_buf *tx_buffer;
   2444 	int             i;
   2445 
   2446 	INIT_DEBUGOUT("free_transmit_ring: begin");
   2447 
   2448 	if (txr->tx_buffers == NULL)
   2449 		return;
   2450 
   2451 	tx_buffer = txr->tx_buffers;
   2452 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   2453 		if (tx_buffer->m_head != NULL) {
   2454 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   2455 			    0, tx_buffer->m_head->m_pkthdr.len,
   2456 			    BUS_DMASYNC_POSTWRITE);
   2457 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2458 			m_freem(tx_buffer->m_head);
   2459 			tx_buffer->m_head = NULL;
   2460 			if (tx_buffer->map != NULL) {
   2461 				ixgbe_dmamap_destroy(txr->txtag,
   2462 				    tx_buffer->map);
   2463 				tx_buffer->map = NULL;
   2464 			}
   2465 		} else if (tx_buffer->map != NULL) {
   2466 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2467 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   2468 			tx_buffer->map = NULL;
   2469 		}
   2470 	}
   2471 #if __FreeBSD_version >= 800000
   2472 	if (txr->br != NULL)
   2473 		buf_ring_free(txr->br, M_DEVBUF);
   2474 #endif
   2475 	if (txr->tx_buffers != NULL) {
   2476 		free(txr->tx_buffers, M_DEVBUF);
   2477 		txr->tx_buffers = NULL;
   2478 	}
   2479 	if (txr->txtag != NULL) {
   2480 		ixgbe_dma_tag_destroy(txr->txtag);
   2481 		txr->txtag = NULL;
   2482 	}
   2483 	return;
   2484 }
   2485 
   2486 /*********************************************************************
   2487  *
   2488  *  Advanced Context Descriptor setup for VLAN or L4 CSUM
   2489  *
   2490  **********************************************************************/
   2491 
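/*
 * A context descriptor carries no packet data; it tells the hardware,
 * for the data descriptors that follow, where the L2/L3 headers sit
 * (vlan_macip_lens), which L4 checksum to insert (type_tucmd_mlhl) and
 * the VLAN tag to use.  The routine returns the olinfo bits the caller
 * must OR into those data descriptors.
 */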
   2492 static u32
   2493 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   2494 {
   2495 	struct m_tag *mtag;
   2496 	struct adapter *adapter = txr->adapter;
   2497 	struct ethercom *ec = &adapter->osdep.ec;
   2498 	struct ixgbe_adv_tx_context_desc *TXD;
   2499 	struct ixv_tx_buf        *tx_buffer;
   2500 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2501 	struct ether_vlan_header *eh;
   2502 	struct ip ip;
   2503 	struct ip6_hdr ip6;
   2504 	int  ehdrlen, ip_hlen = 0;
   2505 	u16	etype;
   2506 	u8	ipproto = 0;
   2507 	bool	offload;
   2508 	int ctxd = txr->next_avail_desc;
   2509 	u16 vtag = 0;
   2510 
   2511 
   2512 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   2513 
   2514 	tx_buffer = &txr->tx_buffers[ctxd];
   2515 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2516 
   2517 	/*
   2518 	** In advanced descriptors the vlan tag must
   2519 	** be placed into the descriptor itself.
   2520 	*/
   2521 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2522 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2523 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2524 	} else if (!offload)
   2525 		return 0;
   2526 
   2527 	/*
   2528 	 * Determine where frame payload starts.
   2529 	 * Jump over vlan headers if already present,
   2530 	 * helpful for QinQ too.
   2531 	 */
   2532 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   2533 	eh = mtod(mp, struct ether_vlan_header *);
   2534 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   2535 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   2536 		etype = ntohs(eh->evl_proto);
   2537 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2538 	} else {
   2539 		etype = ntohs(eh->evl_encap_proto);
   2540 		ehdrlen = ETHER_HDR_LEN;
   2541 	}
   2542 
   2543 	/* Set the ether header length */
   2544 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2545 
   2546 	switch (etype) {
   2547 	case ETHERTYPE_IP:
   2548 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   2549 		ip_hlen = ip.ip_hl << 2;
   2550 		ipproto = ip.ip_p;
   2551 #if 0
   2552 		ip.ip_sum = 0;
   2553 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   2554 #else
   2555 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   2556 		    ip.ip_sum == 0);
   2557 #endif
   2558 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2559 		break;
   2560 	case ETHERTYPE_IPV6:
   2561 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   2562 		ip_hlen = sizeof(ip6);
   2563 		ipproto = ip6.ip6_nxt;
   2564 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   2565 		break;
   2566 	default:
   2567 		break;
   2568 	}
   2569 
   2570 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   2571 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   2572 
   2573 	vlan_macip_lens |= ip_hlen;
   2574 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2575 
   2576 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   2577 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2578 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2579 		KASSERT(ipproto == IPPROTO_TCP);
   2580 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   2581 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   2582 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2583 		KASSERT(ipproto == IPPROTO_UDP);
   2584 	}
   2585 
   2586 	/* Now copy bits into descriptor */
   2587 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2588 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2589 	TXD->seqnum_seed = htole32(0);
   2590 	TXD->mss_l4len_idx = htole32(0);
   2591 
   2592 	tx_buffer->m_head = NULL;
   2593 	tx_buffer->eop_index = -1;
   2594 
   2595 	/* We've consumed the first desc, adjust counters */
   2596 	if (++ctxd == adapter->num_tx_desc)
   2597 		ctxd = 0;
   2598 	txr->next_avail_desc = ctxd;
   2599 	--txr->tx_avail;
   2600 
   2601         return olinfo;
   2602 }
   2603 
   2604 /**********************************************************************
   2605  *
   2606  *  Setup work for hardware segmentation offload (TSO) on
   2607  *  adapters using advanced tx descriptors
   2608  *
   2609  **********************************************************************/
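/*
 * For TSO the context descriptor additionally carries the MSS (taken
 * from m_pkthdr.segsz) and the TCP header length; the IP checksum is
 * cleared and the TCP checksum is seeded with the pseudo-header sum so
 * the hardware can complete both for every generated segment.
 */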
   2610 static bool
   2611 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   2612 {
   2613 	struct m_tag *mtag;
   2614 	struct adapter *adapter = txr->adapter;
   2615 	struct ethercom *ec = &adapter->osdep.ec;
   2616 	struct ixgbe_adv_tx_context_desc *TXD;
   2617 	struct ixv_tx_buf        *tx_buffer;
   2618 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2619 	u32 mss_l4len_idx = 0;
   2620 	u16 vtag = 0;
   2621 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
   2622 	struct ether_vlan_header *eh;
   2623 	struct ip *ip;
   2624 	struct tcphdr *th;
   2625 
   2626 
   2627 	/*
   2628 	 * Determine where frame payload starts.
   2629 	 * Jump over vlan headers if already present
   2630 	 */
   2631 	eh = mtod(mp, struct ether_vlan_header *);
   2632 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   2633 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2634 	else
   2635 		ehdrlen = ETHER_HDR_LEN;
   2636 
   2637         /* Ensure we have at least the IP+TCP header in the first mbuf. */
   2638         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   2639 		return FALSE;
   2640 
   2641 	ctxd = txr->next_avail_desc;
   2642 	tx_buffer = &txr->tx_buffers[ctxd];
   2643 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2644 
   2645 	ip = (struct ip *)(mp->m_data + ehdrlen);
   2646 	if (ip->ip_p != IPPROTO_TCP)
   2647 		return FALSE;   /* 0 */
   2648 	ip->ip_sum = 0;
   2649 	ip_hlen = ip->ip_hl << 2;
   2650 	th = (struct tcphdr *)((char *)ip + ip_hlen);
   2651 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   2652 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   2653 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   2654 	tcp_hlen = th->th_off << 2;
   2655 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   2656 
   2657 	/* This is used in the transmit desc in encap */
   2658 	*paylen = mp->m_pkthdr.len - hdrlen;
   2659 
   2660 	/* VLAN MACLEN IPLEN */
   2661 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2662 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2663                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2664 	}
   2665 
   2666 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2667 	vlan_macip_lens |= ip_hlen;
   2668 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2669 
   2670 	/* ADV DTYPE TUCMD */
   2671 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2672 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2673 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2674 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2675 
   2676 
   2677 	/* MSS L4LEN IDX */
   2678 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   2679 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   2680 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   2681 
   2682 	TXD->seqnum_seed = htole32(0);
   2683 	tx_buffer->m_head = NULL;
   2684 	tx_buffer->eop_index = -1;
   2685 
   2686 	if (++ctxd == adapter->num_tx_desc)
   2687 		ctxd = 0;
   2688 
   2689 	txr->tx_avail--;
   2690 	txr->next_avail_desc = ctxd;
   2691 	return TRUE;
   2692 }
   2693 
   2694 
   2695 /**********************************************************************
   2696  *
   2697  *  Examine each tx_buffer in the used queue. If the hardware is done
   2698  *  processing the packet then free associated resources. The
   2699  *  tx_buffer is put back on the free queue.
   2700  *
   2701  **********************************************************************/
   2702 static bool
   2703 ixv_txeof(struct tx_ring *txr)
   2704 {
   2705 	struct adapter	*adapter = txr->adapter;
   2706 	struct ifnet	*ifp = adapter->ifp;
   2707 	u32	first, last, done;
   2708 	struct ixv_tx_buf *tx_buffer;
   2709 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   2710 
   2711 	KASSERT(mutex_owned(&txr->tx_mtx));
   2712 
   2713 	if (txr->tx_avail == adapter->num_tx_desc)
   2714 		return false;
   2715 
   2716 	first = txr->next_to_clean;
   2717 	tx_buffer = &txr->tx_buffers[first];
   2718 	/* For cleanup we just use legacy struct */
   2719 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2720 	last = tx_buffer->eop_index;
   2721 	if (last == -1)
   2722 		return false;
   2723 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2724 
   2725 	/*
   2726 	** Get the index of the first descriptor
   2727 	** BEYOND the EOP and call that 'done'.
   2728 	** I do this so the comparison in the
   2729 	** inner while loop below can be simple
   2730 	*/
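	/*
	 * Example: with the EOP in the last slot (num_tx_desc - 1),
	 * 'done' wraps to 0, so the cleanup loop still stops one past
	 * the end-of-packet descriptor.
	 */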
   2731 	if (++last == adapter->num_tx_desc) last = 0;
   2732 	done = last;
   2733 
   2734         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2735             BUS_DMASYNC_POSTREAD);
   2736 	/*
   2737 	** Only the EOP descriptor of a packet now has the DD
   2738 	** bit set, this is what we look for...
   2739 	*/
   2740 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   2741 		/* We clean the range of the packet */
   2742 		while (first != done) {
   2743 			tx_desc->upper.data = 0;
   2744 			tx_desc->lower.data = 0;
   2745 			tx_desc->buffer_addr = 0;
   2746 			++txr->tx_avail;
   2747 
   2748 			if (tx_buffer->m_head) {
   2749 				bus_dmamap_sync(txr->txtag->dt_dmat,
   2750 				    tx_buffer->map,
   2751 				    0, tx_buffer->m_head->m_pkthdr.len,
   2752 				    BUS_DMASYNC_POSTWRITE);
   2753 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2754 				m_freem(tx_buffer->m_head);
   2755 				tx_buffer->m_head = NULL;
   2756 				tx_buffer->map = NULL;
   2757 			}
   2758 			tx_buffer->eop_index = -1;
   2759 			getmicrotime(&txr->watchdog_time);
   2760 
   2761 			if (++first == adapter->num_tx_desc)
   2762 				first = 0;
   2763 
   2764 			tx_buffer = &txr->tx_buffers[first];
   2765 			tx_desc =
   2766 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2767 		}
   2768 		++ifp->if_opackets;
   2769 		/* See if there is more work now */
   2770 		last = tx_buffer->eop_index;
   2771 		if (last != -1) {
   2772 			eop_desc =
   2773 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2774 			/* Get next done point */
   2775 			if (++last == adapter->num_tx_desc) last = 0;
   2776 			done = last;
   2777 		} else
   2778 			break;
   2779 	}
   2780 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2781 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2782 
   2783 	txr->next_to_clean = first;
   2784 
   2785 	/*
   2786 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   2787 	 * it is OK to send packets. If there are no pending descriptors,
   2788 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   2789 	 * restart the timeout.
   2790 	 */
   2791 	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
   2792 		ifp->if_flags &= ~IFF_OACTIVE;
   2793 		if (txr->tx_avail == adapter->num_tx_desc) {
   2794 			txr->watchdog_check = FALSE;
   2795 			return false;
   2796 		}
   2797 	}
   2798 
   2799 	return true;
   2800 }
   2801 
   2802 /*********************************************************************
   2803  *
   2804  *  Refresh mbuf buffers for RX descriptor rings
    2805  *   - now keeps its own state so discards due to resource
    2806  *     exhaustion are unnecessary; if an mbuf cannot be obtained
    2807  *     it just returns, keeping its placeholder, so it can simply
    2808  *     be called again later to retry.
   2809  *
   2810  **********************************************************************/
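/*
 * Two indices walk the ring below: 'i' is the slot currently being
 * refreshed and 'j' runs one ahead of it; the loop stops when 'j'
 * reaches 'limit', the caller's current read position in the ring.
 */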
   2811 static void
   2812 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
   2813 {
   2814 	struct adapter		*adapter = rxr->adapter;
   2815 	struct ixv_rx_buf	*rxbuf;
   2816 	struct mbuf		*mh, *mp;
   2817 	int			i, j, error;
   2818 	bool			refreshed = false;
   2819 
   2820 	i = j = rxr->next_to_refresh;
   2821         /* Get the control variable, one beyond refresh point */
   2822 	if (++j == adapter->num_rx_desc)
   2823 		j = 0;
   2824 	while (j != limit) {
   2825 		rxbuf = &rxr->rx_buffers[i];
   2826 		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
   2827 			mh = m_gethdr(M_DONTWAIT, MT_DATA);
   2828 			if (mh == NULL)
   2829 				goto update;
    2830 			mh->m_pkthdr.len = mh->m_len = MHLEN;
   2832 			mh->m_flags |= M_PKTHDR;
   2833 			m_adj(mh, ETHER_ALIGN);
   2834 			/* Get the memory mapping */
   2835 			error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   2836 			    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   2837 			if (error != 0) {
   2838 				printf("GET BUF: dmamap load"
   2839 				    " failure - %d\n", error);
   2840 				m_free(mh);
   2841 				goto update;
   2842 			}
   2843 			rxbuf->m_head = mh;
   2844 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2845 			    BUS_DMASYNC_PREREAD);
   2846 			rxr->rx_base[i].read.hdr_addr =
   2847 			    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   2848 		}
   2849 
   2850 		if (rxbuf->m_pack == NULL) {
   2851 			mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   2852 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
    2853 			if (mp == NULL) {
    2854 				rxr->no_jmbuf.ev_count++;
    2855 				goto update;
    2856 			}
   2858 
   2859 			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   2860 			/* Get the memory mapping */
   2861 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   2862 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   2863 			if (error != 0) {
   2864 				printf("GET BUF: dmamap load"
   2865 				    " failure - %d\n", error);
   2866 				m_free(mp);
   2867 				rxbuf->m_pack = NULL;
   2868 				goto update;
   2869 			}
   2870 			rxbuf->m_pack = mp;
   2871 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2872 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   2873 			rxr->rx_base[i].read.pkt_addr =
   2874 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   2875 		}
   2876 
   2877 		refreshed = true;
   2878 		rxr->next_to_refresh = i = j;
   2879 		/* Calculate next index */
   2880 		if (++j == adapter->num_rx_desc)
   2881 			j = 0;
   2882 	}
   2883 update:
   2884 	if (refreshed) /* update tail index */
   2885 		IXGBE_WRITE_REG(&adapter->hw,
   2886 		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
   2887 	return;
   2888 }
   2889 
   2890 /*********************************************************************
   2891  *
   2892  *  Allocate memory for rx_buffer structures. Since we use one
   2893  *  rx_buffer per received packet, the maximum number of rx_buffer's
   2894  *  that we'll need is equal to the number of receive descriptors
   2895  *  that we've allocated.
   2896  *
   2897  **********************************************************************/
   2898 static int
   2899 ixv_allocate_receive_buffers(struct rx_ring *rxr)
   2900 {
   2901 	struct	adapter 	*adapter = rxr->adapter;
   2902 	device_t 		dev = adapter->dev;
   2903 	struct ixv_rx_buf 	*rxbuf;
   2904 	int             	i, bsize, error;
   2905 
   2906 	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
   2907 	if (!(rxr->rx_buffers =
   2908 	    (struct ixv_rx_buf *) malloc(bsize,
   2909 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2910 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   2911 		error = ENOMEM;
   2912 		goto fail;
   2913 	}
   2914 
   2915 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2916 				   1, 0,	/* alignment, bounds */
   2917 				   MSIZE,		/* maxsize */
   2918 				   1,			/* nsegments */
   2919 				   MSIZE,		/* maxsegsize */
   2920 				   0,			/* flags */
   2921 				   &rxr->htag))) {
   2922 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2923 		goto fail;
   2924 	}
   2925 
   2926 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2927 				   1, 0,	/* alignment, bounds */
   2928 				   MJUMPAGESIZE,	/* maxsize */
   2929 				   1,			/* nsegments */
   2930 				   MJUMPAGESIZE,	/* maxsegsize */
   2931 				   0,			/* flags */
   2932 				   &rxr->ptag))) {
   2933 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2934 		goto fail;
   2935 	}
   2936 
   2937 	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
   2938 		rxbuf = &rxr->rx_buffers[i];
   2939 		error = ixgbe_dmamap_create(rxr->htag,
   2940 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   2941 		if (error) {
   2942 			aprint_error_dev(dev, "Unable to create RX head map\n");
   2943 			goto fail;
   2944 		}
   2945 		error = ixgbe_dmamap_create(rxr->ptag,
   2946 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   2947 		if (error) {
   2948 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   2949 			goto fail;
   2950 		}
   2951 	}
   2952 
   2953 	return (0);
   2954 
   2955 fail:
   2956 	/* Frees all, but can handle partial completion */
   2957 	ixv_free_receive_structures(adapter);
   2958 	return (error);
   2959 }
   2960 
   2961 static void
   2962 ixv_free_receive_ring(struct rx_ring *rxr)
   2963 {
   2964 	struct  adapter         *adapter;
   2965 	struct ixv_rx_buf       *rxbuf;
   2966 	int i;
   2967 
   2968 	adapter = rxr->adapter;
   2969 	for (i = 0; i < adapter->num_rx_desc; i++) {
   2970 		rxbuf = &rxr->rx_buffers[i];
   2971 		if (rxbuf->m_head != NULL) {
   2972 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2973 			    BUS_DMASYNC_POSTREAD);
   2974 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   2975 			rxbuf->m_head->m_flags |= M_PKTHDR;
   2976 			m_freem(rxbuf->m_head);
   2977 		}
   2978 		if (rxbuf->m_pack != NULL) {
   2979 			/* XXX not ixgbe_ ? */
   2980 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2981 			    0, rxbuf->m_pack->m_pkthdr.len,
   2982 			    BUS_DMASYNC_POSTREAD);
   2983 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   2984 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   2985 			m_freem(rxbuf->m_pack);
   2986 		}
   2987 		rxbuf->m_head = NULL;
   2988 		rxbuf->m_pack = NULL;
   2989 	}
   2990 }
   2991 
   2992 
   2993 /*********************************************************************
   2994  *
   2995  *  Initialize a receive ring and its buffers.
   2996  *
   2997  **********************************************************************/
   2998 static int
   2999 ixv_setup_receive_ring(struct rx_ring *rxr)
   3000 {
   3001 	struct	adapter 	*adapter;
   3002 	struct ixv_rx_buf	*rxbuf;
   3003 #ifdef LRO
   3004 	struct ifnet		*ifp;
   3005 	struct lro_ctrl		*lro = &rxr->lro;
   3006 #endif /* LRO */
   3007 	int			rsize, error = 0;
   3008 
   3009 	adapter = rxr->adapter;
   3010 #ifdef LRO
   3011 	ifp = adapter->ifp;
   3012 #endif /* LRO */
   3013 
   3014 	/* Clear the ring contents */
   3015 	IXV_RX_LOCK(rxr);
   3016 	rsize = roundup2(adapter->num_rx_desc *
   3017 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3018 	bzero((void *)rxr->rx_base, rsize);
   3019 
   3020 	/* Free current RX buffer structs and their mbufs */
   3021 	ixv_free_receive_ring(rxr);
   3022 
   3023 	IXV_RX_UNLOCK(rxr);
   3024 
   3025 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3026 	 * or size of jumbo mbufs may have changed.
   3027 	 */
   3028 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3029 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   3030 
   3031 	IXV_RX_LOCK(rxr);
   3032 
   3033 	/* Configure header split? */
   3034 	if (ixv_header_split)
   3035 		rxr->hdr_split = TRUE;
   3036 
   3037 	/* Now replenish the mbufs */
   3038 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3039 		struct mbuf	*mh, *mp;
   3040 
   3041 		rxbuf = &rxr->rx_buffers[j];
   3042 		/*
    3043 		** Don't allocate header mbufs if we're not
    3044 		** doing header split; it's wasteful
   3045 		*/
   3046 		if (rxr->hdr_split == FALSE)
   3047 			goto skip_head;
   3048 
   3049 		/* First the header */
   3050 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3051 		if (rxbuf->m_head == NULL) {
   3052 			error = ENOBUFS;
   3053 			goto fail;
   3054 		}
   3055 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3056 		mh = rxbuf->m_head;
   3057 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3058 		mh->m_flags |= M_PKTHDR;
   3059 		/* Get the memory mapping */
   3060 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3061 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3062 		if (error != 0) /* Nothing elegant to do here */
   3063 			goto fail;
   3064 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3065 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3066 		/* Update descriptor */
   3067 		rxr->rx_base[j].read.hdr_addr =
   3068 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3069 
   3070 skip_head:
   3071 		/* Now the payload cluster */
   3072 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3073 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3074 		if (rxbuf->m_pack == NULL) {
   3075 			error = ENOBUFS;
   3076                         goto fail;
   3077 		}
   3078 		mp = rxbuf->m_pack;
   3079 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3080 		/* Get the memory mapping */
   3081 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3082 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3083 		if (error != 0)
   3084                         goto fail;
   3085 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3086 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3087 		/* Update descriptor */
   3088 		rxr->rx_base[j].read.pkt_addr =
   3089 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3090 	}
   3091 
   3092 
   3093 	/* Setup our descriptor indices */
   3094 	rxr->next_to_check = 0;
   3095 	rxr->next_to_refresh = 0;
   3096 	rxr->lro_enabled = FALSE;
   3097 	rxr->rx_split_packets.ev_count = 0;
   3098 	rxr->rx_bytes.ev_count = 0;
   3099 	rxr->discard = FALSE;
   3100 
   3101 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3102 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3103 
   3104 #ifdef LRO
   3105 	/*
   3106 	** Now set up the LRO interface:
   3107 	*/
   3108 	if (ifp->if_capenable & IFCAP_LRO) {
   3109 		device_t dev = adapter->dev;
   3110 		int err = tcp_lro_init(lro);
   3111 		if (err) {
   3112 			device_printf(dev, "LRO Initialization failed!\n");
   3113 			goto fail;
   3114 		}
   3115 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   3116 		rxr->lro_enabled = TRUE;
   3117 		lro->ifp = adapter->ifp;
   3118 	}
   3119 #endif /* LRO */
   3120 
   3121 	IXV_RX_UNLOCK(rxr);
   3122 	return (0);
   3123 
   3124 fail:
   3125 	ixv_free_receive_ring(rxr);
   3126 	IXV_RX_UNLOCK(rxr);
   3127 	return (error);
   3128 }
   3129 
   3130 /*********************************************************************
   3131  *
   3132  *  Initialize all receive rings.
   3133  *
   3134  **********************************************************************/
   3135 static int
   3136 ixv_setup_receive_structures(struct adapter *adapter)
   3137 {
   3138 	struct rx_ring *rxr = adapter->rx_rings;
   3139 	int j;
   3140 
   3141 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   3142 		if (ixv_setup_receive_ring(rxr))
   3143 			goto fail;
   3144 
   3145 	return (0);
   3146 fail:
   3147 	/*
    3148 	 * Free RX buffers allocated so far; we will only handle
    3149 	 * the rings that completed, as the failing case will have
    3150 	 * cleaned up for itself. 'j' failed, so it's the terminus.
   3151 	 */
   3152 	for (int i = 0; i < j; ++i) {
   3153 		rxr = &adapter->rx_rings[i];
   3154 		ixv_free_receive_ring(rxr);
   3155 	}
   3156 
   3157 	return (ENOBUFS);
   3158 }
   3159 
   3160 /*********************************************************************
   3161  *
   3162  *  Setup receive registers and features.
   3163  *
   3164  **********************************************************************/
   3165 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
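/*
 * Buffer sizes written into SRRCTL below are in hardware units: the
 * packet buffer size field is in 1 KB units (hence shifting the
 * 2048/4096 byte cluster sizes by IXGBE_SRRCTL_BSIZEPKT_SHIFT), and the
 * header buffer size, when header split is enabled, is placed in the
 * BSIZEHDR field using the shift defined above.
 */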
   3166 
   3167 static void
   3168 ixv_initialize_receive_units(struct adapter *adapter)
   3169 {
   3170 	int i;
   3171 	struct	rx_ring	*rxr = adapter->rx_rings;
   3172 	struct ixgbe_hw	*hw = &adapter->hw;
   3173 	struct ifnet   *ifp = adapter->ifp;
   3174 	u32		bufsz, fctrl, rxcsum, hlreg;
   3175 
   3176 
   3177 	/* Enable broadcasts */
   3178 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   3179 	fctrl |= IXGBE_FCTRL_BAM;
   3180 	fctrl |= IXGBE_FCTRL_DPF;
   3181 	fctrl |= IXGBE_FCTRL_PMCF;
   3182 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   3183 
   3184 	/* Set for Jumbo Frames? */
   3185 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   3186 	if (ifp->if_mtu > ETHERMTU) {
   3187 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   3188 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3189 	} else {
   3190 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   3191 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3192 	}
   3193 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   3194 
   3195 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   3196 		u64 rdba = rxr->rxdma.dma_paddr;
   3197 		u32 reg, rxdctl;
   3198 
   3199 		/* Do the queue enabling first */
   3200 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3201 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3202 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   3203 		for (int k = 0; k < 10; k++) {
   3204 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   3205 			    IXGBE_RXDCTL_ENABLE)
   3206 				break;
   3207 			else
   3208 				msec_delay(1);
   3209 		}
   3210 		wmb();
   3211 
   3212 		/* Setup the Base and Length of the Rx Descriptor Ring */
   3213 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   3214 		    (rdba & 0x00000000ffffffffULL));
   3215 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   3216 		    (rdba >> 32));
   3217 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   3218 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   3219 
   3220 		/* Set up the SRRCTL register */
   3221 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   3222 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   3223 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   3224 		reg |= bufsz;
   3225 		if (rxr->hdr_split) {
   3226 			/* Use a standard mbuf for the header */
   3227 			reg |= ((IXV_RX_HDR <<
   3228 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   3229 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   3230 			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   3231 		} else
   3232 			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   3233 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   3234 
   3235 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   3236 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   3237 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   3238 		    adapter->num_rx_desc - 1);
   3239 	}
   3240 
   3241 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   3242 
   3243 	if (ifp->if_capenable & IFCAP_RXCSUM)
   3244 		rxcsum |= IXGBE_RXCSUM_PCSD;
   3245 
   3246 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   3247 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   3248 
   3249 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   3250 
   3251 	return;
   3252 }
   3253 
   3254 /*********************************************************************
   3255  *
   3256  *  Free all receive rings.
   3257  *
   3258  **********************************************************************/
   3259 static void
   3260 ixv_free_receive_structures(struct adapter *adapter)
   3261 {
   3262 	struct rx_ring *rxr = adapter->rx_rings;
   3263 
   3264 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   3265 #ifdef LRO
   3266 		struct lro_ctrl		*lro = &rxr->lro;
   3267 #endif /* LRO */
   3268 		ixv_free_receive_buffers(rxr);
   3269 #ifdef LRO
   3270 		/* Free LRO memory */
   3271 		tcp_lro_free(lro);
   3272 #endif /* LRO */
   3273 		/* Free the ring memory as well */
   3274 		ixv_dma_free(adapter, &rxr->rxdma);
   3275 		IXV_RX_LOCK_DESTROY(rxr);
   3276 	}
   3277 
   3278 	free(adapter->rx_rings, M_DEVBUF);
   3279 }
   3280 
   3281 
   3282 /*********************************************************************
   3283  *
   3284  *  Free receive ring data structures
   3285  *
   3286  **********************************************************************/
   3287 static void
   3288 ixv_free_receive_buffers(struct rx_ring *rxr)
   3289 {
   3290 	struct adapter		*adapter = rxr->adapter;
   3291 	struct ixv_rx_buf	*rxbuf;
   3292 
   3293 	INIT_DEBUGOUT("free_receive_structures: begin");
   3294 
   3295 	/* Cleanup any existing buffers */
   3296 	if (rxr->rx_buffers != NULL) {
   3297 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   3298 			rxbuf = &rxr->rx_buffers[i];
   3299 			if (rxbuf->m_head != NULL) {
   3300 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3301 				    BUS_DMASYNC_POSTREAD);
   3302 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3303 				rxbuf->m_head->m_flags |= M_PKTHDR;
   3304 				m_freem(rxbuf->m_head);
   3305 			}
   3306 			if (rxbuf->m_pack != NULL) {
   3307 				/* XXX not ixgbe_* ? */
   3308 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3309 				    0, rxbuf->m_pack->m_pkthdr.len,
   3310 				    BUS_DMASYNC_POSTREAD);
   3311 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3312 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   3313 				m_freem(rxbuf->m_pack);
   3314 			}
   3315 			rxbuf->m_head = NULL;
   3316 			rxbuf->m_pack = NULL;
   3317 			if (rxbuf->hmap != NULL) {
   3318 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   3319 				rxbuf->hmap = NULL;
   3320 			}
   3321 			if (rxbuf->pmap != NULL) {
   3322 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   3323 				rxbuf->pmap = NULL;
   3324 			}
   3325 		}
   3326 		if (rxr->rx_buffers != NULL) {
   3327 			free(rxr->rx_buffers, M_DEVBUF);
   3328 			rxr->rx_buffers = NULL;
   3329 		}
   3330 	}
   3331 
   3332 	if (rxr->htag != NULL) {
   3333 		ixgbe_dma_tag_destroy(rxr->htag);
   3334 		rxr->htag = NULL;
   3335 	}
   3336 	if (rxr->ptag != NULL) {
   3337 		ixgbe_dma_tag_destroy(rxr->ptag);
   3338 		rxr->ptag = NULL;
   3339 	}
   3340 
   3341 	return;
   3342 }
   3343 
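/*
** Pass a completed mbuf chain up the stack: hand it to software LRO
** when enabled and the packet qualifies, otherwise drop the RX lock,
** tap any BPF listeners and call if_input() directly.
*/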
   3344 static __inline void
   3345 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   3346 {
   3347 	int s;
   3348 
   3349 #ifdef LRO
   3350 	struct adapter	*adapter = ifp->if_softc;
   3351 	struct ethercom *ec = &adapter->osdep.ec;
   3352 
   3353         /*
    3354          * At the moment LRO is only for IPv4/TCP packets whose TCP checksum
    3355          * has been computed by hardware.  The packet also must not carry a
    3356          * VLAN tag in the Ethernet header.
   3357          */
   3358         if (rxr->lro_enabled &&
   3359             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   3360             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3361             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   3362             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
   3363             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   3364             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   3365                 /*
   3366                  * Send to the stack if:
    3367                  *  - LRO not enabled, or
    3368                  *  - no LRO resources, or
    3369                  *  - lro enqueue fails
   3370                  */
   3371                 if (rxr->lro.lro_cnt != 0)
   3372                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   3373                                 return;
   3374         }
   3375 #endif /* LRO */
   3376 
   3377 	IXV_RX_UNLOCK(rxr);
   3378 
   3379 	s = splnet();
   3380 	/* Pass this up to any BPF listeners. */
   3381 	bpf_mtap(ifp, m);
   3382         (*ifp->if_input)(ifp, m);
   3383 	splx(s);
   3384 
   3385 	IXV_RX_LOCK(rxr);
   3386 }
   3387 
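/*
** Discard the mbufs tied to descriptor 'i': free any partially
** assembled chain plus the header and payload buffers so the normal
** refresh path allocates and maps replacements.
*/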
   3388 static __inline void
   3389 ixv_rx_discard(struct rx_ring *rxr, int i)
   3390 {
   3391 	struct ixv_rx_buf	*rbuf;
   3392 
   3393 	rbuf = &rxr->rx_buffers[i];
   3394 	if (rbuf->fmp != NULL) {/* Partial chain ? */
   3395 		rbuf->fmp->m_flags |= M_PKTHDR;
   3396 		m_freem(rbuf->fmp);
   3397 		rbuf->fmp = NULL;
   3398 	}
   3399 
   3400 	/*
   3401 	** With advanced descriptors the writeback
    3402 	** clobbers the buffer addresses, so it's easier
   3403 	** to just free the existing mbufs and take
   3404 	** the normal refresh path to get new buffers
   3405 	** and mapping.
   3406 	*/
   3407 	if (rbuf->m_head) {
   3408 		m_free(rbuf->m_head);
   3409 		rbuf->m_head = NULL;
   3410 	}
   3411 
   3412 	if (rbuf->m_pack) {
   3413 		m_free(rbuf->m_pack);
   3414 		rbuf->m_pack = NULL;
   3415 	}
   3416 
   3417 	return;
   3418 }
   3419 
   3420 
   3421 /*********************************************************************
   3422  *
    3423  *  This routine executes in interrupt context. It replenishes
    3424  *  the mbufs in the descriptor ring and sends data which has been
    3425  *  DMA'd into host memory to the upper layer.
   3426  *
   3427  *  We loop at most count times if count is > 0, or until done if
   3428  *  count < 0.
   3429  *
   3430  *  Return TRUE for more work, FALSE for all clean.
   3431  *********************************************************************/
   3432 static bool
   3433 ixv_rxeof(struct ix_queue *que, int count)
   3434 {
   3435 	struct adapter		*adapter = que->adapter;
   3436 	struct rx_ring		*rxr = que->rxr;
   3437 	struct ifnet		*ifp = adapter->ifp;
   3438 #ifdef LRO
   3439 	struct lro_ctrl		*lro = &rxr->lro;
   3440 	struct lro_entry	*queued;
   3441 #endif /* LRO */
   3442 	int			i, nextp, processed = 0;
   3443 	u32			staterr = 0;
   3444 	union ixgbe_adv_rx_desc	*cur;
   3445 	struct ixv_rx_buf	*rbuf, *nbuf;
   3446 
   3447 	IXV_RX_LOCK(rxr);
   3448 
   3449 	for (i = rxr->next_to_check; count != 0;) {
   3450 		struct mbuf	*sendmp, *mh, *mp;
   3451 		u32		ptype;
   3452 		u16		hlen, plen, hdr, vtag;
   3453 		bool		eop;
   3454 
   3455 		/* Sync the ring. */
   3456 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3457 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   3458 
   3459 		cur = &rxr->rx_base[i];
   3460 		staterr = le32toh(cur->wb.upper.status_error);
   3461 
   3462 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   3463 			break;
   3464 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   3465 			break;
   3466 
   3467 		count--;
   3468 		sendmp = NULL;
   3469 		nbuf = NULL;
   3470 		cur->wb.upper.status_error = 0;
   3471 		rbuf = &rxr->rx_buffers[i];
   3472 		mh = rbuf->m_head;
   3473 		mp = rbuf->m_pack;
   3474 
   3475 		plen = le16toh(cur->wb.upper.length);
   3476 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   3477 		    IXGBE_RXDADV_PKTTYPE_MASK;
   3478 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   3479 		vtag = le16toh(cur->wb.upper.vlan);
   3480 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   3481 
   3482 		/* Make sure all parts of a bad packet are discarded */
   3483 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   3484 		    (rxr->discard)) {
   3485 			ifp->if_ierrors++;
   3486 			rxr->rx_discarded.ev_count++;
   3487 			if (!eop)
   3488 				rxr->discard = TRUE;
   3489 			else
   3490 				rxr->discard = FALSE;
   3491 			ixv_rx_discard(rxr, i);
   3492 			goto next_desc;
   3493 		}
   3494 
   3495 		if (!eop) {
   3496 			nextp = i + 1;
   3497 			if (nextp == adapter->num_rx_desc)
   3498 				nextp = 0;
   3499 			nbuf = &rxr->rx_buffers[nextp];
   3500 			prefetch(nbuf);
   3501 		}
   3502 		/*
   3503 		** The header mbuf is ONLY used when header
   3504 		** split is enabled, otherwise we get normal
    3505 		** behavior, i.e., both header and payload
   3506 		** are DMA'd into the payload buffer.
   3507 		**
   3508 		** Rather than using the fmp/lmp global pointers
   3509 		** we now keep the head of a packet chain in the
   3510 		** buffer struct and pass this along from one
   3511 		** descriptor to the next, until we get EOP.
   3512 		*/
   3513 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   3514 			/* This must be an initial descriptor */
   3515 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   3516 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   3517 			if (hlen > IXV_RX_HDR)
   3518 				hlen = IXV_RX_HDR;
   3519 			mh->m_len = hlen;
   3520 			mh->m_flags |= M_PKTHDR;
   3521 			mh->m_next = NULL;
   3522 			mh->m_pkthdr.len = mh->m_len;
   3523 			/* Null buf pointer so it is refreshed */
   3524 			rbuf->m_head = NULL;
   3525 			/*
    3526 			** Check the payload length; this
    3527 			** could be zero if it's a small
    3528 			** packet.
   3529 			*/
   3530 			if (plen > 0) {
   3531 				mp->m_len = plen;
   3532 				mp->m_next = NULL;
   3533 				mp->m_flags &= ~M_PKTHDR;
   3534 				mh->m_next = mp;
   3535 				mh->m_pkthdr.len += mp->m_len;
   3536 				/* Null buf pointer so it is refreshed */
   3537 				rbuf->m_pack = NULL;
   3538 				rxr->rx_split_packets.ev_count++;
   3539 			}
   3540 			/*
   3541 			** Now create the forward
   3542 			** chain so when complete
    3543 			** we won't have to.
   3544 			*/
   3545                         if (eop == 0) {
   3546 				/* stash the chain head */
   3547                                 nbuf->fmp = mh;
   3548 				/* Make forward chain */
   3549                                 if (plen)
   3550                                         mp->m_next = nbuf->m_pack;
   3551                                 else
   3552                                         mh->m_next = nbuf->m_pack;
   3553                         } else {
   3554 				/* Singlet, prepare to send */
   3555                                 sendmp = mh;
   3556                                 if (VLAN_ATTACHED(&adapter->osdep.ec) &&
   3557 				  (staterr & IXGBE_RXD_STAT_VP)) {
   3558 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3559 					    printf("%s: could not apply VLAN "
   3560 					        "tag", __func__));
   3561                                 }
   3562                         }
   3563 		} else {
   3564 			/*
   3565 			** Either no header split, or a
   3566 			** secondary piece of a fragmented
   3567 			** split packet.
   3568 			*/
   3569 			mp->m_len = plen;
    3570 			/*
    3571 			** See if there is a stored chain head
    3572 			** from a previous descriptor.
    3573 			*/
   3574 			sendmp = rbuf->fmp;
   3575 			rbuf->m_pack = rbuf->fmp = NULL;
   3576 
   3577 			if (sendmp != NULL) /* secondary frag */
   3578 				sendmp->m_pkthdr.len += mp->m_len;
   3579 			else {
   3580 				/* first desc of a non-ps chain */
   3581 				sendmp = mp;
   3582 				sendmp->m_flags |= M_PKTHDR;
   3583 				sendmp->m_pkthdr.len = mp->m_len;
   3584 				if (staterr & IXGBE_RXD_STAT_VP) {
   3585 					/* XXX Do something reasonable on
   3586 					 * error.
   3587 					 */
   3588 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3589 					    printf("%s: could not apply VLAN "
   3590 					        "tag", __func__));
   3591 				}
   3592                         }
   3593 			/* Pass the head pointer on */
   3594 			if (eop == 0) {
   3595 				nbuf->fmp = sendmp;
   3596 				sendmp = NULL;
   3597 				mp->m_next = nbuf->m_pack;
   3598 			}
   3599 		}
   3600 		++processed;
   3601 		/* Sending this frame? */
   3602 		if (eop) {
   3603 			sendmp->m_pkthdr.rcvif = ifp;
   3604 			ifp->if_ipackets++;
   3605 			rxr->rx_packets.ev_count++;
   3606 			/* capture data for AIM */
   3607 			rxr->bytes += sendmp->m_pkthdr.len;
   3608 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   3609 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   3610 				ixv_rx_checksum(staterr, sendmp, ptype,
   3611 				   &adapter->stats);
   3612 			}
   3613 #if __FreeBSD_version >= 800000
   3614 			sendmp->m_pkthdr.flowid = que->msix;
   3615 			sendmp->m_flags |= M_FLOWID;
   3616 #endif
   3617 		}
   3618 next_desc:
   3619 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3620 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3621 
   3622 		/* Advance our pointers to the next descriptor. */
   3623 		if (++i == adapter->num_rx_desc)
   3624 			i = 0;
   3625 
   3626 		/* Now send to the stack or do LRO */
   3627 		if (sendmp != NULL)
   3628 			ixv_rx_input(rxr, ifp, sendmp, ptype);
   3629 
    3630 		/* Every 8 descriptors we refresh the mbufs */
   3631 		if (processed == 8) {
   3632 			ixv_refresh_mbufs(rxr, i);
   3633 			processed = 0;
   3634 		}
   3635 	}
   3636 
   3637 	/* Refresh any remaining buf structs */
   3638 	if (ixv_rx_unrefreshed(rxr))
   3639 		ixv_refresh_mbufs(rxr, i);
   3640 
   3641 	rxr->next_to_check = i;
   3642 
   3643 #ifdef LRO
   3644 	/*
   3645 	 * Flush any outstanding LRO work
   3646 	 */
   3647 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   3648 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   3649 		tcp_lro_flush(lro, queued);
   3650 	}
   3651 #endif /* LRO */
   3652 
   3653 	IXV_RX_UNLOCK(rxr);
   3654 
   3655 	/*
   3656 	** We still have cleaning to do?
   3657 	** Schedule another interrupt if so.
   3658 	*/
   3659 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   3660 		ixv_rearm_queues(adapter, (u64)(1ULL << que->msix));
   3661 		return true;
   3662 	}
   3663 
   3664 	return false;
   3665 }
   3666 
   3667 
   3668 /*********************************************************************
   3669  *
   3670  *  Verify that the hardware indicated that the checksum is valid.
    3671  *  Inform the stack about the status of the checksum so that the
    3672  *  stack doesn't spend time verifying it again.
   3673  *
   3674  *********************************************************************/
   3675 static void
   3676 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   3677     struct ixgbevf_hw_stats *stats)
   3678 {
   3679 	u16	status = (u16) staterr;
   3680 	u8	errors = (u8) (staterr >> 24);
   3681 #if 0
   3682 	bool	sctp = FALSE;
   3683 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3684 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   3685 		sctp = TRUE;
   3686 #endif
   3687 	if (status & IXGBE_RXD_STAT_IPCS) {
   3688 		stats->ipcs.ev_count++;
   3689 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   3690 			/* IP Checksum Good */
   3691 			mp->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   3692 
   3693 		} else {
   3694 			stats->ipcs_bad.ev_count++;
   3695 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   3696 		}
   3697 	}
   3698 	if (status & IXGBE_RXD_STAT_L4CS) {
   3699 		stats->l4cs.ev_count++;
   3700 		u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   3701 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   3702 			mp->m_pkthdr.csum_flags |= type;
   3703 		} else {
   3704 			stats->l4cs_bad.ev_count++;
   3705 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   3706 		}
   3707 	}
   3708 	return;
   3709 }
   3710 
   3711 static void
   3712 ixv_setup_vlan_support(struct adapter *adapter)
   3713 {
   3714 	struct ixgbe_hw *hw = &adapter->hw;
   3715 	u32		ctrl, vid, vfta, retry;
   3716 
   3717 
   3718 	/*
    3719 	** We get here through init_locked, meaning
    3720 	** a soft reset; this has already cleared
    3721 	** the VFTA and other state, so if no VLANs
    3722 	** have been registered there is nothing to do.
   3723 	*/
   3724 	if (adapter->num_vlans == 0)
   3725 		return;
   3726 
   3727 	/* Enable the queues */
   3728 	for (int i = 0; i < adapter->num_queues; i++) {
   3729 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3730 		ctrl |= IXGBE_RXDCTL_VME;
   3731 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   3732 	}
   3733 
   3734 	/*
    3735 	** A soft reset zeroes out the VFTA, so
   3736 	** we need to repopulate it now.
   3737 	*/
   3738 	for (int i = 0; i < VFTA_SIZE; i++) {
   3739 		if (ixv_shadow_vfta[i] == 0)
   3740 			continue;
   3741 		vfta = ixv_shadow_vfta[i];
   3742 		/*
    3743 		** Reconstruct the VLAN IDs
   3744 		** based on the bits set in each
   3745 		** of the array ints.
   3746 		*/
    3747 		for (int j = 0; j < 32; j++) {
   3748 			retry = 0;
   3749 			if ((vfta & (1 << j)) == 0)
   3750 				continue;
   3751 			vid = (i * 32) + j;
   3752 			/* Call the shared code mailbox routine */
   3753 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   3754 				if (++retry > 5)
   3755 					break;
   3756 			}
   3757 		}
   3758 	}
   3759 }
   3760 
   3761 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   3762 /*
    3763 ** This routine is run via a vlan config EVENT;
    3764 ** it enables us to use the HW Filter table since
    3765 ** we can get the vlan id. This just creates the
    3766 ** entry in the soft version of the VFTA; init will
    3767 ** repopulate the real table.
   3768 */
   3769 static void
   3770 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3771 {
   3772 	struct adapter	*adapter = ifp->if_softc;
   3773 	u16		index, bit;
   3774 
    3775 	if (ifp->if_softc != arg)   /* Not our event */
   3776 		return;
   3777 
   3778 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3779 		return;
   3780 
   3781 	IXV_CORE_LOCK(adapter);
   3782 	index = (vtag >> 5) & 0x7F;
   3783 	bit = vtag & 0x1F;
   3784 	ixv_shadow_vfta[index] |= (1 << bit);
   3785 	/* Re-init to load the changes */
   3786 	ixv_init_locked(adapter);
   3787 	IXV_CORE_UNLOCK(adapter);
   3788 }
   3789 
   3790 /*
    3791 ** This routine is run via a vlan
    3792 ** unconfig EVENT; it removes our entry
    3793 ** from the soft VFTA.
   3794 */
   3795 static void
   3796 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3797 {
   3798 	struct adapter	*adapter = ifp->if_softc;
   3799 	u16		index, bit;
   3800 
    3801 	if (ifp->if_softc != arg)
   3802 		return;
   3803 
   3804 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3805 		return;
   3806 
   3807 	IXV_CORE_LOCK(adapter);
   3808 	index = (vtag >> 5) & 0x7F;
   3809 	bit = vtag & 0x1F;
   3810 	ixv_shadow_vfta[index] &= ~(1 << bit);
   3811 	/* Re-init to load the changes */
   3812 	ixv_init_locked(adapter);
   3813 	IXV_CORE_UNLOCK(adapter);
   3814 }
   3815 #endif
   3816 
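/*
** Enable interrupts: program the VF interrupt mask and auto-clear
** registers, then enable each queue's MSIX vector individually.
*/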
   3817 static void
   3818 ixv_enable_intr(struct adapter *adapter)
   3819 {
   3820 	struct ixgbe_hw *hw = &adapter->hw;
   3821 	struct ix_queue *que = adapter->queues;
   3822 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   3823 
   3824 
   3825 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   3826 
   3827 	mask = IXGBE_EIMS_ENABLE_MASK;
   3828 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   3829 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   3830 
   3831         for (int i = 0; i < adapter->num_queues; i++, que++)
   3832 		ixv_enable_queue(adapter, que->msix);
   3833 
   3834 	IXGBE_WRITE_FLUSH(hw);
   3835 
   3836 	return;
   3837 }
   3838 
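/*
** Disable interrupts: clear the auto-clear register and mask every
** cause in VTEIMC.
*/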
   3839 static void
   3840 ixv_disable_intr(struct adapter *adapter)
   3841 {
   3842 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   3843 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   3844 	IXGBE_WRITE_FLUSH(&adapter->hw);
   3845 	return;
   3846 }
   3847 
   3848 /*
   3849 ** Setup the correct IVAR register for a particular MSIX interrupt
   3850 **  - entry is the register array entry
   3851 **  - vector is the MSIX vector for this queue
   3852 **  - type is RX/TX/MISC
   3853 */
   3854 static void
   3855 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3856 {
   3857 	struct ixgbe_hw *hw = &adapter->hw;
   3858 	u32 ivar, index;
   3859 
   3860 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3861 
   3862 	if (type == -1) { /* MISC IVAR */
   3863 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   3864 		ivar &= ~0xFF;
   3865 		ivar |= vector;
   3866 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   3867 	} else {	/* RX/TX IVARS */
   3868 		index = (16 * (entry & 1)) + (8 * type);
   3869 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   3870 		ivar &= ~(0xFF << index);
   3871 		ivar |= (vector << index);
   3872 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   3873 	}
   3874 }
   3875 
   3876 static void
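/*
** Map each queue's RX and TX causes to its MSIX vector via the IVAR
** registers, seed an initial EITR value, and route the mailbox/link
** interrupt to its own vector.
*/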
   3877 ixv_configure_ivars(struct adapter *adapter)
   3878 {
   3879 	struct  ix_queue *que = adapter->queues;
   3880 
   3881         for (int i = 0; i < adapter->num_queues; i++, que++) {
   3882 		/* First the RX queue entry */
   3883                 ixv_set_ivar(adapter, i, que->msix, 0);
   3884 		/* ... and the TX */
   3885 		ixv_set_ivar(adapter, i, que->msix, 1);
   3886 		/* Set an initial value in EITR */
   3887                 IXGBE_WRITE_REG(&adapter->hw,
   3888                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   3889 	}
   3890 
   3891 	/* For the Link interrupt */
   3892         ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
   3893 }
   3894 
   3895 
   3896 /*
   3897 ** Tasklet handler for MSIX MBX interrupts
    3898 **  - run outside of interrupt context since it might sleep
   3899 */
   3900 static void
   3901 ixv_handle_mbx(void *context)
   3902 {
   3903 	struct adapter  *adapter = context;
   3904 
   3905 	ixgbe_check_link(&adapter->hw,
   3906 	    &adapter->link_speed, &adapter->link_up, 0);
   3907 	ixv_update_link_status(adapter);
   3908 }
   3909 
   3910 /*
    3911 ** The VF stats registers never have a truly virgin
   3912 ** starting point, so this routine tries to make an
   3913 ** artificial one, marking ground zero on attach as
   3914 ** it were.
   3915 */
   3916 static void
   3917 ixv_save_stats(struct adapter *adapter)
   3918 {
   3919 	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
   3920 		adapter->stats.saved_reset_vfgprc +=
   3921 		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
   3922 		adapter->stats.saved_reset_vfgptc +=
   3923 		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
   3924 		adapter->stats.saved_reset_vfgorc +=
   3925 		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
   3926 		adapter->stats.saved_reset_vfgotc +=
   3927 		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
   3928 		adapter->stats.saved_reset_vfmprc +=
   3929 		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
   3930 	}
   3931 }
   3932 
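/*
** Snapshot the current VF hardware counters; these become the 'last'
** values ixv_update_stats() compares against to detect counter wrap,
** and the 'base' values referenced by ixv_save_stats().
*/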
   3933 static void
   3934 ixv_init_stats(struct adapter *adapter)
   3935 {
   3936 	struct ixgbe_hw *hw = &adapter->hw;
   3937 
   3938 	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   3939 	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   3940 	adapter->stats.last_vfgorc |=
   3941 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   3942 
   3943 	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   3944 	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   3945 	adapter->stats.last_vfgotc |=
   3946 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   3947 
   3948 	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   3949 
   3950 	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
   3951 	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
   3952 	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
   3953 	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
   3954 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
   3955 }
   3956 
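/*
** The VF counters are free-running 32-bit (or 36-bit) hardware
** registers that wrap; these macros fold each new reading into a
** 64-bit software count, adding the register span on wraparound.
*/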
   3957 #define UPDATE_STAT_32(reg, last, count)		\
   3958 {							\
   3959 	u32 current = IXGBE_READ_REG(hw, reg);		\
   3960 	if (current < last)				\
   3961 		count += 0x100000000LL;			\
   3962 	last = current;					\
   3963 	count &= 0xFFFFFFFF00000000LL;			\
   3964 	count |= current;				\
   3965 }
   3966 
   3967 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   3968 {							\
   3969 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   3970 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   3971 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   3972 	if (current < last)				\
   3973 		count += 0x1000000000LL;		\
   3974 	last = current;					\
   3975 	count &= 0xFFFFFFF000000000LL;			\
   3976 	count |= current;				\
   3977 }
   3978 
   3979 /*
   3980 ** ixv_update_stats - Update the board statistics counters.
   3981 */
   3982 void
   3983 ixv_update_stats(struct adapter *adapter)
   3984 {
   3985         struct ixgbe_hw *hw = &adapter->hw;
   3986 
   3987         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
   3988 	    adapter->stats.vfgprc);
   3989         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
   3990 	    adapter->stats.vfgptc);
   3991         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
   3992 	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
   3993         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
   3994 	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
   3995         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
   3996 	    adapter->stats.vfmprc);
   3997 }
   3998 
   3999 /**********************************************************************
   4000  *
    4001  *  This routine is called from the stats sysctl handler (ixv_sysctl_stats).
   4002  *  This routine provides a way to take a look at important statistics
   4003  *  maintained by the driver and hardware.
   4004  *
   4005  **********************************************************************/
   4006 static void
   4007 ixv_print_hw_stats(struct adapter * adapter)
   4008 {
   4009         device_t dev = adapter->dev;
   4010 
   4011         device_printf(dev,"Std Mbuf Failed = %lu\n",
   4012                adapter->mbuf_defrag_failed.ev_count);
   4013         device_printf(dev,"Driver dropped packets = %lu\n",
   4014                adapter->dropped_pkts.ev_count);
    4015         device_printf(dev, "watchdog timeouts = %lu\n",
   4016                adapter->watchdog_events.ev_count);
   4017 
   4018         device_printf(dev,"Good Packets Rcvd = %llu\n",
   4019                (long long)adapter->stats.vfgprc);
   4020         device_printf(dev,"Good Packets Xmtd = %llu\n",
   4021                (long long)adapter->stats.vfgptc);
   4022         device_printf(dev,"TSO Transmissions = %lu\n",
   4023                adapter->tso_tx.ev_count);
   4024 
   4025 }
   4026 
   4027 /**********************************************************************
   4028  *
    4029  *  This routine is called from the debug sysctl handler (ixv_sysctl_debug).
   4030  *  This routine provides a way to take a look at important statistics
   4031  *  maintained by the driver and hardware.
   4032  *
   4033  **********************************************************************/
   4034 static void
   4035 ixv_print_debug_info(struct adapter *adapter)
   4036 {
   4037         device_t dev = adapter->dev;
   4038         struct ixgbe_hw         *hw = &adapter->hw;
   4039         struct ix_queue         *que = adapter->queues;
   4040         struct rx_ring          *rxr;
   4041         struct tx_ring          *txr;
   4042 #ifdef LRO
   4043         struct lro_ctrl         *lro;
   4044 #endif /* LRO */
   4045 
   4046         device_printf(dev,"Error Byte Count = %u \n",
   4047             IXGBE_READ_REG(hw, IXGBE_ERRBC));
   4048 
   4049         for (int i = 0; i < adapter->num_queues; i++, que++) {
   4050                 txr = que->txr;
   4051                 rxr = que->rxr;
   4052 #ifdef LRO
   4053                 lro = &rxr->lro;
   4054 #endif /* LRO */
   4055                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
   4056                     que->msix, (long)que->irqs);
   4057                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
   4058                     rxr->me, (long long)rxr->rx_packets.ev_count);
   4059                 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
   4060                     rxr->me, (long long)rxr->rx_split_packets.ev_count);
   4061                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
   4062                     rxr->me, (long)rxr->rx_bytes.ev_count);
   4063 #ifdef LRO
   4064                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
   4065                     rxr->me, lro->lro_queued);
   4066                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
   4067                     rxr->me, lro->lro_flushed);
   4068 #endif /* LRO */
   4069                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
   4070                     txr->me, (long)txr->total_packets.ev_count);
   4071                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
   4072                     txr->me, (long)txr->no_desc_avail.ev_count);
   4073         }
   4074 
   4075         device_printf(dev,"MBX IRQ Handled: %lu\n",
   4076             (long)adapter->mbx_irq.ev_count);
   4077         return;
   4078 }
   4079 
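/*
** Sysctl handler: writing 1 to the node dumps the hardware statistics
** via ixv_print_hw_stats().
*/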
   4080 static int
   4081 ixv_sysctl_stats(SYSCTLFN_ARGS)
   4082 {
   4083 	struct sysctlnode node;
   4084 	int             error;
   4085 	int		result;
   4086 	struct adapter *adapter;
   4087 
   4088 	node = *rnode;
   4089 	adapter = (struct adapter *)node.sysctl_data;
   4090 	node.sysctl_data = &result;
   4091 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4092 	if (error != 0)
   4093 		return error;
   4094 
   4095 	if (result == 1)
   4096 		ixv_print_hw_stats(adapter);
   4097 
   4098 	return 0;
   4099 }
   4100 
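/*
** Sysctl handler: writing 1 to the node dumps per-queue debug
** statistics via ixv_print_debug_info().
*/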
   4101 static int
   4102 ixv_sysctl_debug(SYSCTLFN_ARGS)
   4103 {
   4104 	struct sysctlnode node;
   4105 	int error, result;
   4106 	struct adapter *adapter;
   4107 
   4108 	node = *rnode;
   4109 	adapter = (struct adapter *)node.sysctl_data;
   4110 	node.sysctl_data = &result;
   4111 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4112 
   4113 	if (error)
   4114 		return error;
   4115 
   4116 	if (result == 1)
   4117 		ixv_print_debug_info(adapter);
   4118 
   4119 	return 0;
   4120 }
   4121 
   4122 /*
   4123 ** Set flow control using sysctl:
   4124 ** Flow control values:
   4125 ** 	0 - off
   4126 **	1 - rx pause
   4127 **	2 - tx pause
   4128 **	3 - full
   4129 */
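/*
** Usage sketch (the node path below is only an assumed example; the
** actual location depends on where the handler is attached, which is
** outside this section):
**	sysctl -w hw.ixv0.flow_control=3	# request full flow control
*/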
   4130 static int
   4131 ixv_set_flowcntl(SYSCTLFN_ARGS)
   4132 {
   4133 	struct sysctlnode node;
   4134 	int error;
   4135 	struct adapter *adapter;
   4136 
   4137 	node = *rnode;
   4138 	adapter = (struct adapter *)node.sysctl_data;
   4139 	node.sysctl_data = &ixv_flow_control;
   4140 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4141 
   4142 	if (error)
   4143 		return (error);
   4144 
   4145 	switch (ixv_flow_control) {
   4146 		case ixgbe_fc_rx_pause:
   4147 		case ixgbe_fc_tx_pause:
   4148 		case ixgbe_fc_full:
   4149 			adapter->hw.fc.requested_mode = ixv_flow_control;
   4150 			break;
   4151 		case ixgbe_fc_none:
   4152 		default:
   4153 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4154 	}
   4155 
   4156 	ixgbe_fc_enable(&adapter->hw, 0);
   4157 	return error;
   4158 }
   4159 
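/*
** Create this device's sysctl root node (hw.<devicename>) under which
** the driver's tunables are attached.
*/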
   4160 const struct sysctlnode *
   4161 ixv_sysctl_instance(struct adapter *adapter)
   4162 {
   4163 	const char *dvname;
   4164 	struct sysctllog **log;
   4165 	int rc;
   4166 	const struct sysctlnode *rnode;
   4167 
   4168 	log = &adapter->sysctllog;
   4169 	dvname = device_xname(adapter->dev);
   4170 
   4171 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   4172 	    0, CTLTYPE_NODE, dvname,
   4173 	    SYSCTL_DESCR("ixv information and settings"),
   4174 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   4175 		goto err;
   4176 
   4177 	return rnode;
   4178 err:
   4179 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   4180 	return NULL;
   4181 }
   4182 
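/*
** Attach a read/write integer sysctl (used for the RX processing
** limit) under the device's root node and set the backing variable
** to its default value.
*/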
   4183 static void
   4184 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
   4185         const char *description, int *limit, int value)
   4186 {
   4187 	const struct sysctlnode *rnode, *cnode;
   4188 	struct sysctllog **log = &adapter->sysctllog;
   4189 
   4190         *limit = value;
   4191 
   4192 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL)
   4193 		aprint_error_dev(adapter->dev,
   4194 		    "could not create sysctl root\n");
   4195 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   4196 	    CTLFLAG_READWRITE,
   4197 	    CTLTYPE_INT,
   4198 	    name, SYSCTL_DESCR(description),
   4199 	    NULL, 0, limit, 0,
   4200 	    CTL_CREATE, CTL_EOL) != 0) {
    4201 		aprint_error_dev(adapter->dev, "%s: could not create sysctl\n",
   4202 		    __func__);
   4203 	}
   4204 }
   4205 
   4206