      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2013, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/ixv.c 275358 2014-12-01 11:45:24Z hselasky $*/
     34 /*$NetBSD: ixv.c,v 1.18 2016/11/25 13:33:24 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixv.h"
     40 #include "vlan.h"
     41 
     42 /*********************************************************************
     43  *  Driver version
     44  *********************************************************************/
     45 char ixv_driver_version[] = "1.1.4";
     46 
     47 /*********************************************************************
     48  *  PCI Device ID Table
     49  *
     50  *  Used by probe to select devices to load on
     51  *  Last field stores an index into ixv_strings
     52  *  Last entry must be all 0s
     53  *
     54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     55  *********************************************************************/
     56 
     57 static ixv_vendor_info_t ixv_vendor_info_array[] =
     58 {
     59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     61 	/* required last entry */
     62 	{0, 0, 0, 0, 0}
     63 };
     64 
     65 /*********************************************************************
     66  *  Table of branding strings
     67  *********************************************************************/
     68 
     69 static const char    *ixv_strings[] = {
     70 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     71 };
     72 
     73 /*********************************************************************
     74  *  Function prototypes
     75  *********************************************************************/
     76 static int      ixv_probe(device_t, cfdata_t, void *);
     77 static void      ixv_attach(device_t, device_t, void *);
     78 static int      ixv_detach(device_t, int);
     79 #if 0
     80 static int      ixv_shutdown(device_t);
     81 #endif
     82 #if __FreeBSD_version < 800000
     83 static void     ixv_start(struct ifnet *);
     84 static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
     85 #else
     86 static int	ixv_mq_start(struct ifnet *, struct mbuf *);
     87 static int	ixv_mq_start_locked(struct ifnet *,
     88 		    struct tx_ring *, struct mbuf *);
     89 static void	ixv_qflush(struct ifnet *);
     90 #endif
     91 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     92 static int	ixv_init(struct ifnet *);
     93 static void	ixv_init_locked(struct adapter *);
     94 static void     ixv_stop(void *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static void     ixv_identify_hardware(struct adapter *);
     98 static int      ixv_allocate_pci_resources(struct adapter *,
     99 		    const struct pci_attach_args *);
    100 static int      ixv_allocate_msix(struct adapter *,
    101 		    const struct pci_attach_args *);
    102 static int	ixv_allocate_queues(struct adapter *);
    103 static int	ixv_setup_msix(struct adapter *);
    104 static void	ixv_free_pci_resources(struct adapter *);
    105 static void     ixv_local_timer(void *);
    106 static void     ixv_setup_interface(device_t, struct adapter *);
    107 static void     ixv_config_link(struct adapter *);
    108 
    109 static int      ixv_allocate_transmit_buffers(struct tx_ring *);
    110 static int	ixv_setup_transmit_structures(struct adapter *);
    111 static void	ixv_setup_transmit_ring(struct tx_ring *);
    112 static void     ixv_initialize_transmit_units(struct adapter *);
    113 static void     ixv_free_transmit_structures(struct adapter *);
    114 static void     ixv_free_transmit_buffers(struct tx_ring *);
    115 
    116 static int      ixv_allocate_receive_buffers(struct rx_ring *);
    117 static int      ixv_setup_receive_structures(struct adapter *);
    118 static int	ixv_setup_receive_ring(struct rx_ring *);
    119 static void     ixv_initialize_receive_units(struct adapter *);
    120 static void     ixv_free_receive_structures(struct adapter *);
    121 static void     ixv_free_receive_buffers(struct rx_ring *);
    122 
    123 static void     ixv_enable_intr(struct adapter *);
    124 static void     ixv_disable_intr(struct adapter *);
    125 static bool	ixv_txeof(struct tx_ring *);
    126 static bool	ixv_rxeof(struct ix_queue *, int);
    127 static void	ixv_rx_checksum(u32, struct mbuf *, u32,
    128 		    struct ixgbevf_hw_stats *);
    129 static void     ixv_set_multi(struct adapter *);
    130 static void     ixv_update_link_status(struct adapter *);
    131 static void	ixv_refresh_mbufs(struct rx_ring *, int);
    132 static int      ixv_xmit(struct tx_ring *, struct mbuf *);
    133 static int	ixv_sysctl_stats(SYSCTLFN_PROTO);
    134 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    135 static int	ixv_set_flowcntl(SYSCTLFN_PROTO);
    136 static int	ixv_dma_malloc(struct adapter *, bus_size_t,
    137 		    struct ixv_dma_alloc *, int);
    138 static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
    139 static void	ixv_add_rx_process_limit(struct adapter *, const char *,
    140 		    const char *, int *, int);
    141 static u32	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
    142 static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
    143 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    144 static void	ixv_configure_ivars(struct adapter *);
    145 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    146 
    147 static void	ixv_setup_vlan_support(struct adapter *);
    148 #if 0
    149 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    150 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    151 #endif
    152 
    153 static void	ixv_save_stats(struct adapter *);
    154 static void	ixv_init_stats(struct adapter *);
    155 static void	ixv_update_stats(struct adapter *);
    156 
    157 static __inline void ixv_rx_discard(struct rx_ring *, int);
    158 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
    159 		    struct mbuf *, u32);
    160 
    161 /* The MSI/X Interrupt handlers */
    162 static int	ixv_msix_que(void *);
    163 static int	ixv_msix_mbx(void *);
    164 
    165 /* Deferred interrupt tasklets */
    166 static void	ixv_handle_que(void *);
    167 static void	ixv_handle_mbx(void *);
    168 
    169 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    170 static ixv_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    171 
    172 /*********************************************************************
     173  *  NetBSD Device Interface Entry Points
    174  *********************************************************************/
    175 
    176 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    177     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    178     DVF_DETACH_SHUTDOWN);
    179 
     180 #if 0
    181 static device_method_t ixv_methods[] = {
    182 	/* Device interface */
    183 	DEVMETHOD(device_probe, ixv_probe),
    184 	DEVMETHOD(device_attach, ixv_attach),
    185 	DEVMETHOD(device_detach, ixv_detach),
    186 	DEVMETHOD(device_shutdown, ixv_shutdown),
    187 	DEVMETHOD_END
    188 };
    189 #endif
    190 
    191 #if 0
    192 static driver_t ixv_driver = {
    193 	"ix", ixv_methods, sizeof(struct adapter),
    194 };
    195 
    196 extern devclass_t ixgbe_devclass;
    197 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
    198 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    199 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    200 #endif
    201 
    202 /*
     203 ** TUNABLE PARAMETERS:
    204 */
    205 
    206 /*
     207 ** AIM: Adaptive Interrupt Moderation,
     208 ** which means that the interrupt rate
     209 ** is varied over time based on the
     210 ** traffic seen on each interrupt vector.
    211 */
    212 static int ixv_enable_aim = FALSE;
    213 #define	TUNABLE_INT(__x, __y)
    214 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    215 
    216 /* How many packets rxeof tries to clean at a time */
    217 static int ixv_rx_process_limit = 128;
    218 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    219 
    220 /* Flow control setting, default to full */
    221 static int ixv_flow_control = ixgbe_fc_full;
    222 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
    223 
    224 /*
     225  * Header split: this causes the hardware to DMA
     226  * the header into a separate mbuf from the payload.
     227  * It can be a performance win in some workloads, but
     228  * in others it actually hurts, so it is off by default.
    229  */
    230 static int ixv_header_split = FALSE;
    231 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
    232 
    233 /*
     234 ** Number of TX descriptors per ring;
     235 ** set higher than RX as this seems to be
     236 ** the better performing choice.
    237 */
    238 static int ixv_txd = DEFAULT_TXD;
    239 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    240 
    241 /* Number of RX descriptors per ring */
    242 static int ixv_rxd = DEFAULT_RXD;
    243 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    244 
    245 /*
     246 ** Shadow VFTA table: this is needed because
    247 ** the real filter table gets cleared during
    248 ** a soft reset and we need to repopulate it.
    249 */
    250 static u32 ixv_shadow_vfta[VFTA_SIZE];
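         /*
          * (The VFTA tracks one bit per possible VLAN ID, 32 IDs per 32-bit
          * word, so VFTA_SIZE is expected to be 4096/32 = 128 entries; the
          * shadow copy is replayed into the hardware by
          * ixv_setup_vlan_support() after each reset.)
          */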
    251 
     252 /* Keep a running count of attached ports as a sanity check */
    253 static int ixv_total_ports;
    254 
    255 /*********************************************************************
    256  *  Device identification routine
    257  *
     258  *  ixv_probe determines if the driver should be loaded on
     259  *  an adapter based on the PCI vendor/device ID of the adapter.
     260  *
     261  *  return 1 on a match, 0 otherwise
    262  *********************************************************************/
    263 
    264 static int
    265 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    266 {
    267 	const struct pci_attach_args *pa = aux;
    268 
    269 #ifdef __HAVE_PCI_MSI_MSIX
    270 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    271 #else
    272 	return 0;
    273 #endif
    274 }
    275 
    276 static ixv_vendor_info_t *
    277 ixv_lookup(const struct pci_attach_args *pa)
    278 {
    279 	pcireg_t subid;
    280 	ixv_vendor_info_t *ent;
    281 
    282 	INIT_DEBUGOUT("ixv_probe: begin");
    283 
    284 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    285 		return NULL;
    286 
    287 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    288 
    289 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    290 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    291 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    292 
    293 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    294 		     (ent->subvendor_id == 0)) &&
    295 
    296 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    297 		     (ent->subdevice_id == 0))) {
    298 			++ixv_total_ports;
    299 			return ent;
    300 		}
    301 	}
    302 	return NULL;
    303 }
    304 
    305 
    306 static void
    307 ixv_sysctl_attach(struct adapter *adapter)
    308 {
    309 	struct sysctllog **log;
    310 	const struct sysctlnode *rnode, *cnode;
    311 	device_t dev;
    312 
    313 	dev = adapter->dev;
    314 	log = &adapter->sysctllog;
    315 
    316 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    317 		aprint_error_dev(dev, "could not create sysctl root\n");
    318 		return;
    319 	}
    320 
    321 	if (sysctl_createv(log, 0, &rnode, &cnode,
    322 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    323 	    "stats", SYSCTL_DESCR("Statistics"),
    324 	    ixv_sysctl_stats, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    325 		aprint_error_dev(dev, "could not create sysctl\n");
    326 
    327 	if (sysctl_createv(log, 0, &rnode, &cnode,
    328 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    329 	    "debug", SYSCTL_DESCR("Debug Info"),
    330 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    331 		aprint_error_dev(dev, "could not create sysctl\n");
    332 
    333 	if (sysctl_createv(log, 0, &rnode, &cnode,
    334 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    335 	    "flow_control", SYSCTL_DESCR("Flow Control"),
    336 	    ixv_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    337 		aprint_error_dev(dev, "could not create sysctl\n");
    338 
    339 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    340 	 * XXX It's that way in the FreeBSD driver that this derives from.
    341 	 */
    342 	if (sysctl_createv(log, 0, &rnode, &cnode,
    343 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    344 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    345 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    346 		aprint_error_dev(dev, "could not create sysctl\n");
    347 }
    348 
    349 /*********************************************************************
    350  *  Device initialization routine
    351  *
    352  *  The attach entry point is called when the driver is being loaded.
    353  *  This routine identifies the type of hardware, allocates all resources
    354  *  and initializes the hardware.
    355  *
     356  *  (returns nothing on NetBSD; failures are reported via aprint_error)
    357  *********************************************************************/
    358 
    359 static void
    360 ixv_attach(device_t parent, device_t dev, void *aux)
    361 {
    362 	struct adapter *adapter;
    363 	struct ixgbe_hw *hw;
    364 	int             error = 0;
    365 	ixv_vendor_info_t *ent;
    366 	const struct pci_attach_args *pa = aux;
    367 
    368 	INIT_DEBUGOUT("ixv_attach: begin");
    369 
    370 	/* Allocate, clear, and link in our adapter structure */
    371 	adapter = device_private(dev);
    372 	adapter->dev = adapter->osdep.dev = dev;
    373 	hw = &adapter->hw;
    374 	adapter->osdep.pc = pa->pa_pc;
    375 	adapter->osdep.tag = pa->pa_tag;
    376 	adapter->osdep.dmat = pa->pa_dmat;
    377 	adapter->osdep.attached = false;
    378 
    379 	ent = ixv_lookup(pa);
    380 
    381 	KASSERT(ent != NULL);
    382 
    383 	aprint_normal(": %s, Version - %s\n",
    384 	    ixv_strings[ent->index], ixv_driver_version);
    385 
    386 	/* Core Lock Init*/
    387 	IXV_CORE_LOCK_INIT(adapter, device_xname(dev));
    388 
    389 	/* SYSCTL APIs */
    390 	ixv_sysctl_attach(adapter);
    391 
    392 	/* Set up the timer callout */
    393 	callout_init(&adapter->timer, 0);
    394 
    395 	/* Determine hardware revision */
    396 	ixv_identify_hardware(adapter);
    397 
    398 	/* Do base PCI setup - map BAR0 */
    399 	if (ixv_allocate_pci_resources(adapter, pa)) {
    400 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    401 		error = ENXIO;
    402 		goto err_out;
    403 	}
    404 
    405 	/* Do descriptor calc and sanity checks */
    406 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    407 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    408 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    409 		adapter->num_tx_desc = DEFAULT_TXD;
    410 	} else
    411 		adapter->num_tx_desc = ixv_txd;
    412 
    413 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    414 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    415 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    416 		adapter->num_rx_desc = DEFAULT_RXD;
    417 	} else
    418 		adapter->num_rx_desc = ixv_rxd;
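         	/*
         	 * (Each advanced TX/RX descriptor is 16 bytes, so the checks
         	 * above require the ring's byte size to be a multiple of
         	 * DBA_ALIGN (typically 128); e.g. 1024 descriptors * 16 bytes
         	 * = 16384 bytes, which is 128-byte aligned.)
         	 */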
    419 
    420 	/* Allocate our TX/RX Queues */
    421 	if (ixv_allocate_queues(adapter)) {
    422 		error = ENOMEM;
    423 		goto err_out;
    424 	}
    425 
    426 	/*
     427 	** Initialize the shared code: it's
     428 	** at this point that the mac type is set.
    429 	*/
    430 	error = ixgbe_init_shared_code(hw);
    431 	if (error) {
    432 		aprint_error_dev(dev,"Shared Code Initialization Failure\n");
    433 		error = EIO;
    434 		goto err_late;
    435 	}
    436 
    437 	/* Setup the mailbox */
    438 	ixgbe_init_mbx_params_vf(hw);
    439 
    440 	ixgbe_reset_hw(hw);
    441 
     442 	/* Set default Hardware Flow Control settings */
    443 	hw->fc.requested_mode = ixgbe_fc_full;
    444 	hw->fc.pause_time = IXV_FC_PAUSE;
    445 	hw->fc.low_water[0] = IXV_FC_LO;
    446 	hw->fc.high_water[0] = IXV_FC_HI;
    447 	hw->fc.send_xon = TRUE;
    448 
    449 	error = ixgbe_init_hw(hw);
    450 	if (error) {
    451 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    452 		error = EIO;
    453 		goto err_late;
    454 	}
    455 
    456 	error = ixv_allocate_msix(adapter, pa);
    457 	if (error)
    458 		goto err_late;
    459 
    460 	/* Setup OS specific network interface */
    461 	ixv_setup_interface(dev, adapter);
    462 
    463 	/* Sysctl for limiting the amount of work done in the taskqueue */
    464 	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
    465 	    "max number of rx packets to process", &adapter->rx_process_limit,
    466 	    ixv_rx_process_limit);
    467 
    468 	/* Do the stats setup */
    469 	ixv_save_stats(adapter);
    470 	ixv_init_stats(adapter);
    471 
    472 	/* Register for VLAN events */
    473 #if 0 /* XXX delete after write? */
    474 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    475 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    476 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    477 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    478 #endif
    479 
    480 	INIT_DEBUGOUT("ixv_attach: end");
    481 	adapter->osdep.attached = true;
    482 	return;
    483 
    484 err_late:
    485 	ixv_free_transmit_structures(adapter);
    486 	ixv_free_receive_structures(adapter);
    487 err_out:
    488 	ixv_free_pci_resources(adapter);
    489 	return;
    490 
    491 }
    492 
    493 /*********************************************************************
    494  *  Device removal routine
    495  *
    496  *  The detach entry point is called when the driver is being removed.
    497  *  This routine stops the adapter and deallocates all the resources
    498  *  that were allocated for driver operation.
    499  *
    500  *  return 0 on success, positive on failure
    501  *********************************************************************/
    502 
    503 static int
    504 ixv_detach(device_t dev, int flags)
    505 {
    506 	struct adapter *adapter = device_private(dev);
    507 	struct ix_queue *que = adapter->queues;
    508 
    509 	INIT_DEBUGOUT("ixv_detach: begin");
    510 	if (adapter->osdep.attached == false)
    511 		return 0;
    512 
    513 #if NVLAN > 0
    514 	/* Make sure VLANS are not using driver */
    515 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    516 		;	/* nothing to do: no VLANs */
    517 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    518 		vlan_ifdetach(adapter->ifp);
    519 	else {
    520 		aprint_error_dev(dev, "VLANs in use\n");
    521 		return EBUSY;
    522 	}
    523 #endif
    524 
    525 	IXV_CORE_LOCK(adapter);
    526 	ixv_stop(adapter);
    527 	IXV_CORE_UNLOCK(adapter);
    528 
    529 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    530 		softint_disestablish(que->que_si);
    531 	}
    532 
    533 	/* Drain the Link queue */
    534 	softint_disestablish(adapter->mbx_si);
    535 
    536 	/* Unregister VLAN events */
    537 #if 0 /* XXX msaitoh delete after write? */
    538 	if (adapter->vlan_attach != NULL)
    539 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    540 	if (adapter->vlan_detach != NULL)
    541 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    542 #endif
    543 
    544 	ether_ifdetach(adapter->ifp);
    545 	callout_halt(&adapter->timer, NULL);
    546 	ixv_free_pci_resources(adapter);
    547 #if 0 /* XXX the NetBSD port is probably missing something here */
    548 	bus_generic_detach(dev);
    549 #endif
    550 	if_detach(adapter->ifp);
    551 
    552 	ixv_free_transmit_structures(adapter);
    553 	ixv_free_receive_structures(adapter);
    554 
    555 	IXV_CORE_LOCK_DESTROY(adapter);
    556 	return (0);
    557 }
    558 
    559 /*********************************************************************
    560  *
    561  *  Shutdown entry point
    562  *
    563  **********************************************************************/
    564 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    565 static int
    566 ixv_shutdown(device_t dev)
    567 {
    568 	struct adapter *adapter = device_private(dev);
    569 	IXV_CORE_LOCK(adapter);
    570 	ixv_stop(adapter);
    571 	IXV_CORE_UNLOCK(adapter);
    572 	return (0);
    573 }
    574 #endif
    575 
    576 #if __FreeBSD_version < 800000
    577 /*********************************************************************
    578  *  Transmit entry point
    579  *
    580  *  ixv_start is called by the stack to initiate a transmit.
    581  *  The driver will remain in this routine as long as there are
    582  *  packets to transmit and transmit resources are available.
     583  *  In case resources are not available, the stack is notified and
    584  *  the packet is requeued.
    585  **********************************************************************/
    586 static void
    587 ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
    588 {
    589 	int rc;
    590 	struct mbuf    *m_head;
    591 	struct adapter *adapter = txr->adapter;
    592 
    593 	IXV_TX_LOCK_ASSERT(txr);
    594 
    595 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
    596 	    IFF_RUNNING)
    597 		return;
    598 	if (!adapter->link_active)
    599 		return;
    600 
    601 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    602 
    603 		IFQ_POLL(&ifp->if_snd, m_head);
    604 		if (m_head == NULL)
    605 			break;
    606 
    607 		if ((rc = ixv_xmit(txr, m_head)) == EAGAIN) {
    608 			ifp->if_flags |= IFF_OACTIVE;
    609 			break;
    610 		}
    611 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
    612 		if (rc == EFBIG) {
    613 			struct mbuf *mtmp;
    614 
    615 			if ((mtmp = m_defrag(m_head, M_NOWAIT)) != NULL) {
    616 				m_head = mtmp;
    617 				rc = ixv_xmit(txr, m_head);
    618 				if (rc != 0)
    619 					adapter->efbig2_tx_dma_setup.ev_count++;
    620 			} else
    621 				adapter->m_defrag_failed.ev_count++;
    622 		}
    623 		if (rc != 0) {
    624 			m_freem(m_head);
    625 			continue;
    626 		}
    627 		/* Send a copy of the frame to the BPF listener */
    628 		bpf_mtap(ifp, m_head);
    629 
    630 		/* Set watchdog on */
    631 		txr->watchdog_check = TRUE;
    632 		getmicrotime(&txr->watchdog_time);
    633 	}
    634 	return;
    635 }
    636 
    637 /*
    638  * Legacy TX start - called by the stack, this
    639  * always uses the first tx ring, and should
    640  * not be used with multiqueue tx enabled.
    641  */
    642 static void
    643 ixv_start(struct ifnet *ifp)
    644 {
    645 	struct adapter *adapter = ifp->if_softc;
    646 	struct tx_ring	*txr = adapter->tx_rings;
    647 
    648 	if (ifp->if_flags & IFF_RUNNING) {
    649 		IXV_TX_LOCK(txr);
    650 		ixv_start_locked(txr, ifp);
    651 		IXV_TX_UNLOCK(txr);
    652 	}
    653 	return;
    654 }
    655 
    656 #else
    657 
    658 /*
    659 ** Multiqueue Transmit driver
    660 **
    661 */
    662 static int
    663 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
    664 {
    665 	struct adapter	*adapter = ifp->if_softc;
    666 	struct ix_queue	*que;
    667 	struct tx_ring	*txr;
    668 	int 		i = 0, err = 0;
    669 
    670 	/* Which queue to use */
    671 	if ((m->m_flags & M_FLOWID) != 0)
    672 		i = m->m_pkthdr.flowid % adapter->num_queues;
    673 
    674 	txr = &adapter->tx_rings[i];
    675 	que = &adapter->queues[i];
    676 
    677 	if (IXV_TX_TRYLOCK(txr)) {
    678 		err = ixv_mq_start_locked(ifp, txr, m);
    679 		IXV_TX_UNLOCK(txr);
    680 	} else {
    681 		err = drbr_enqueue(ifp, txr->br, m);
    682 		softint_schedule(que->que_si);
    683 	}
    684 
    685 	return (err);
    686 }
    687 
    688 static int
    689 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
    690 {
    691 	struct adapter  *adapter = txr->adapter;
    692         struct mbuf     *next;
    693         int             enqueued, err = 0;
    694 
    695 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
    696 	    IFF_RUNNING || adapter->link_active == 0) {
    697 		if (m != NULL)
    698 			err = drbr_enqueue(ifp, txr->br, m);
    699 		return (err);
    700 	}
    701 
    702 	/* Do a clean if descriptors are low */
    703 	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
    704 		ixv_txeof(txr);
    705 
    706 	enqueued = 0;
    707 	if (m != NULL) {
    708 		err = drbr_dequeue(ifp, txr->br, m);
    709 		if (err) {
    710 			return (err);
    711 		}
    712 	}
    713 	/* Process the queue */
    714 	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
    715 		if ((err = ixv_xmit(txr, next)) != 0) {
    716 			if (next != NULL) {
    717 				drbr_advance(ifp, txr->br);
    718 			} else {
    719 				drbr_putback(ifp, txr->br, next);
    720 			}
    721 			break;
    722 		}
    723 		drbr_advance(ifp, txr->br);
    724 		enqueued++;
    725 		ifp->if_obytes += next->m_pkthdr.len;
    726 		if (next->m_flags & M_MCAST)
    727 			ifp->if_omcasts++;
    728 		/* Send a copy of the frame to the BPF listener */
    729 		ETHER_BPF_MTAP(ifp, next);
    730 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    731 			break;
    732 		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
    733 			ifp->if_flags |= IFF_OACTIVE;
    734 			break;
    735 		}
    736 	}
    737 
    738 	if (enqueued > 0) {
    739 		/* Set watchdog on */
    740 		txr->watchdog_check = TRUE;
    741 		getmicrotime(&txr->watchdog_time);
    742 	}
    743 
    744 	return (err);
    745 }
    746 
    747 /*
    748 ** Flush all ring buffers
    749 */
    750 static void
    751 ixv_qflush(struct ifnet *ifp)
    752 {
    753 	struct adapter  *adapter = ifp->if_softc;
    754 	struct tx_ring  *txr = adapter->tx_rings;
    755 	struct mbuf     *m;
    756 
    757 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    758 		IXV_TX_LOCK(txr);
    759 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
    760 			m_freem(m);
    761 		IXV_TX_UNLOCK(txr);
    762 	}
    763 	if_qflush(ifp);
    764 }
    765 
    766 #endif
    767 
    768 static int
    769 ixv_ifflags_cb(struct ethercom *ec)
    770 {
    771 	struct ifnet *ifp = &ec->ec_if;
    772 	struct adapter *adapter = ifp->if_softc;
    773 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    774 
    775 	IXV_CORE_LOCK(adapter);
    776 
    777 	if (change != 0)
    778 		adapter->if_flags = ifp->if_flags;
    779 
    780 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    781 		rc = ENETRESET;
    782 
    783 	IXV_CORE_UNLOCK(adapter);
    784 
    785 	return rc;
    786 }
    787 
    788 /*********************************************************************
    789  *  Ioctl entry point
    790  *
    791  *  ixv_ioctl is called when the user wants to configure the
    792  *  interface.
    793  *
    794  *  return 0 on success, positive on failure
    795  **********************************************************************/
    796 
    797 static int
    798 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    799 {
    800 	struct adapter	*adapter = ifp->if_softc;
    801 	struct ifcapreq *ifcr = data;
    802 	struct ifreq	*ifr = (struct ifreq *) data;
    803 	int             error = 0;
    804 	int l4csum_en;
    805 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    806 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    807 
    808 	switch (command) {
    809 	case SIOCSIFFLAGS:
    810 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    811 		break;
    812 	case SIOCADDMULTI:
    813 	case SIOCDELMULTI:
    814 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    815 		break;
    816 	case SIOCSIFMEDIA:
    817 	case SIOCGIFMEDIA:
    818 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    819 		break;
    820 	case SIOCSIFCAP:
    821 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    822 		break;
    823 	case SIOCSIFMTU:
    824 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    825 		break;
    826 	default:
    827 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
    828 		break;
    829 	}
    830 
    831 	switch (command) {
    832 	case SIOCSIFMEDIA:
    833 	case SIOCGIFMEDIA:
    834 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    835 	case SIOCSIFCAP:
    836 		/* Layer-4 Rx checksum offload has to be turned on and
    837 		 * off as a unit.
    838 		 */
    839 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    840 		if (l4csum_en != l4csum && l4csum_en != 0)
    841 			return EINVAL;
    842 		/*FALLTHROUGH*/
    843 	case SIOCADDMULTI:
    844 	case SIOCDELMULTI:
    845 	case SIOCSIFFLAGS:
    846 	case SIOCSIFMTU:
    847 	default:
    848 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    849 			return error;
    850 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    851 			;
    852 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    853 			IXV_CORE_LOCK(adapter);
    854 			ixv_init_locked(adapter);
    855 			IXV_CORE_UNLOCK(adapter);
    856 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    857 			/*
    858 			 * Multicast list has changed; set the hardware filter
    859 			 * accordingly.
    860 			 */
    861 			IXV_CORE_LOCK(adapter);
    862 			ixv_disable_intr(adapter);
    863 			ixv_set_multi(adapter);
    864 			ixv_enable_intr(adapter);
    865 			IXV_CORE_UNLOCK(adapter);
    866 		}
    867 		return 0;
    868 	}
    869 }
    870 
    871 /*********************************************************************
    872  *  Init entry point
    873  *
     874  *  This routine is used in two ways. It is used by the stack as
     875  *  the init entry point in the network interface structure. It is also used
    876  *  by the driver as a hw/sw initialization routine to get to a
    877  *  consistent state.
    878  *
    879  *  return 0 on success, positive on failure
    880  **********************************************************************/
    881 #define IXGBE_MHADD_MFS_SHIFT 16
    882 
    883 static void
    884 ixv_init_locked(struct adapter *adapter)
    885 {
    886 	struct ifnet	*ifp = adapter->ifp;
    887 	device_t 	dev = adapter->dev;
    888 	struct ixgbe_hw *hw = &adapter->hw;
    889 	u32		mhadd, gpie;
    890 
    891 	INIT_DEBUGOUT("ixv_init: begin");
    892 	KASSERT(mutex_owned(&adapter->core_mtx));
    893 	hw->adapter_stopped = FALSE;
    894 	ixgbe_stop_adapter(hw);
    895         callout_stop(&adapter->timer);
    896 
     897         /* reprogram the RAR[0] in case the user changed it. */
    898         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    899 
     900 	/* Get the latest MAC address; the user may have set a LAA */
    901 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    902 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    903         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    904 	hw->addr_ctrl.rar_used_count = 1;
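         	/*
         	 * (For a VF, ixgbe_set_rar() is not a plain register write: it is
         	 * a mailbox request to the PF, which ultimately decides whether
         	 * the new, possibly locally administered, address is accepted.)
         	 */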
    905 
    906 	/* Prepare transmit descriptors and buffers */
    907 	if (ixv_setup_transmit_structures(adapter)) {
    908 		aprint_error_dev(dev,"Could not setup transmit structures\n");
    909 		ixv_stop(adapter);
    910 		return;
    911 	}
    912 
    913 	ixgbe_reset_hw(hw);
    914 	ixv_initialize_transmit_units(adapter);
    915 
    916 	/* Setup Multicast table */
    917 	ixv_set_multi(adapter);
    918 
    919 	/*
    920 	** Determine the correct mbuf pool
    921 	** for doing jumbo/headersplit
    922 	*/
    923 	if (ifp->if_mtu > ETHERMTU)
    924 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    925 	else
    926 		adapter->rx_mbuf_sz = MCLBYTES;
    927 
    928 	/* Prepare receive descriptors and buffers */
    929 	if (ixv_setup_receive_structures(adapter)) {
    930 		device_printf(dev,"Could not setup receive structures\n");
    931 		ixv_stop(adapter);
    932 		return;
    933 	}
    934 
    935 	/* Configure RX settings */
    936 	ixv_initialize_receive_units(adapter);
    937 
    938 	/* Enable Enhanced MSIX mode */
    939 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    940 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    941 	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    942         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
    943 
    944 #if 0 /* XXX isn't it required? -- msaitoh  */
    945 	/* Set the various hardware offload abilities */
    946 	ifp->if_hwassist = 0;
    947 	if (ifp->if_capenable & IFCAP_TSO4)
    948 		ifp->if_hwassist |= CSUM_TSO;
    949 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    950 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    951 #if __FreeBSD_version >= 800000
    952 		ifp->if_hwassist |= CSUM_SCTP;
    953 #endif
    954 	}
    955 #endif
    956 
    957 	/* Set MTU size */
    958 	if (ifp->if_mtu > ETHERMTU) {
    959 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
    960 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
    961 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
    962 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    963 	}
    964 
    965 	/* Set up VLAN offload and filter */
    966 	ixv_setup_vlan_support(adapter);
    967 
    968 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    969 
    970 	/* Set up MSI/X routing */
    971 	ixv_configure_ivars(adapter);
    972 
    973 	/* Set up auto-mask */
    974 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    975 
    976         /* Set moderation on the Link interrupt */
    977         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
    978 
    979 	/* Stats init */
    980 	ixv_init_stats(adapter);
    981 
    982 	/* Config/Enable Link */
    983 	ixv_config_link(adapter);
    984 
    985 	/* And now turn on interrupts */
    986 	ixv_enable_intr(adapter);
    987 
    988 	/* Now inform the stack we're ready */
    989 	ifp->if_flags |= IFF_RUNNING;
    990 	ifp->if_flags &= ~IFF_OACTIVE;
    991 
    992 	return;
    993 }
    994 
    995 static int
    996 ixv_init(struct ifnet *ifp)
    997 {
    998 	struct adapter *adapter = ifp->if_softc;
    999 
   1000 	IXV_CORE_LOCK(adapter);
   1001 	ixv_init_locked(adapter);
   1002 	IXV_CORE_UNLOCK(adapter);
   1003 	return 0;
   1004 }
   1005 
   1006 
   1007 /*
   1008 **
   1009 ** MSIX Interrupt Handlers and Tasklets
   1010 **
   1011 */
   1012 
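         /*
         ** The three helpers below manipulate the per-queue interrupt mask
         ** bits: a write to VTEIMS sets (enables) a vector's bit, VTEIMC
         ** clears (disables) it, and VTEICS forces the cause bit so that a
         ** re-armed queue gets serviced again.
         */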
   1013 static inline void
   1014 ixv_enable_queue(struct adapter *adapter, u32 vector)
   1015 {
   1016 	struct ixgbe_hw *hw = &adapter->hw;
   1017 	u32	queue = 1 << vector;
   1018 	u32	mask;
   1019 
   1020 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1021 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1022 }
   1023 
   1024 static inline void
   1025 ixv_disable_queue(struct adapter *adapter, u32 vector)
   1026 {
   1027 	struct ixgbe_hw *hw = &adapter->hw;
    1028 	u64	queue = (u64)1 << vector;
   1029 	u32	mask;
   1030 
   1031 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1032 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
   1033 }
   1034 
   1035 static inline void
   1036 ixv_rearm_queues(struct adapter *adapter, u64 queues)
   1037 {
   1038 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   1039 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
   1040 }
   1041 
   1042 
   1043 static void
   1044 ixv_handle_que(void *context)
   1045 {
   1046 	struct ix_queue *que = context;
   1047 	struct adapter  *adapter = que->adapter;
   1048 	struct tx_ring  *txr = que->txr;
   1049 	struct ifnet    *ifp = adapter->ifp;
   1050 	bool		more;
   1051 
   1052 	if (ifp->if_flags & IFF_RUNNING) {
   1053 		more = ixv_rxeof(que, adapter->rx_process_limit);
   1054 		IXV_TX_LOCK(txr);
   1055 		ixv_txeof(txr);
   1056 #if __FreeBSD_version >= 800000
   1057 		if (!drbr_empty(ifp, txr->br))
   1058 			ixv_mq_start_locked(ifp, txr, NULL);
   1059 #else
   1060 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1061 			ixv_start_locked(txr, ifp);
   1062 #endif
   1063 		IXV_TX_UNLOCK(txr);
   1064 		if (more) {
   1065 			adapter->req.ev_count++;
   1066 			softint_schedule(que->que_si);
   1067 			return;
   1068 		}
   1069 	}
   1070 
   1071 	/* Reenable this interrupt */
   1072 	ixv_enable_queue(adapter, que->msix);
   1073 	return;
   1074 }
   1075 
   1076 /*********************************************************************
   1077  *
    1078  *  MSI-X Queue Interrupt Service routine
   1079  *
   1080  **********************************************************************/
   1081 int
   1082 ixv_msix_que(void *arg)
   1083 {
   1084 	struct ix_queue	*que = arg;
   1085 	struct adapter  *adapter = que->adapter;
   1086 	struct tx_ring	*txr = que->txr;
   1087 	struct rx_ring	*rxr = que->rxr;
   1088 	bool		more_tx, more_rx;
   1089 	u32		newitr = 0;
   1090 
   1091 	ixv_disable_queue(adapter, que->msix);
   1092 	++que->irqs;
   1093 
   1094 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1095 
   1096 	IXV_TX_LOCK(txr);
   1097 	more_tx = ixv_txeof(txr);
   1098 	/*
   1099 	** Make certain that if the stack
   1100 	** has anything queued the task gets
   1101 	** scheduled to handle it.
   1102 	*/
   1103 #if __FreeBSD_version < 800000
   1104 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
   1105 #else
   1106 	if (!drbr_empty(adapter->ifp, txr->br))
   1107 #endif
   1108                 more_tx = 1;
   1109 	IXV_TX_UNLOCK(txr);
   1110 
   1111 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1112 
   1113 	/* Do AIM now? */
   1114 
   1115 	if (ixv_enable_aim == FALSE)
   1116 		goto no_calc;
   1117 	/*
   1118 	** Do Adaptive Interrupt Moderation:
   1119         **  - Write out last calculated setting
   1120 	**  - Calculate based on average size over
   1121 	**    the last interval.
   1122 	*/
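         	/*
         	** (Sketch of the heuristic below, not a hardware spec: newitr is
         	** roughly the average frame size seen since the last interrupt
         	** plus 24 bytes of preamble/CRC, clamped to 3000 and scaled down,
         	** so bulk traffic with large frames ends up moderated harder than
         	** latency-sensitive small-frame traffic.)
         	*/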
   1123         if (que->eitr_setting)
   1124                 IXGBE_WRITE_REG(&adapter->hw,
   1125                     IXGBE_VTEITR(que->msix),
   1126 		    que->eitr_setting);
   1127 
   1128         que->eitr_setting = 0;
   1129 
   1130         /* Idle, do nothing */
   1131         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1132                 goto no_calc;
   1133 
   1134 	if ((txr->bytes) && (txr->packets))
   1135                	newitr = txr->bytes/txr->packets;
   1136 	if ((rxr->bytes) && (rxr->packets))
   1137 		newitr = max(newitr,
   1138 		    (rxr->bytes / rxr->packets));
   1139 	newitr += 24; /* account for hardware frame, crc */
   1140 
   1141 	/* set an upper boundary */
   1142 	newitr = min(newitr, 3000);
   1143 
   1144 	/* Be nice to the mid range */
   1145 	if ((newitr > 300) && (newitr < 1200))
   1146 		newitr = (newitr / 3);
   1147 	else
   1148 		newitr = (newitr / 2);
   1149 
   1150 	newitr |= newitr << 16;
   1151 
   1152         /* save for next interrupt */
   1153         que->eitr_setting = newitr;
   1154 
   1155         /* Reset state */
   1156         txr->bytes = 0;
   1157         txr->packets = 0;
   1158         rxr->bytes = 0;
   1159         rxr->packets = 0;
   1160 
   1161 no_calc:
   1162 	if (more_tx || more_rx)
   1163 		softint_schedule(que->que_si);
   1164 	else /* Reenable this interrupt */
   1165 		ixv_enable_queue(adapter, que->msix);
   1166 	return 1;
   1167 }
   1168 
   1169 static int
   1170 ixv_msix_mbx(void *arg)
   1171 {
   1172 	struct adapter	*adapter = arg;
   1173 	struct ixgbe_hw *hw = &adapter->hw;
   1174 	u32		reg;
   1175 
   1176 	++adapter->mbx_irq.ev_count;
   1177 
   1178 	/* First get the cause */
   1179 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
   1180 	/* Clear interrupt with write */
   1181 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
   1182 
   1183 	/* Link status change */
   1184 	if (reg & IXGBE_EICR_LSC)
   1185 		softint_schedule(adapter->mbx_si);
   1186 
   1187 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1188 	return 1;
   1189 }
   1190 
   1191 /*********************************************************************
   1192  *
   1193  *  Media Ioctl callback
   1194  *
   1195  *  This routine is called whenever the user queries the status of
   1196  *  the interface using ifconfig.
   1197  *
   1198  **********************************************************************/
   1199 static void
   1200 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1201 {
   1202 	struct adapter *adapter = ifp->if_softc;
   1203 
   1204 	INIT_DEBUGOUT("ixv_media_status: begin");
   1205 	IXV_CORE_LOCK(adapter);
   1206 	ixv_update_link_status(adapter);
   1207 
   1208 	ifmr->ifm_status = IFM_AVALID;
   1209 	ifmr->ifm_active = IFM_ETHER;
   1210 
   1211 	if (!adapter->link_active) {
   1212 		IXV_CORE_UNLOCK(adapter);
   1213 		return;
   1214 	}
   1215 
   1216 	ifmr->ifm_status |= IFM_ACTIVE;
   1217 
   1218 	switch (adapter->link_speed) {
   1219 		case IXGBE_LINK_SPEED_1GB_FULL:
   1220 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1221 			break;
   1222 		case IXGBE_LINK_SPEED_10GB_FULL:
   1223 			ifmr->ifm_active |= IFM_FDX;
   1224 			break;
   1225 	}
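         	/*
         	 * (The 10Gb case above reports only IFM_FDX with no media subtype,
         	 * presumably because the VF only learns the link speed from the PF
         	 * through the mailbox and cannot know which physical medium the PF
         	 * is actually using.)
         	 */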
   1226 
   1227 	IXV_CORE_UNLOCK(adapter);
   1228 
   1229 	return;
   1230 }
   1231 
   1232 /*********************************************************************
   1233  *
   1234  *  Media Ioctl callback
   1235  *
   1236  *  This routine is called when the user changes speed/duplex using
    1237  *  media/mediaopt option with ifconfig.
   1238  *
   1239  **********************************************************************/
   1240 static int
   1241 ixv_media_change(struct ifnet * ifp)
   1242 {
   1243 	struct adapter *adapter = ifp->if_softc;
   1244 	struct ifmedia *ifm = &adapter->media;
   1245 
   1246 	INIT_DEBUGOUT("ixv_media_change: begin");
   1247 
   1248 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1249 		return (EINVAL);
   1250 
   1251         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1252         case IFM_AUTO:
   1253                 break;
   1254         default:
   1255                 device_printf(adapter->dev, "Only auto media type\n");
   1256 		return (EINVAL);
   1257         }
   1258 
   1259 	return (0);
   1260 }
   1261 
   1262 /*********************************************************************
   1263  *
   1264  *  This routine maps the mbufs to tx descriptors, allowing the
   1265  *  TX engine to transmit the packets.
   1266  *  	- return 0 on success, positive on failure
   1267  *
   1268  **********************************************************************/
   1269 
   1270 static int
   1271 ixv_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1272 {
   1273 	struct m_tag *mtag;
   1274 	struct adapter  *adapter = txr->adapter;
   1275 	struct ethercom *ec = &adapter->osdep.ec;
   1276 	u32		olinfo_status = 0, cmd_type_len;
   1277 	u32		paylen = 0;
   1278 	int             i, j, error;
   1279 	int		first, last = 0;
   1280 	bus_dmamap_t	map;
   1281 	struct ixv_tx_buf *txbuf;
   1282 	union ixgbe_adv_tx_desc *txd = NULL;
   1283 
   1284 	/* Basic descriptor defines */
   1285         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1286 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1287 
   1288 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1289         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1290 
   1291         /*
    1292          * Important to capture the first descriptor
    1293          * used, because its tx_buffer will record the index of
    1294          * the last one, which we tell the hardware to report back.
   1295          */
   1296         first = txr->next_avail_desc;
   1297 	txbuf = &txr->tx_buffers[first];
   1298 	map = txbuf->map;
   1299 
   1300 	/*
   1301 	 * Map the packet for DMA.
   1302 	 */
   1303 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1304 	    m_head, BUS_DMA_NOWAIT);
   1305 
   1306 	switch (error) {
   1307 	case EAGAIN:
   1308 		adapter->eagain_tx_dma_setup.ev_count++;
   1309 		return EAGAIN;
   1310 	case ENOMEM:
   1311 		adapter->enomem_tx_dma_setup.ev_count++;
   1312 		return EAGAIN;
   1313 	case EFBIG:
   1314 		adapter->efbig_tx_dma_setup.ev_count++;
   1315 		return error;
   1316 	case EINVAL:
   1317 		adapter->einval_tx_dma_setup.ev_count++;
   1318 		return error;
   1319 	default:
   1320 		adapter->other_tx_dma_setup.ev_count++;
   1321 		return error;
   1322 	case 0:
   1323 		break;
   1324 	}
   1325 
   1326 	/* Make certain there are enough descriptors */
   1327 	if (map->dm_nsegs > txr->tx_avail - 2) {
   1328 		txr->no_desc_avail.ev_count++;
   1329 		/* XXX s/ixgbe/ixv/ */
   1330 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1331 		return EAGAIN;
   1332 	}
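         	/*
         	 * (The "- 2" above presumably reserves room for the offload/TSO
         	 * context descriptor set up below plus a one-slot safety margin.)
         	 */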
   1333 
   1334 	/*
    1335 	** Set up the appropriate offload context;
    1336 	** this becomes the first descriptor of
    1337 	** a packet.
   1338 	*/
   1339 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1340 		if (ixv_tso_setup(txr, m_head, &paylen)) {
   1341 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1342 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1343 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1344 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1345 			++adapter->tso_tx.ev_count;
   1346 		} else {
   1347 			++adapter->tso_err.ev_count;
   1348 			/* XXX unload DMA map! --dyoung -> easy? --msaitoh */
   1349 			return (ENXIO);
   1350 		}
   1351 	} else
   1352 		olinfo_status |= ixv_tx_ctx_setup(txr, m_head);
   1353 
   1354         /* Record payload length */
   1355 	if (paylen == 0)
   1356         	olinfo_status |= m_head->m_pkthdr.len <<
   1357 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1358 
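         	/*
         	 * Each DMA segment of the mbuf chain gets its own advanced data
         	 * descriptor below; the final one is marked EOP|RS after the loop
         	 * so the hardware reports completion for the whole frame, and the
         	 * eop_index stored in the first tx_buffer lets ixv_txeof() know
         	 * when the entire packet (and its dma map) can be released.
         	 */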
   1359 	i = txr->next_avail_desc;
   1360 	for (j = 0; j < map->dm_nsegs; j++) {
   1361 		bus_size_t seglen;
   1362 		bus_addr_t segaddr;
   1363 
   1364 		txbuf = &txr->tx_buffers[i];
   1365 		txd = &txr->tx_base[i];
   1366 		seglen = map->dm_segs[j].ds_len;
   1367 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1368 
   1369 		txd->read.buffer_addr = segaddr;
   1370 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1371 		    cmd_type_len |seglen);
   1372 		txd->read.olinfo_status = htole32(olinfo_status);
   1373 		last = i; /* descriptor that will get completion IRQ */
   1374 
   1375 		if (++i == adapter->num_tx_desc)
   1376 			i = 0;
   1377 
   1378 		txbuf->m_head = NULL;
   1379 		txbuf->eop_index = -1;
   1380 	}
   1381 
   1382 	txd->read.cmd_type_len |=
   1383 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1384 	txr->tx_avail -= map->dm_nsegs;
   1385 	txr->next_avail_desc = i;
   1386 
   1387 	txbuf->m_head = m_head;
   1388 	/* Swap the dma map between the first and last descriptor */
   1389 	txr->tx_buffers[first].map = txbuf->map;
   1390 	txbuf->map = map;
   1391 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1392 	    BUS_DMASYNC_PREWRITE);
   1393 
   1394         /* Set the index of the descriptor that will be marked done */
   1395         txbuf = &txr->tx_buffers[first];
   1396 	txbuf->eop_index = last;
   1397 
   1398 	/* XXX s/ixgbe/ixg/ */
   1399         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1400             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1401 	/*
    1402 	 * Advance the Transmit Descriptor Tail (TDT); this tells the
   1403 	 * hardware that this frame is available to transmit.
   1404 	 */
   1405 	++txr->total_packets.ev_count;
   1406 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
   1407 
   1408 	return 0;
   1409 }
   1410 
   1411 
   1412 /*********************************************************************
   1413  *  Multicast Update
   1414  *
   1415  *  This routine is called whenever multicast address list is updated.
   1416  *
   1417  **********************************************************************/
   1418 #define IXGBE_RAR_ENTRIES 16
   1419 
   1420 static void
   1421 ixv_set_multi(struct adapter *adapter)
   1422 {
   1423 	struct ether_multi *enm;
   1424 	struct ether_multistep step;
   1425 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1426 	u8	*update_ptr;
   1427 	int	mcnt = 0;
   1428 	struct ethercom *ec = &adapter->osdep.ec;
   1429 
   1430 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1431 
   1432 	ETHER_FIRST_MULTI(step, ec, enm);
   1433 	while (enm != NULL) {
   1434 		bcopy(enm->enm_addrlo,
   1435 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1436 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1437 		mcnt++;
   1438 		/* XXX This might be required --msaitoh */
   1439 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1440 			break;
   1441 		ETHER_NEXT_MULTI(step, enm);
   1442 	}
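         	/*
         	 * (If the list overflows, addresses beyond
         	 * MAX_NUM_MULTICAST_ADDRESSES are silently dropped by the break
         	 * above; there is no fallback to allmulti mode here.)
         	 */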
   1443 
   1444 	update_ptr = mta;
   1445 
   1446 	ixgbe_update_mc_addr_list(&adapter->hw,
   1447 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1448 
   1449 	return;
   1450 }
   1451 
   1452 /*
    1453  * This is an iterator function needed by the multicast
    1454  * shared code. It simply feeds the shared code routine the
    1455  * addresses built up in ixv_set_multi() one by one.
   1456  */
   1457 static u8 *
   1458 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1459 {
   1460 	u8 *addr = *update_ptr;
   1461 	u8 *newptr;
   1462 	*vmdq = 0;
   1463 
   1464 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1465 	*update_ptr = newptr;
   1466 	return addr;
   1467 }
   1468 
   1469 /*********************************************************************
   1470  *  Timer routine
   1471  *
    1472  *  This routine checks for link status, updates statistics,
   1473  *  and runs the watchdog check.
   1474  *
   1475  **********************************************************************/
   1476 
   1477 static void
   1478 ixv_local_timer1(void *arg)
   1479 {
   1480 	struct adapter	*adapter = arg;
   1481 	device_t	dev = adapter->dev;
   1482 	struct tx_ring	*txr = adapter->tx_rings;
   1483 	int		i;
   1484 	struct timeval now, elapsed;
   1485 
   1486 	KASSERT(mutex_owned(&adapter->core_mtx));
   1487 
   1488 	ixv_update_link_status(adapter);
   1489 
   1490 	/* Stats Update */
   1491 	ixv_update_stats(adapter);
   1492 
   1493 	/*
   1494 	 * If the interface has been paused
   1495 	 * then don't do the watchdog check
   1496 	 */
   1497 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   1498 		goto out;
   1499 	/*
   1500 	** Check for time since any descriptor was cleaned
   1501 	*/
   1502         for (i = 0; i < adapter->num_queues; i++, txr++) {
   1503 		IXV_TX_LOCK(txr);
   1504 		if (txr->watchdog_check == FALSE) {
   1505 			IXV_TX_UNLOCK(txr);
   1506 			continue;
   1507 		}
   1508 		getmicrotime(&now);
   1509 		timersub(&now, &txr->watchdog_time, &elapsed);
   1510 		if (tvtohz(&elapsed) > IXV_WATCHDOG)
   1511 			goto hung;
   1512 		IXV_TX_UNLOCK(txr);
   1513 	}
   1514 out:
   1515        	ixv_rearm_queues(adapter, adapter->que_mask);
   1516 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1517 	return;
   1518 
   1519 hung:
   1520 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1521 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
   1522 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
   1523 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
    1524 	device_printf(dev,"TX(%d) desc avail = %d, "
   1525 	    "Next TX to Clean = %d\n",
   1526 	    txr->me, txr->tx_avail, txr->next_to_clean);
   1527 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1528 	adapter->watchdog_events.ev_count++;
   1529 	IXV_TX_UNLOCK(txr);
   1530 	ixv_init_locked(adapter);
   1531 }
   1532 
   1533 static void
   1534 ixv_local_timer(void *arg)
   1535 {
   1536 	struct adapter *adapter = arg;
   1537 
   1538 	IXV_CORE_LOCK(adapter);
   1539 	ixv_local_timer1(adapter);
   1540 	IXV_CORE_UNLOCK(adapter);
   1541 }
   1542 
   1543 /*
    1544 ** Note: this routine updates the OS on the link state;
   1545 **	the real check of the hardware only happens with
   1546 **	a link interrupt.
   1547 */
   1548 static void
   1549 ixv_update_link_status(struct adapter *adapter)
   1550 {
   1551 	struct ifnet	*ifp = adapter->ifp;
   1552 	struct tx_ring *txr = adapter->tx_rings;
   1553 	device_t dev = adapter->dev;
   1554 
   1555 
   1556 	if (adapter->link_up){
   1557 		if (adapter->link_active == FALSE) {
   1558 			if (bootverbose)
   1559 				device_printf(dev,"Link is up %d Gbps %s \n",
   1560 				    ((adapter->link_speed == 128)? 10:1),
   1561 				    "Full Duplex");
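         			/* (128 == IXGBE_LINK_SPEED_10GB_FULL, 0x80; anything else prints as 1 Gbps) */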
   1562 			adapter->link_active = TRUE;
   1563 			if_link_state_change(ifp, LINK_STATE_UP);
   1564 		}
   1565 	} else { /* Link down */
   1566 		if (adapter->link_active == TRUE) {
   1567 			if (bootverbose)
   1568 				device_printf(dev,"Link is Down\n");
   1569 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1570 			adapter->link_active = FALSE;
   1571 			for (int i = 0; i < adapter->num_queues;
   1572 			    i++, txr++)
   1573 				txr->watchdog_check = FALSE;
   1574 		}
   1575 	}
   1576 
   1577 	return;
   1578 }
   1579 
   1580 
   1581 static void
   1582 ixv_ifstop(struct ifnet *ifp, int disable)
   1583 {
   1584 	struct adapter *adapter = ifp->if_softc;
   1585 
   1586 	IXV_CORE_LOCK(adapter);
   1587 	ixv_stop(adapter);
   1588 	IXV_CORE_UNLOCK(adapter);
   1589 }
   1590 
   1591 /*********************************************************************
   1592  *
   1593  *  This routine disables all traffic on the adapter by issuing a
    1594  *  global reset on the MAC.
   1595  *
   1596  **********************************************************************/
   1597 
   1598 static void
   1599 ixv_stop(void *arg)
   1600 {
   1601 	struct ifnet   *ifp;
   1602 	struct adapter *adapter = arg;
   1603 	struct ixgbe_hw *hw = &adapter->hw;
   1604 	ifp = adapter->ifp;
   1605 
   1606 	KASSERT(mutex_owned(&adapter->core_mtx));
   1607 
   1608 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1609 	ixv_disable_intr(adapter);
   1610 
   1611 	/* Tell the stack that the interface is no longer active */
   1612 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1613 
   1614 	ixgbe_reset_hw(hw);
   1615 	adapter->hw.adapter_stopped = FALSE;
   1616 	ixgbe_stop_adapter(hw);
   1617 	callout_stop(&adapter->timer);
   1618 
   1619 	/* reprogram the RAR[0] in case user changed it. */
   1620 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1621 
   1622 	return;
   1623 }
   1624 
   1625 
   1626 /*********************************************************************
   1627  *
   1628  *  Determine hardware revision.
   1629  *
   1630  **********************************************************************/
   1631 static void
   1632 ixv_identify_hardware(struct adapter *adapter)
   1633 {
   1634 	u16		pci_cmd_word;
   1635 	pcitag_t tag;
   1636 	pci_chipset_tag_t pc;
   1637 	pcireg_t subid, id;
   1638 	struct ixgbe_hw *hw = &adapter->hw;
   1639 
   1640 	pc = adapter->osdep.pc;
   1641 	tag = adapter->osdep.tag;
   1642 
   1643 	/*
    1644 	** Make sure BUSMASTER is set; on a VM under
    1645 	** KVM it may not be, and that will break things.
   1646 	*/
   1647 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1648 	if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
   1649 		INIT_DEBUGOUT("Bus Master bit was not set!\n");
   1650 		pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
   1651 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1652 	}
   1653 
   1654 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1655 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1656 
   1657 	/* Save off the information about this board */
   1658 	hw->vendor_id = PCI_VENDOR(id);
   1659 	hw->device_id = PCI_PRODUCT(id);
   1660 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1661 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1662 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1663 
   1664 	return;
   1665 }
   1666 
   1667 /*********************************************************************
   1668  *
   1669  *  Setup MSIX Interrupt resources and handlers
   1670  *
   1671  **********************************************************************/
   1672 static int
   1673 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   1674 {
   1675 	device_t        dev = adapter->dev;
   1676 	struct ix_queue *que = adapter->queues;
   1677 	int 		error, rid, vector = 0;
   1678 	pci_chipset_tag_t pc;
   1679 	pcitag_t	tag;
   1680 	char intrbuf[PCI_INTRSTR_LEN];
   1681 	const char	*intrstr = NULL;
   1682 	kcpuset_t	*affinity;
   1683 	int		cpu_id = 0;
   1684 
   1685 	pc = adapter->osdep.pc;
   1686 	tag = adapter->osdep.tag;
   1687 
   1688 	if (pci_msix_alloc_exact(pa,
   1689 		&adapter->osdep.intrs, IXG_MSIX_NINTR) != 0)
   1690 		return (ENXIO);
   1691 
   1692 	kcpuset_create(&affinity, false);
   1693 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   1694 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   1695 		    sizeof(intrbuf));
   1696 #ifdef IXV_MPSAFE
    1697 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   1698 		    true);
   1699 #endif
   1700 		/* Set the handler function */
   1701 		adapter->osdep.ihs[i] = pci_intr_establish(pc,
   1702 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que);
   1703 		if (adapter->osdep.ihs[i] == NULL) {
   1704 			que->res = NULL;
   1705 			aprint_error_dev(dev,
   1706 			    "Failed to register QUE handler");
   1707 			kcpuset_destroy(affinity);
   1708 			return (ENXIO);
   1709 		}
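         		/*
         		 * Record which MSI-X vector services this queue; the bit
         		 * set in que_mask is what ixv_rearm_queues() (see the
         		 * watchdog path above) uses to re-enable the queue
         		 * interrupts.
         		 */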
   1710 		que->msix = vector;
    1711 		adapter->que_mask |= (u64)1 << que->msix;
   1712 
   1713 		cpu_id = i;
   1714 		/* Round-robin affinity */
   1715 		kcpuset_zero(affinity);
   1716 		kcpuset_set(affinity, cpu_id % ncpu);
   1717 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   1718 		    NULL);
   1719 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   1720 		    intrstr);
   1721 		if (error == 0)
   1722 			aprint_normal(", bound queue %d to cpu %d\n",
   1723 			    i, cpu_id);
   1724 		else
   1725 			aprint_normal("\n");
   1726 
   1727 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1728 		    que);
   1729 		if (que->que_si == NULL) {
   1730 			aprint_error_dev(dev,
   1731 			    "could not establish software interrupt\n");
   1732 		}
   1733 	}
   1734 
   1735 	/* and Mailbox */
   1736 	cpu_id++;
   1737 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   1738 	    sizeof(intrbuf));
   1739 #ifdef IXG_MPSAFE
   1740 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
   1741 #endif
   1742 	/* Set the mbx handler function */
   1743 	adapter->osdep.ihs[vector] = pci_intr_establish(pc,
   1744 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter);
   1745 	if (adapter->osdep.ihs[vector] == NULL) {
   1746 		adapter->res = NULL;
   1747 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   1748 		kcpuset_destroy(affinity);
   1749 		return (ENXIO);
   1750 	}
   1751 	/* Round-robin affinity */
   1752 	kcpuset_zero(affinity);
   1753 	kcpuset_set(affinity, cpu_id % ncpu);
    1754 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
   1755 
   1756 	aprint_normal_dev(dev,
   1757 	    "for link, interrupting at %s, ", intrstr);
    1758 	if (error == 0)
    1759 		aprint_normal("affinity to cpu %d\n", cpu_id);
    1760 	else
         		aprint_normal("\n");
   1761 	adapter->mbxvec = vector;
   1762 	/* Tasklets for Mailbox */
   1763 	adapter->mbx_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1764 	    adapter);
   1765 	/*
   1766 	** Due to a broken design QEMU will fail to properly
   1767 	** enable the guest for MSIX unless the vectors in
   1768 	** the table are all set up, so we must rewrite the
   1769 	** ENABLE in the MSIX control register again at this
   1770 	** point to cause it to successfully initialize us.
   1771 	*/
   1772 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1773 		int msix_ctrl;
   1774 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   1775 		rid += PCI_MSIX_CTL;
   1776 		msix_ctrl = pci_conf_read(pc, tag, rid);
   1777 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1778 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1779 	}
   1780 
         	kcpuset_destroy(affinity);
    1781 	return (0);
   1782 }
   1783 
   1784 /*
    1785  * Setup MSIX resources; note that the VF
    1786  * device MUST use MSIX, there is no fallback.
   1787  */
   1788 static int
   1789 ixv_setup_msix(struct adapter *adapter)
   1790 {
   1791 	device_t dev = adapter->dev;
   1792 	int want, msgs;
   1793 
   1794 	/*
   1795 	** Want two vectors: one for a queue,
    1796 	** plus an additional one for the mailbox.
   1797 	*/
   1798 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   1799 	if (msgs < IXG_MSIX_NINTR) {
   1800 		aprint_error_dev(dev,"MSIX config error\n");
   1801 		return (ENXIO);
   1802 	}
   1803 	want = MIN(msgs, IXG_MSIX_NINTR);
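         	/* i.e. the single queue vector plus the mailbox vector noted above */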
   1804 
   1805 	adapter->msix_mem = (void *)1; /* XXX */
   1806 	aprint_normal_dev(dev,
   1807 	    "Using MSIX interrupts with %d vectors\n", msgs);
   1808 	return (want);
   1809 }
   1810 
   1811 
   1812 static int
   1813 ixv_allocate_pci_resources(struct adapter *adapter,
   1814     const struct pci_attach_args *pa)
   1815 {
   1816 	pcireg_t	memtype;
   1817 	device_t        dev = adapter->dev;
   1818 	bus_addr_t addr;
   1819 	int flags;
   1820 
   1821 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1822 
   1823 	switch (memtype) {
   1824 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1825 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1826 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1827 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1828 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1829 			goto map_err;
   1830 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1831 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1832 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1833 		}
   1834 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1835 		     adapter->osdep.mem_size, flags,
   1836 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1837 map_err:
   1838 			adapter->osdep.mem_size = 0;
   1839 			aprint_error_dev(dev, "unable to map BAR0\n");
   1840 			return ENXIO;
   1841 		}
   1842 		break;
   1843 	default:
   1844 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1845 		return ENXIO;
   1846 	}
   1847 
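         	/*
         	 * This VF driver runs a single TX/RX queue pair, which is why
         	 * the MSI-X setup below only asks for one queue vector plus
         	 * the mailbox vector.
         	 */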
   1848 	adapter->num_queues = 1;
   1849 	adapter->hw.back = &adapter->osdep;
   1850 
   1851 	/*
   1852 	** Now setup MSI/X, should
   1853 	** return us the number of
   1854 	** configured vectors.
   1855 	*/
   1856 	adapter->msix = ixv_setup_msix(adapter);
   1857 	if (adapter->msix == ENXIO)
   1858 		return (ENXIO);
   1859 	else
   1860 		return (0);
   1861 }
   1862 
   1863 static void
   1864 ixv_free_pci_resources(struct adapter * adapter)
   1865 {
   1866 	struct 		ix_queue *que = adapter->queues;
   1867 	int		rid;
   1868 
   1869 	/*
   1870 	**  Release all msix queue resources:
   1871 	*/
   1872 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1873 		rid = que->msix + 1;
   1874 		if (que->res != NULL)
   1875 			pci_intr_disestablish(adapter->osdep.pc,
   1876 			    adapter->osdep.ihs[i]);
   1877 	}
   1878 
   1879 
    1880 	/* Clean the Mailbox/Link interrupt last */
    1881 	if (adapter->mbxvec) /* we are doing MSIX */
    1882 		rid = adapter->mbxvec;
    1883 	else
    1884 		rid = 0;
    1885 
    1886 	if (adapter->osdep.ihs[rid] != NULL)
    1887 		pci_intr_disestablish(adapter->osdep.pc,
    1888 		    adapter->osdep.ihs[rid]);
    1889 	adapter->osdep.ihs[rid] = NULL;
   1890 
   1891 #if defined(NETBSD_MSI_OR_MSIX)
   1892 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1893 	    adapter->osdep.nintrs);
   1894 #endif
   1895 
   1896 	if (adapter->osdep.mem_size != 0) {
   1897 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1898 		    adapter->osdep.mem_bus_space_handle,
   1899 		    adapter->osdep.mem_size);
   1900 	}
   1901 
   1902 	return;
   1903 }
   1904 
   1905 /*********************************************************************
   1906  *
   1907  *  Setup networking device structure and register an interface.
   1908  *
   1909  **********************************************************************/
   1910 static void
   1911 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1912 {
   1913 	struct ethercom *ec = &adapter->osdep.ec;
   1914 	struct ifnet   *ifp;
   1915 
   1916 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1917 
   1918 	ifp = adapter->ifp = &ec->ec_if;
   1919 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1920 	ifp->if_baudrate = 1000000000;
   1921 	ifp->if_init = ixv_init;
   1922 	ifp->if_stop = ixv_ifstop;
   1923 	ifp->if_softc = adapter;
   1924 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1925 	ifp->if_ioctl = ixv_ioctl;
   1926 #if __FreeBSD_version >= 800000
   1927 	ifp->if_transmit = ixv_mq_start;
   1928 	ifp->if_qflush = ixv_qflush;
   1929 #else
   1930 	ifp->if_start = ixv_start;
   1931 #endif
   1932 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1933 
   1934 	if_attach(ifp);
   1935 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1936 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1937 
   1938 	adapter->max_frame_size =
   1939 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1940 
   1941 	/*
   1942 	 * Tell the upper layer(s) we support long frames.
   1943 	 */
   1944 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1945 
   1946 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1947 	ifp->if_capenable = 0;
   1948 
   1949 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1950 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1951 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1952 	    		| ETHERCAP_VLAN_MTU;
   1953 	ec->ec_capenable = ec->ec_capabilities;
   1954 
   1955 	/* Don't enable LRO by default */
   1956 	ifp->if_capabilities |= IFCAP_LRO;
   1957 
   1958 	/*
    1959 	** Don't turn this on by default; if vlans are
    1960 	** created on another pseudo device (eg. lagg)
    1961 	** then vlan events are not passed thru, breaking
    1962 	** operation, but with HW FILTER off it works. If
    1963 	** using vlans directly on this driver you can
    1964 	** enable this and get full hardware tag filtering.
   1965 	*/
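         	/*
         	 * Note: ec_capenable was copied above, so VLAN_HWFILTER is
         	 * only advertised here and stays off until explicitly enabled.
         	 */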
   1966 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1967 
   1968 	/*
   1969 	 * Specify the media types supported by this adapter and register
   1970 	 * callbacks to update media and link information
   1971 	 */
   1972 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1973 		     ixv_media_status);
   1974 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
   1975 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1976 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1977 
   1978 	return;
   1979 }
   1980 
   1981 static void
   1982 ixv_config_link(struct adapter *adapter)
   1983 {
   1984 	struct ixgbe_hw *hw = &adapter->hw;
   1985 	u32	autoneg, err = 0;
   1986 
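         	/*
         	 * For the VF these mac.ops hooks come from the ixgbe_vf shared
         	 * code, which learns link state from the PF (VFLINKS register
         	 * and mailbox) rather than by touching a PHY directly.
         	 */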
   1987 	if (hw->mac.ops.check_link)
   1988 		err = hw->mac.ops.check_link(hw, &autoneg,
   1989 		    &adapter->link_up, FALSE);
   1990 	if (err)
   1991 		goto out;
   1992 
   1993 	if (hw->mac.ops.setup_link)
    1994 		err = hw->mac.ops.setup_link(hw,
   1995 		    autoneg, adapter->link_up);
   1996 out:
   1997 	return;
   1998 }
   1999 
   2000 /********************************************************************
   2001  * Manage DMA'able memory.
   2002  *******************************************************************/
   2003 
   2004 static int
   2005 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
   2006 		struct ixv_dma_alloc *dma, int mapflags)
   2007 {
   2008 	device_t dev = adapter->dev;
   2009 	int             r, rsegs;
   2010 
   2011 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
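         	/*
         	 * Standard NetBSD bus_dma sequence: create a tag, allocate raw
         	 * memory, map it into kernel VA, create a DMA map, then load
         	 * the map so we end up with both a CPU pointer (dma_vaddr) and
         	 * a device-visible address (dma_paddr).
         	 */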
   2012 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2013 			       size,	/* maxsize */
   2014 			       1,	/* nsegments */
   2015 			       size,	/* maxsegsize */
   2016 			       BUS_DMA_ALLOCNOW,	/* flags */
   2017 			       &dma->dma_tag);
   2018 	if (r != 0) {
   2019 		aprint_error_dev(dev,
   2020 		    "ixv_dma_malloc: bus_dma_tag_create failed; error %u\n", r);
   2021 		goto fail_0;
   2022 	}
   2023 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2024 		size,
   2025 		dma->dma_tag->dt_alignment,
   2026 		dma->dma_tag->dt_boundary,
   2027 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2028 	if (r != 0) {
   2029 		aprint_error_dev(dev,
   2030 		    "%s: bus_dmamem_alloc failed; error %u\n", __func__, r);
   2031 		goto fail_1;
   2032 	}
   2033 
   2034 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2035 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2036 	if (r != 0) {
   2037 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2038 		    __func__, r);
   2039 		goto fail_2;
   2040 	}
   2041 
   2042 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2043 	if (r != 0) {
   2044 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2045 		    __func__, r);
   2046 		goto fail_3;
   2047 	}
   2048 
   2049 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2050 			    size,
   2051 			    NULL,
   2052 			    mapflags | BUS_DMA_NOWAIT);
   2053 	if (r != 0) {
   2054 		aprint_error_dev(dev,"%s: bus_dmamap_load failed; error %u\n",
   2055 		    __func__, r);
   2056 		goto fail_4;
   2057 	}
   2058 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2059 	dma->dma_size = size;
   2060 	return 0;
   2061 fail_4:
   2062 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2063 fail_3:
   2064 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2065 fail_2:
   2066 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2067 fail_1:
   2068 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2069 fail_0:
   2070 	dma->dma_tag = NULL;
   2071 	return (r);
   2072 }
   2073 
   2074 static void
   2075 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
   2076 {
   2077 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2078 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2079 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2080 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2081 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2082 }
   2083 
   2084 
   2085 /*********************************************************************
   2086  *
   2087  *  Allocate memory for the transmit and receive rings, and then
   2088  *  the descriptors associated with each, called only once at attach.
   2089  *
   2090  **********************************************************************/
   2091 static int
   2092 ixv_allocate_queues(struct adapter *adapter)
   2093 {
   2094 	device_t	dev = adapter->dev;
   2095 	struct ix_queue	*que;
   2096 	struct tx_ring	*txr;
   2097 	struct rx_ring	*rxr;
   2098 	int rsize, tsize, error = 0;
   2099 	int txconf = 0, rxconf = 0;
   2100 
   2101         /* First allocate the top level queue structs */
   2102         if (!(adapter->queues =
   2103             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2104             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2105                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2106                 error = ENOMEM;
   2107                 goto fail;
   2108         }
   2109 
   2110 	/* First allocate the TX ring struct memory */
   2111 	if (!(adapter->tx_rings =
   2112 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2113 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2114 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2115 		error = ENOMEM;
   2116 		goto tx_fail;
   2117 	}
   2118 
   2119 	/* Next allocate the RX */
   2120 	if (!(adapter->rx_rings =
   2121 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2122 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2123 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2124 		error = ENOMEM;
   2125 		goto rx_fail;
   2126 	}
   2127 
   2128 	/* For the ring itself */
   2129 	tsize = roundup2(adapter->num_tx_desc *
   2130 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2131 
   2132 	/*
   2133 	 * Now set up the TX queues, txconf is needed to handle the
   2134 	 * possibility that things fail midcourse and we need to
   2135 	 * undo memory gracefully
   2136 	 */
   2137 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2138 		/* Set up some basics */
   2139 		txr = &adapter->tx_rings[i];
   2140 		txr->adapter = adapter;
   2141 		txr->me = i;
   2142 
   2143 		/* Initialize the TX side lock */
   2144 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2145 		    device_xname(dev), txr->me);
   2146 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2147 
   2148 		if (ixv_dma_malloc(adapter, tsize,
   2149 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2150 			aprint_error_dev(dev,
   2151 			    "Unable to allocate TX Descriptor memory\n");
   2152 			error = ENOMEM;
   2153 			goto err_tx_desc;
   2154 		}
   2155 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2156 		bzero((void *)txr->tx_base, tsize);
   2157 
   2158         	/* Now allocate transmit buffers for the ring */
   2159         	if (ixv_allocate_transmit_buffers(txr)) {
   2160 			aprint_error_dev(dev,
   2161 			    "Critical Failure setting up transmit buffers\n");
   2162 			error = ENOMEM;
   2163 			goto err_tx_desc;
   2164         	}
   2165 #if __FreeBSD_version >= 800000
   2166 		/* Allocate a buf ring */
   2167 		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
   2168 		    M_WAITOK, &txr->tx_mtx);
   2169 		if (txr->br == NULL) {
   2170 			aprint_error_dev(dev,
   2171 			    "Critical Failure setting up buf ring\n");
   2172 			error = ENOMEM;
   2173 			goto err_tx_desc;
   2174 		}
   2175 #endif
   2176 	}
   2177 
   2178 	/*
   2179 	 * Next the RX queues...
   2180 	 */
   2181 	rsize = roundup2(adapter->num_rx_desc *
   2182 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2183 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2184 		rxr = &adapter->rx_rings[i];
   2185 		/* Set up some basics */
   2186 		rxr->adapter = adapter;
   2187 		rxr->me = i;
   2188 
   2189 		/* Initialize the RX side lock */
   2190 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2191 		    device_xname(dev), rxr->me);
   2192 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2193 
   2194 		if (ixv_dma_malloc(adapter, rsize,
   2195 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2196 			aprint_error_dev(dev,
   2197 			    "Unable to allocate RxDescriptor memory\n");
   2198 			error = ENOMEM;
   2199 			goto err_rx_desc;
   2200 		}
   2201 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2202 		bzero((void *)rxr->rx_base, rsize);
   2203 
   2204         	/* Allocate receive buffers for the ring*/
   2205 		if (ixv_allocate_receive_buffers(rxr)) {
   2206 			aprint_error_dev(dev,
   2207 			    "Critical Failure setting up receive buffers\n");
   2208 			error = ENOMEM;
   2209 			goto err_rx_desc;
   2210 		}
   2211 	}
   2212 
   2213 	/*
   2214 	** Finally set up the queue holding structs
   2215 	*/
   2216 	for (int i = 0; i < adapter->num_queues; i++) {
   2217 		que = &adapter->queues[i];
   2218 		que->adapter = adapter;
   2219 		que->txr = &adapter->tx_rings[i];
   2220 		que->rxr = &adapter->rx_rings[i];
   2221 	}
   2222 
   2223 	return (0);
   2224 
   2225 err_rx_desc:
   2226 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2227 		ixv_dma_free(adapter, &rxr->rxdma);
   2228 err_tx_desc:
   2229 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2230 		ixv_dma_free(adapter, &txr->txdma);
   2231 	free(adapter->rx_rings, M_DEVBUF);
   2232 rx_fail:
   2233 	free(adapter->tx_rings, M_DEVBUF);
   2234 tx_fail:
   2235 	free(adapter->queues, M_DEVBUF);
   2236 fail:
   2237 	return (error);
   2238 }
   2239 
   2240 
   2241 /*********************************************************************
   2242  *
   2243  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2244  *  the information needed to transmit a packet on the wire. This is
   2245  *  called only once at attach, setup is done every reset.
   2246  *
   2247  **********************************************************************/
   2248 static int
   2249 ixv_allocate_transmit_buffers(struct tx_ring *txr)
   2250 {
   2251 	struct adapter *adapter = txr->adapter;
   2252 	device_t dev = adapter->dev;
   2253 	struct ixv_tx_buf *txbuf;
   2254 	int error, i;
   2255 
   2256 	/*
   2257 	 * Setup DMA descriptor areas.
   2258 	 */
   2259 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2260 			       1, 0,		/* alignment, bounds */
   2261 			       IXV_TSO_SIZE,		/* maxsize */
   2262 			       32,			/* nsegments */
   2263 			       PAGE_SIZE,		/* maxsegsize */
   2264 			       0,			/* flags */
   2265 			       &txr->txtag))) {
   2266 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   2267 		goto fail;
   2268 	}
   2269 
   2270 	if (!(txr->tx_buffers =
   2271 	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
   2272 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2273 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   2274 		error = ENOMEM;
   2275 		goto fail;
   2276 	}
   2277 
   2278         /* Create the descriptor buffer dma maps */
   2279 	txbuf = txr->tx_buffers;
   2280 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2281 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   2282 		if (error != 0) {
   2283 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   2284 			goto fail;
   2285 		}
   2286 	}
   2287 
   2288 	return 0;
   2289 fail:
   2290 	/* We free all, it handles case where we are in the middle */
   2291 	ixv_free_transmit_structures(adapter);
   2292 	return (error);
   2293 }
   2294 
   2295 /*********************************************************************
   2296  *
   2297  *  Initialize a transmit ring.
   2298  *
   2299  **********************************************************************/
   2300 static void
   2301 ixv_setup_transmit_ring(struct tx_ring *txr)
   2302 {
   2303 	struct adapter *adapter = txr->adapter;
   2304 	struct ixv_tx_buf *txbuf;
   2305 	int i;
   2306 
   2307 	/* Clear the old ring contents */
   2308 	IXV_TX_LOCK(txr);
   2309 	bzero((void *)txr->tx_base,
   2310 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   2311 	/* Reset indices */
   2312 	txr->next_avail_desc = 0;
   2313 	txr->next_to_clean = 0;
   2314 
   2315 	/* Free any existing tx buffers. */
   2316         txbuf = txr->tx_buffers;
   2317 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2318 		if (txbuf->m_head != NULL) {
   2319 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   2320 			    0, txbuf->m_head->m_pkthdr.len,
   2321 			    BUS_DMASYNC_POSTWRITE);
   2322 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   2323 			m_freem(txbuf->m_head);
   2324 			txbuf->m_head = NULL;
   2325 		}
   2326 		/* Clear the EOP index */
   2327 		txbuf->eop_index = -1;
   2328         }
   2329 
   2330 	/* Set number of descriptors available */
   2331 	txr->tx_avail = adapter->num_tx_desc;
   2332 
   2333 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2334 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2335 	IXV_TX_UNLOCK(txr);
   2336 }
   2337 
   2338 /*********************************************************************
   2339  *
   2340  *  Initialize all transmit rings.
   2341  *
   2342  **********************************************************************/
   2343 static int
   2344 ixv_setup_transmit_structures(struct adapter *adapter)
   2345 {
   2346 	struct tx_ring *txr = adapter->tx_rings;
   2347 
   2348 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   2349 		ixv_setup_transmit_ring(txr);
   2350 
   2351 	return (0);
   2352 }
   2353 
   2354 /*********************************************************************
   2355  *
   2356  *  Enable transmit unit.
   2357  *
   2358  **********************************************************************/
   2359 static void
   2360 ixv_initialize_transmit_units(struct adapter *adapter)
   2361 {
   2362 	struct tx_ring	*txr = adapter->tx_rings;
   2363 	struct ixgbe_hw	*hw = &adapter->hw;
   2364 
   2365 
   2366 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2367 		u64	tdba = txr->txdma.dma_paddr;
   2368 		u32	txctrl, txdctl;
   2369 
   2370 		/* Set WTHRESH to 8, burst writeback */
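         		/*
         		 * WTHRESH occupies bits 22:16 of TXDCTL, so (8 << 16)
         		 * requests descriptor write-back in bursts of 8.
         		 */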
   2371 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2372 		txdctl |= (8 << 16);
   2373 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2374 		/* Now enable */
   2375 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2376 		txdctl |= IXGBE_TXDCTL_ENABLE;
   2377 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2378 
   2379 		/* Set the HW Tx Head and Tail indices */
   2380 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   2381 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   2382 
   2383 		/* Setup Transmit Descriptor Cmd Settings */
   2384 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   2385 		txr->watchdog_check = FALSE;
   2386 
   2387 		/* Set Ring parameters */
   2388 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   2389 		       (tdba & 0x00000000ffffffffULL));
   2390 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   2391 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   2392 		    adapter->num_tx_desc *
   2393 		    sizeof(struct ixgbe_legacy_tx_desc));
   2394 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   2395 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   2396 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   2397 		break;
   2398 	}
   2399 
   2400 	return;
   2401 }
   2402 
   2403 /*********************************************************************
   2404  *
   2405  *  Free all transmit rings.
   2406  *
   2407  **********************************************************************/
   2408 static void
   2409 ixv_free_transmit_structures(struct adapter *adapter)
   2410 {
   2411 	struct tx_ring *txr = adapter->tx_rings;
   2412 
   2413 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2414 		ixv_free_transmit_buffers(txr);
   2415 		ixv_dma_free(adapter, &txr->txdma);
   2416 		IXV_TX_LOCK_DESTROY(txr);
   2417 	}
   2418 	free(adapter->tx_rings, M_DEVBUF);
   2419 }
   2420 
   2421 /*********************************************************************
   2422  *
   2423  *  Free transmit ring related data structures.
   2424  *
   2425  **********************************************************************/
   2426 static void
   2427 ixv_free_transmit_buffers(struct tx_ring *txr)
   2428 {
   2429 	struct adapter *adapter = txr->adapter;
   2430 	struct ixv_tx_buf *tx_buffer;
   2431 	int             i;
   2432 
   2433 	INIT_DEBUGOUT("free_transmit_ring: begin");
   2434 
   2435 	if (txr->tx_buffers == NULL)
   2436 		return;
   2437 
   2438 	tx_buffer = txr->tx_buffers;
   2439 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   2440 		if (tx_buffer->m_head != NULL) {
   2441 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   2442 			    0, tx_buffer->m_head->m_pkthdr.len,
   2443 			    BUS_DMASYNC_POSTWRITE);
   2444 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2445 			m_freem(tx_buffer->m_head);
   2446 			tx_buffer->m_head = NULL;
   2447 			if (tx_buffer->map != NULL) {
   2448 				ixgbe_dmamap_destroy(txr->txtag,
   2449 				    tx_buffer->map);
   2450 				tx_buffer->map = NULL;
   2451 			}
   2452 		} else if (tx_buffer->map != NULL) {
   2453 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2454 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   2455 			tx_buffer->map = NULL;
   2456 		}
   2457 	}
   2458 #if __FreeBSD_version >= 800000
   2459 	if (txr->br != NULL)
   2460 		buf_ring_free(txr->br, M_DEVBUF);
   2461 #endif
   2462 	if (txr->tx_buffers != NULL) {
   2463 		free(txr->tx_buffers, M_DEVBUF);
   2464 		txr->tx_buffers = NULL;
   2465 	}
   2466 	if (txr->txtag != NULL) {
   2467 		ixgbe_dma_tag_destroy(txr->txtag);
   2468 		txr->txtag = NULL;
   2469 	}
   2470 	return;
   2471 }
   2472 
   2473 /*********************************************************************
   2474  *
   2475  *  Advanced Context Descriptor setup for VLAN or CSUM
   2476  *
   2477  **********************************************************************/
   2478 
   2479 static u32
   2480 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   2481 {
   2482 	struct m_tag *mtag;
   2483 	struct adapter *adapter = txr->adapter;
   2484 	struct ethercom *ec = &adapter->osdep.ec;
   2485 	struct ixgbe_adv_tx_context_desc *TXD;
   2486 	struct ixv_tx_buf        *tx_buffer;
   2487 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2488 	struct ether_vlan_header *eh;
   2489 	struct ip ip;
   2490 	struct ip6_hdr ip6;
   2491 	int  ehdrlen, ip_hlen = 0;
   2492 	u16	etype;
   2493 	u8	ipproto __diagused = 0;
   2494 	bool	offload;
   2495 	int ctxd = txr->next_avail_desc;
   2496 	u16 vtag = 0;
   2497 
   2498 
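         	/*
         	 * A context descriptor takes one slot in the TX ring and
         	 * carries the VLAN tag plus header-length/checksum layout that
         	 * the following data descriptors reference, which is why this
         	 * function consumes a descriptor and hands back the olinfo
         	 * bits for the caller to put in the data descriptor.
         	 */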
   2499 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   2500 
   2501 	tx_buffer = &txr->tx_buffers[ctxd];
   2502 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2503 
   2504 	/*
   2505 	** In advanced descriptors the vlan tag must
   2506 	** be placed into the descriptor itself.
   2507 	*/
   2508 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2509 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2510 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2511 	} else if (!offload)
   2512 		return 0;
   2513 
   2514 	/*
   2515 	 * Determine where frame payload starts.
   2516 	 * Jump over vlan headers if already present,
   2517 	 * helpful for QinQ too.
   2518 	 */
   2519 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   2520 	eh = mtod(mp, struct ether_vlan_header *);
   2521 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   2522 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   2523 		etype = ntohs(eh->evl_proto);
   2524 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2525 	} else {
   2526 		etype = ntohs(eh->evl_encap_proto);
   2527 		ehdrlen = ETHER_HDR_LEN;
   2528 	}
   2529 
   2530 	/* Set the ether header length */
   2531 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2532 
   2533 	switch (etype) {
   2534 	case ETHERTYPE_IP:
   2535 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   2536 		ip_hlen = ip.ip_hl << 2;
   2537 		ipproto = ip.ip_p;
   2538 #if 0
   2539 		ip.ip_sum = 0;
   2540 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   2541 #else
   2542 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   2543 		    ip.ip_sum == 0);
   2544 #endif
   2545 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2546 		break;
   2547 	case ETHERTYPE_IPV6:
   2548 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   2549 		ip_hlen = sizeof(ip6);
   2550 		ipproto = ip6.ip6_nxt;
   2551 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   2552 		break;
   2553 	default:
   2554 		break;
   2555 	}
   2556 
   2557 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   2558 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   2559 
   2560 	vlan_macip_lens |= ip_hlen;
   2561 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2562 
   2563 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   2564 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2565 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2566 		KASSERT(ipproto == IPPROTO_TCP);
   2567 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   2568 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   2569 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2570 		KASSERT(ipproto == IPPROTO_UDP);
   2571 	}
   2572 
   2573 	/* Now copy bits into descriptor */
   2574 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2575 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2576 	TXD->seqnum_seed = htole32(0);
   2577 	TXD->mss_l4len_idx = htole32(0);
   2578 
   2579 	tx_buffer->m_head = NULL;
   2580 	tx_buffer->eop_index = -1;
   2581 
   2582 	/* We've consumed the first desc, adjust counters */
   2583 	if (++ctxd == adapter->num_tx_desc)
   2584 		ctxd = 0;
   2585 	txr->next_avail_desc = ctxd;
   2586 	--txr->tx_avail;
   2587 
   2588         return olinfo;
   2589 }
   2590 
   2591 /**********************************************************************
   2592  *
   2593  *  Setup work for hardware segmentation offload (TSO) on
   2594  *  adapters using advanced tx descriptors
   2595  *
   2596  **********************************************************************/
   2597 static bool
   2598 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   2599 {
   2600 	struct m_tag *mtag;
   2601 	struct adapter *adapter = txr->adapter;
   2602 	struct ethercom *ec = &adapter->osdep.ec;
   2603 	struct ixgbe_adv_tx_context_desc *TXD;
   2604 	struct ixv_tx_buf        *tx_buffer;
   2605 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2606 	u32 mss_l4len_idx = 0;
   2607 	u16 vtag = 0;
   2608 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
   2609 	struct ether_vlan_header *eh;
   2610 	struct ip *ip;
   2611 	struct tcphdr *th;
   2612 
   2613 
   2614 	/*
   2615 	 * Determine where frame payload starts.
   2616 	 * Jump over vlan headers if already present
   2617 	 */
   2618 	eh = mtod(mp, struct ether_vlan_header *);
   2619 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   2620 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2621 	else
   2622 		ehdrlen = ETHER_HDR_LEN;
   2623 
   2624         /* Ensure we have at least the IP+TCP header in the first mbuf. */
   2625         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   2626 		return FALSE;
   2627 
   2628 	ctxd = txr->next_avail_desc;
   2629 	tx_buffer = &txr->tx_buffers[ctxd];
   2630 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2631 
   2632 	ip = (struct ip *)(mp->m_data + ehdrlen);
   2633 	if (ip->ip_p != IPPROTO_TCP)
   2634 		return FALSE;   /* 0 */
   2635 	ip->ip_sum = 0;
   2636 	ip_hlen = ip->ip_hl << 2;
   2637 	th = (struct tcphdr *)((char *)ip + ip_hlen);
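         	/*
         	 * For TSO the hardware wants th_sum pre-seeded with the
         	 * pseudo-header checksum computed without the length;
         	 * in_cksum_phdr() over the addresses and protocol gives that.
         	 */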
   2638 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   2639 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   2640 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   2641 	tcp_hlen = th->th_off << 2;
   2642 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   2643 
   2644 	/* This is used in the transmit desc in encap */
   2645 	*paylen = mp->m_pkthdr.len - hdrlen;
   2646 
   2647 	/* VLAN MACLEN IPLEN */
   2648 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2649 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2650                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2651 	}
   2652 
   2653 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2654 	vlan_macip_lens |= ip_hlen;
   2655 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2656 
   2657 	/* ADV DTYPE TUCMD */
   2658 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2659 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2660 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2661 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2662 
   2663 
   2664 	/* MSS L4LEN IDX */
   2665 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   2666 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   2667 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   2668 
   2669 	TXD->seqnum_seed = htole32(0);
   2670 	tx_buffer->m_head = NULL;
   2671 	tx_buffer->eop_index = -1;
   2672 
   2673 	if (++ctxd == adapter->num_tx_desc)
   2674 		ctxd = 0;
   2675 
   2676 	txr->tx_avail--;
   2677 	txr->next_avail_desc = ctxd;
   2678 	return TRUE;
   2679 }
   2680 
   2681 
   2682 /**********************************************************************
   2683  *
   2684  *  Examine each tx_buffer in the used queue. If the hardware is done
   2685  *  processing the packet then free associated resources. The
   2686  *  tx_buffer is put back on the free queue.
   2687  *
   2688  **********************************************************************/
   2689 static bool
   2690 ixv_txeof(struct tx_ring *txr)
   2691 {
   2692 	struct adapter	*adapter = txr->adapter;
   2693 	struct ifnet	*ifp = adapter->ifp;
   2694 	u32	first, last, done;
   2695 	struct ixv_tx_buf *tx_buffer;
   2696 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   2697 
   2698 	KASSERT(mutex_owned(&txr->tx_mtx));
   2699 
   2700 	if (txr->tx_avail == adapter->num_tx_desc)
   2701 		return false;
   2702 
   2703 	first = txr->next_to_clean;
   2704 	tx_buffer = &txr->tx_buffers[first];
   2705 	/* For cleanup we just use legacy struct */
   2706 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2707 	last = tx_buffer->eop_index;
   2708 	if (last == -1)
   2709 		return false;
   2710 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2711 
   2712 	/*
   2713 	** Get the index of the first descriptor
   2714 	** BEYOND the EOP and call that 'done'.
   2715 	** I do this so the comparison in the
   2716 	** inner while loop below can be simple
   2717 	*/
   2718 	if (++last == adapter->num_tx_desc) last = 0;
   2719 	done = last;
   2720 
   2721         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2722             BUS_DMASYNC_POSTREAD);
   2723 	/*
   2724 	** Only the EOP descriptor of a packet now has the DD
   2725 	** bit set, this is what we look for...
   2726 	*/
   2727 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   2728 		/* We clean the range of the packet */
   2729 		while (first != done) {
   2730 			tx_desc->upper.data = 0;
   2731 			tx_desc->lower.data = 0;
   2732 			tx_desc->buffer_addr = 0;
   2733 			++txr->tx_avail;
   2734 
   2735 			if (tx_buffer->m_head) {
   2736 				bus_dmamap_sync(txr->txtag->dt_dmat,
   2737 				    tx_buffer->map,
   2738 				    0, tx_buffer->m_head->m_pkthdr.len,
   2739 				    BUS_DMASYNC_POSTWRITE);
   2740 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2741 				m_freem(tx_buffer->m_head);
   2742 				tx_buffer->m_head = NULL;
   2743 				tx_buffer->map = NULL;
   2744 			}
   2745 			tx_buffer->eop_index = -1;
   2746 			getmicrotime(&txr->watchdog_time);
   2747 
   2748 			if (++first == adapter->num_tx_desc)
   2749 				first = 0;
   2750 
   2751 			tx_buffer = &txr->tx_buffers[first];
   2752 			tx_desc =
   2753 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2754 		}
   2755 		++ifp->if_opackets;
   2756 		/* See if there is more work now */
   2757 		last = tx_buffer->eop_index;
   2758 		if (last != -1) {
   2759 			eop_desc =
   2760 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2761 			/* Get next done point */
   2762 			if (++last == adapter->num_tx_desc) last = 0;
   2763 			done = last;
   2764 		} else
   2765 			break;
   2766 	}
   2767 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2768 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2769 
   2770 	txr->next_to_clean = first;
   2771 
   2772 	/*
   2773 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   2774 	 * it is OK to send packets. If there are no pending descriptors,
   2775 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   2776 	 * restart the timeout.
   2777 	 */
   2778 	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
   2779 		ifp->if_flags &= ~IFF_OACTIVE;
   2780 		if (txr->tx_avail == adapter->num_tx_desc) {
   2781 			txr->watchdog_check = FALSE;
   2782 			return false;
   2783 		}
   2784 	}
   2785 
   2786 	return true;
   2787 }
   2788 
   2789 /*********************************************************************
   2790  *
   2791  *  Refresh mbuf buffers for RX descriptor rings
    2792  *   - now keeps its own state so discards due to resource
    2793  *     exhaustion are unnecessary; if an mbuf cannot be obtained
    2794  *     it just returns, keeping its placeholder, so it can simply
    2795  *     be called again to retry.
   2796  *
   2797  **********************************************************************/
   2798 static void
   2799 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
   2800 {
   2801 	struct adapter		*adapter = rxr->adapter;
   2802 	struct ixv_rx_buf	*rxbuf;
   2803 	struct mbuf		*mh, *mp;
   2804 	int			i, j, error;
   2805 	bool			refreshed = false;
   2806 
   2807 	i = j = rxr->next_to_refresh;
   2808         /* Get the control variable, one beyond refresh point */
   2809 	if (++j == adapter->num_rx_desc)
   2810 		j = 0;
   2811 	while (j != limit) {
   2812 		rxbuf = &rxr->rx_buffers[i];
   2813 		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
   2814 			mh = m_gethdr(M_NOWAIT, MT_DATA);
   2815 			if (mh == NULL)
   2816 				goto update;
   2817 			mh->m_pkthdr.len = mh->m_len = MHLEN;
   2818 			mh->m_flags |= M_PKTHDR;
   2819 			m_adj(mh, ETHER_ALIGN);
   2820 			/* Get the memory mapping */
   2821 			error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   2822 			    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   2823 			if (error != 0) {
   2824 				printf("GET BUF: dmamap load"
   2825 				    " failure - %d\n", error);
   2826 				m_free(mh);
   2827 				goto update;
   2828 			}
   2829 			rxbuf->m_head = mh;
   2830 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2831 			    BUS_DMASYNC_PREREAD);
   2832 			rxr->rx_base[i].read.hdr_addr =
   2833 			    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   2834 		}
   2835 
   2836 		if (rxbuf->m_pack == NULL) {
   2837 			mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   2838 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   2839 			if (mp == NULL) {
   2840 				rxr->no_jmbuf.ev_count++;
   2841 				goto update;
    2842 			}
   2844 
   2845 			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   2846 			/* Get the memory mapping */
   2847 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   2848 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   2849 			if (error != 0) {
   2850 				printf("GET BUF: dmamap load"
   2851 				    " failure - %d\n", error);
   2852 				m_free(mp);
   2853 				rxbuf->m_pack = NULL;
   2854 				goto update;
   2855 			}
   2856 			rxbuf->m_pack = mp;
   2857 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2858 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   2859 			rxr->rx_base[i].read.pkt_addr =
   2860 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   2861 		}
   2862 
   2863 		refreshed = true;
   2864 		rxr->next_to_refresh = i = j;
   2865 		/* Calculate next index */
   2866 		if (++j == adapter->num_rx_desc)
   2867 			j = 0;
   2868 	}
   2869 update:
   2870 	if (refreshed) /* update tail index */
   2871 		IXGBE_WRITE_REG(&adapter->hw,
   2872 		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
   2873 	return;
   2874 }
   2875 
   2876 /*********************************************************************
   2877  *
   2878  *  Allocate memory for rx_buffer structures. Since we use one
   2879  *  rx_buffer per received packet, the maximum number of rx_buffer's
   2880  *  that we'll need is equal to the number of receive descriptors
   2881  *  that we've allocated.
   2882  *
   2883  **********************************************************************/
   2884 static int
   2885 ixv_allocate_receive_buffers(struct rx_ring *rxr)
   2886 {
   2887 	struct	adapter 	*adapter = rxr->adapter;
   2888 	device_t 		dev = adapter->dev;
   2889 	struct ixv_rx_buf 	*rxbuf;
   2890 	int             	i, bsize, error;
   2891 
   2892 	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
   2893 	if (!(rxr->rx_buffers =
   2894 	    (struct ixv_rx_buf *) malloc(bsize,
   2895 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2896 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   2897 		error = ENOMEM;
   2898 		goto fail;
   2899 	}
   2900 
   2901 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2902 				   1, 0,	/* alignment, bounds */
   2903 				   MSIZE,		/* maxsize */
   2904 				   1,			/* nsegments */
   2905 				   MSIZE,		/* maxsegsize */
   2906 				   0,			/* flags */
   2907 				   &rxr->htag))) {
   2908 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2909 		goto fail;
   2910 	}
   2911 
   2912 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2913 				   1, 0,	/* alignment, bounds */
   2914 				   MJUMPAGESIZE,	/* maxsize */
   2915 				   1,			/* nsegments */
   2916 				   MJUMPAGESIZE,	/* maxsegsize */
   2917 				   0,			/* flags */
   2918 				   &rxr->ptag))) {
   2919 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2920 		goto fail;
   2921 	}
   2922 
   2923 	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
   2924 		rxbuf = &rxr->rx_buffers[i];
   2925 		error = ixgbe_dmamap_create(rxr->htag,
   2926 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   2927 		if (error) {
   2928 			aprint_error_dev(dev, "Unable to create RX head map\n");
   2929 			goto fail;
   2930 		}
   2931 		error = ixgbe_dmamap_create(rxr->ptag,
   2932 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   2933 		if (error) {
   2934 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   2935 			goto fail;
   2936 		}
   2937 	}
   2938 
   2939 	return (0);
   2940 
   2941 fail:
   2942 	/* Frees all, but can handle partial completion */
   2943 	ixv_free_receive_structures(adapter);
   2944 	return (error);
   2945 }
   2946 
   2947 static void
   2948 ixv_free_receive_ring(struct rx_ring *rxr)
   2949 {
   2950 	struct  adapter         *adapter;
   2951 	struct ixv_rx_buf       *rxbuf;
   2952 	int i;
   2953 
   2954 	adapter = rxr->adapter;
   2955 	for (i = 0; i < adapter->num_rx_desc; i++) {
   2956 		rxbuf = &rxr->rx_buffers[i];
   2957 		if (rxbuf->m_head != NULL) {
   2958 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2959 			    BUS_DMASYNC_POSTREAD);
   2960 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   2961 			rxbuf->m_head->m_flags |= M_PKTHDR;
   2962 			m_freem(rxbuf->m_head);
   2963 		}
   2964 		if (rxbuf->m_pack != NULL) {
   2965 			/* XXX not ixgbe_ ? */
   2966 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2967 			    0, rxbuf->m_pack->m_pkthdr.len,
   2968 			    BUS_DMASYNC_POSTREAD);
   2969 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   2970 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   2971 			m_freem(rxbuf->m_pack);
   2972 		}
   2973 		rxbuf->m_head = NULL;
   2974 		rxbuf->m_pack = NULL;
   2975 	}
   2976 }
   2977 
   2978 
   2979 /*********************************************************************
   2980  *
   2981  *  Initialize a receive ring and its buffers.
   2982  *
   2983  **********************************************************************/
   2984 static int
   2985 ixv_setup_receive_ring(struct rx_ring *rxr)
   2986 {
   2987 	struct	adapter 	*adapter;
   2988 	struct ixv_rx_buf	*rxbuf;
   2989 #ifdef LRO
   2990 	struct ifnet		*ifp;
   2991 	struct lro_ctrl		*lro = &rxr->lro;
   2992 #endif /* LRO */
   2993 	int			rsize, error = 0;
   2994 
   2995 	adapter = rxr->adapter;
   2996 #ifdef LRO
   2997 	ifp = adapter->ifp;
   2998 #endif /* LRO */
   2999 
   3000 	/* Clear the ring contents */
   3001 	IXV_RX_LOCK(rxr);
   3002 	rsize = roundup2(adapter->num_rx_desc *
   3003 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3004 	bzero((void *)rxr->rx_base, rsize);
   3005 
   3006 	/* Free current RX buffer structs and their mbufs */
   3007 	ixv_free_receive_ring(rxr);
   3008 
   3009 	IXV_RX_UNLOCK(rxr);
   3010 
   3011 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3012 	 * or size of jumbo mbufs may have changed.
   3013 	 */
   3014 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3015 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   3016 
   3017 	IXV_RX_LOCK(rxr);
   3018 
   3019 	/* Configure header split? */
   3020 	if (ixv_header_split)
   3021 		rxr->hdr_split = TRUE;
   3022 
   3023 	/* Now replenish the mbufs */
   3024 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3025 		struct mbuf	*mh, *mp;
   3026 
   3027 		rxbuf = &rxr->rx_buffers[j];
   3028 		/*
    3029 		** Don't allocate mbufs if not
    3030 		** doing header split; it's wasteful
   3031 		*/
   3032 		if (rxr->hdr_split == FALSE)
   3033 			goto skip_head;
   3034 
   3035 		/* First the header */
   3036 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3037 		if (rxbuf->m_head == NULL) {
   3038 			error = ENOBUFS;
   3039 			goto fail;
   3040 		}
   3041 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3042 		mh = rxbuf->m_head;
   3043 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3044 		mh->m_flags |= M_PKTHDR;
   3045 		/* Get the memory mapping */
   3046 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3047 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3048 		if (error != 0) /* Nothing elegant to do here */
   3049 			goto fail;
   3050 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3051 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3052 		/* Update descriptor */
   3053 		rxr->rx_base[j].read.hdr_addr =
   3054 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3055 
   3056 skip_head:
   3057 		/* Now the payload cluster */
   3058 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3059 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3060 		if (rxbuf->m_pack == NULL) {
   3061 			error = ENOBUFS;
   3062                         goto fail;
   3063 		}
   3064 		mp = rxbuf->m_pack;
   3065 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3066 		/* Get the memory mapping */
   3067 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3068 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3069 		if (error != 0)
   3070                         goto fail;
   3071 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3072 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3073 		/* Update descriptor */
   3074 		rxr->rx_base[j].read.pkt_addr =
   3075 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3076 	}
   3077 
   3078 
   3079 	/* Setup our descriptor indices */
   3080 	rxr->next_to_check = 0;
   3081 	rxr->next_to_refresh = 0;
   3082 	rxr->lro_enabled = FALSE;
   3083 	rxr->rx_split_packets.ev_count = 0;
   3084 	rxr->rx_bytes.ev_count = 0;
   3085 	rxr->discard = FALSE;
   3086 
   3087 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3088 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3089 
   3090 #ifdef LRO
   3091 	/*
   3092 	** Now set up the LRO interface:
   3093 	*/
   3094 	if (ifp->if_capenable & IFCAP_LRO) {
   3095 		device_t dev = adapter->dev;
   3096 		int err = tcp_lro_init(lro);
   3097 		if (err) {
   3098 			device_printf(dev, "LRO Initialization failed!\n");
   3099 			goto fail;
   3100 		}
   3101 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   3102 		rxr->lro_enabled = TRUE;
   3103 		lro->ifp = adapter->ifp;
   3104 	}
   3105 #endif /* LRO */
   3106 
   3107 	IXV_RX_UNLOCK(rxr);
   3108 	return (0);
   3109 
   3110 fail:
   3111 	ixv_free_receive_ring(rxr);
   3112 	IXV_RX_UNLOCK(rxr);
   3113 	return (error);
   3114 }
   3115 
   3116 /*********************************************************************
   3117  *
   3118  *  Initialize all receive rings.
   3119  *
   3120  **********************************************************************/
   3121 static int
   3122 ixv_setup_receive_structures(struct adapter *adapter)
   3123 {
   3124 	struct rx_ring *rxr = adapter->rx_rings;
   3125 	int j;
   3126 
   3127 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   3128 		if (ixv_setup_receive_ring(rxr))
   3129 			goto fail;
   3130 
   3131 	return (0);
   3132 fail:
   3133 	/*
    3134 	 * Free RX buffers allocated so far; we will only handle
    3135 	 * the rings that completed, since the failing case will have
    3136 	 * cleaned up for itself. 'j' failed, so it's the terminus.
   3137 	 */
   3138 	for (int i = 0; i < j; ++i) {
   3139 		rxr = &adapter->rx_rings[i];
   3140 		ixv_free_receive_ring(rxr);
   3141 	}
   3142 
   3143 	return (ENOBUFS);
   3144 }
   3145 
   3146 /*********************************************************************
   3147  *
   3148  *  Setup receive registers and features.
   3149  *
   3150  **********************************************************************/
   3151 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   3152 
   3153 static void
   3154 ixv_initialize_receive_units(struct adapter *adapter)
   3155 {
   3156 	int i;
   3157 	struct	rx_ring	*rxr = adapter->rx_rings;
   3158 	struct ixgbe_hw	*hw = &adapter->hw;
   3159 	struct ifnet   *ifp = adapter->ifp;
   3160 	u32		bufsz, fctrl, rxcsum, hlreg;
   3161 
   3162 
   3163 	/* Enable broadcasts */
   3164 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   3165 	fctrl |= IXGBE_FCTRL_BAM;
   3166 	fctrl |= IXGBE_FCTRL_DPF;
   3167 	fctrl |= IXGBE_FCTRL_PMCF;
   3168 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   3169 
   3170 	/* Set for Jumbo Frames? */
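         	/*
         	 * SRRCTL.BSIZEPKT is in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT
         	 * is 10), so these shifts select 4 KB receive buffers for jumbo
         	 * frames and 2 KB buffers otherwise.
         	 */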
   3171 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   3172 	if (ifp->if_mtu > ETHERMTU) {
   3173 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   3174 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3175 	} else {
   3176 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   3177 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3178 	}
   3179 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   3180 
   3181 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   3182 		u64 rdba = rxr->rxdma.dma_paddr;
   3183 		u32 reg, rxdctl;
   3184 
   3185 		/* Do the queue enabling first */
   3186 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3187 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3188 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   3189 		for (int k = 0; k < 10; k++) {
   3190 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   3191 			    IXGBE_RXDCTL_ENABLE)
   3192 				break;
   3193 			else
   3194 				msec_delay(1);
   3195 		}
   3196 		wmb();
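		/*
		 * The poll above gives the hardware up to ~10ms to
		 * acknowledge RXDCTL.ENABLE; the wmb() then ensures the
		 * enable is posted before the ring base/length registers
		 * are programmed below.
		 */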
   3197 
   3198 		/* Setup the Base and Length of the Rx Descriptor Ring */
   3199 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   3200 		    (rdba & 0x00000000ffffffffULL));
   3201 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   3202 		    (rdba >> 32));
   3203 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   3204 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   3205 
   3206 		/* Set up the SRRCTL register */
   3207 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   3208 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   3209 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   3210 		reg |= bufsz;
   3211 		if (rxr->hdr_split) {
   3212 			/* Use a standard mbuf for the header */
   3213 			reg |= ((IXV_RX_HDR <<
   3214 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   3215 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   3216 			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   3217 		} else
   3218 			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   3219 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   3220 
   3221 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   3222 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   3223 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   3224 		    adapter->num_rx_desc - 1);
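		/*
		 * With head = 0 and tail = num_rx_desc - 1, all but one
		 * descriptor slot is handed to the hardware at start-up.
		 */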
   3225 	}
   3226 
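	/*
	 * Either PCSD or IPPCSE gets set below: with RX checksum offload
	 * enabled we set PCSD, otherwise we fall back to IPPCSE.  (Our
	 * understanding, following the PF driver: PCSD disables the legacy
	 * packet checksum so the descriptor field can carry the RSS hash.)
	 */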
   3227 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   3228 
   3229 	if (ifp->if_capenable & IFCAP_RXCSUM)
   3230 		rxcsum |= IXGBE_RXCSUM_PCSD;
   3231 
   3232 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   3233 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   3234 
   3235 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   3236 
   3237 	return;
   3238 }
   3239 
   3240 /*********************************************************************
   3241  *
   3242  *  Free all receive rings.
   3243  *
   3244  **********************************************************************/
   3245 static void
   3246 ixv_free_receive_structures(struct adapter *adapter)
   3247 {
   3248 	struct rx_ring *rxr = adapter->rx_rings;
   3249 
   3250 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   3251 #ifdef LRO
   3252 		struct lro_ctrl		*lro = &rxr->lro;
   3253 #endif /* LRO */
   3254 		ixv_free_receive_buffers(rxr);
   3255 #ifdef LRO
   3256 		/* Free LRO memory */
   3257 		tcp_lro_free(lro);
   3258 #endif /* LRO */
   3259 		/* Free the ring memory as well */
   3260 		ixv_dma_free(adapter, &rxr->rxdma);
   3261 		IXV_RX_LOCK_DESTROY(rxr);
   3262 	}
   3263 
   3264 	free(adapter->rx_rings, M_DEVBUF);
   3265 }
   3266 
   3267 
   3268 /*********************************************************************
   3269  *
   3270  *  Free receive ring data structures
   3271  *
   3272  **********************************************************************/
   3273 static void
   3274 ixv_free_receive_buffers(struct rx_ring *rxr)
   3275 {
   3276 	struct adapter		*adapter = rxr->adapter;
   3277 	struct ixv_rx_buf	*rxbuf;
   3278 
    3279 	INIT_DEBUGOUT("ixv_free_receive_buffers: begin");
   3280 
   3281 	/* Cleanup any existing buffers */
   3282 	if (rxr->rx_buffers != NULL) {
   3283 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   3284 			rxbuf = &rxr->rx_buffers[i];
   3285 			if (rxbuf->m_head != NULL) {
   3286 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3287 				    BUS_DMASYNC_POSTREAD);
   3288 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3289 				rxbuf->m_head->m_flags |= M_PKTHDR;
   3290 				m_freem(rxbuf->m_head);
   3291 			}
   3292 			if (rxbuf->m_pack != NULL) {
   3293 				/* XXX not ixgbe_* ? */
   3294 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3295 				    0, rxbuf->m_pack->m_pkthdr.len,
   3296 				    BUS_DMASYNC_POSTREAD);
   3297 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3298 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   3299 				m_freem(rxbuf->m_pack);
   3300 			}
   3301 			rxbuf->m_head = NULL;
   3302 			rxbuf->m_pack = NULL;
   3303 			if (rxbuf->hmap != NULL) {
   3304 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   3305 				rxbuf->hmap = NULL;
   3306 			}
   3307 			if (rxbuf->pmap != NULL) {
   3308 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   3309 				rxbuf->pmap = NULL;
   3310 			}
   3311 		}
   3312 		if (rxr->rx_buffers != NULL) {
   3313 			free(rxr->rx_buffers, M_DEVBUF);
   3314 			rxr->rx_buffers = NULL;
   3315 		}
   3316 	}
   3317 
   3318 	if (rxr->htag != NULL) {
   3319 		ixgbe_dma_tag_destroy(rxr->htag);
   3320 		rxr->htag = NULL;
   3321 	}
   3322 	if (rxr->ptag != NULL) {
   3323 		ixgbe_dma_tag_destroy(rxr->ptag);
   3324 		rxr->ptag = NULL;
   3325 	}
   3326 
   3327 	return;
   3328 }
   3329 
   3330 static __inline void
   3331 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   3332 {
   3333 	int s;
   3334 
   3335 #ifdef LRO
   3336 	struct adapter	*adapter = ifp->if_softc;
   3337 	struct ethercom *ec = &adapter->osdep.ec;
   3338 
    3339 	/*
    3340 	 * At the moment LRO is done only for IPv4/TCP packets whose TCP
    3341 	 * checksum has been verified by hardware, and only when the
    3342 	 * Ethernet header carries no VLAN tag.
    3343 	 */
    3344 	if (rxr->lro_enabled &&
    3345 	    (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
    3346 	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
    3347 	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
    3348 	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
    3349 	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
    3350 	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
    3351 		/*
    3352 		 * Hand the packet to LRO; if there are no LRO
    3353 		 * resources, or the LRO enqueue fails, fall through
    3354 		 * and send it to the stack instead (a return of 0
    3355 		 * from tcp_lro_rx() means LRO took the mbuf).
    3356 		 */
    3357 		if (rxr->lro.lro_cnt != 0)
    3358 			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
    3359 				return;
    3360 	}
   3361 #endif /* LRO */
   3362 
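	/*
	 * Drop the ring lock while the packet is handed to BPF and the
	 * network stack, so it is not held across upper-layer processing;
	 * the lock is re-taken before returning to the RX loop.
	 */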
   3363 	IXV_RX_UNLOCK(rxr);
   3364 
   3365 	s = splnet();
   3366 	/* Pass this up to any BPF listeners. */
   3367 	bpf_mtap(ifp, m);
   3368 	if_percpuq_enqueue(ifp->if_percpuq, m);
   3369 	splx(s);
   3370 
   3371 	IXV_RX_LOCK(rxr);
   3372 }
   3373 
   3374 static __inline void
   3375 ixv_rx_discard(struct rx_ring *rxr, int i)
   3376 {
   3377 	struct ixv_rx_buf	*rbuf;
   3378 
   3379 	rbuf = &rxr->rx_buffers[i];
   3380 	if (rbuf->fmp != NULL) {/* Partial chain ? */
   3381 		rbuf->fmp->m_flags |= M_PKTHDR;
   3382 		m_freem(rbuf->fmp);
   3383 		rbuf->fmp = NULL;
   3384 	}
   3385 
    3386 	/*
    3387 	** With advanced descriptors the writeback
    3388 	** clobbers the buffer addresses, so it's easier
    3389 	** to just free the existing mbufs and take
    3390 	** the normal refresh path to get new buffers
    3391 	** and mappings.
    3392 	*/
   3393 	if (rbuf->m_head) {
   3394 		m_free(rbuf->m_head);
   3395 		rbuf->m_head = NULL;
   3396 	}
   3397 
   3398 	if (rbuf->m_pack) {
   3399 		m_free(rbuf->m_pack);
   3400 		rbuf->m_pack = NULL;
   3401 	}
   3402 
   3403 	return;
   3404 }
   3405 
   3406 
   3407 /*********************************************************************
   3408  *
    3409  *  This routine executes in interrupt context. It replenishes
    3410  *  the mbufs in the descriptor ring and sends data which has
    3411  *  been DMA'd into host memory to the upper layer.
   3412  *
   3413  *  We loop at most count times if count is > 0, or until done if
   3414  *  count < 0.
   3415  *
   3416  *  Return TRUE for more work, FALSE for all clean.
   3417  *********************************************************************/
   3418 static bool
   3419 ixv_rxeof(struct ix_queue *que, int count)
   3420 {
   3421 	struct adapter		*adapter = que->adapter;
   3422 	struct rx_ring		*rxr = que->rxr;
   3423 	struct ifnet		*ifp = adapter->ifp;
   3424 #ifdef LRO
   3425 	struct lro_ctrl		*lro = &rxr->lro;
   3426 	struct lro_entry	*queued;
   3427 #endif /* LRO */
   3428 	int			i, nextp, processed = 0;
   3429 	u32			staterr = 0;
   3430 	union ixgbe_adv_rx_desc	*cur;
   3431 	struct ixv_rx_buf	*rbuf, *nbuf;
   3432 
   3433 	IXV_RX_LOCK(rxr);
   3434 
   3435 	for (i = rxr->next_to_check; count != 0;) {
   3436 		struct mbuf	*sendmp, *mh, *mp;
   3437 		u32		ptype;
   3438 		u16		hlen, plen, hdr, vtag;
   3439 		bool		eop;
   3440 
   3441 		/* Sync the ring. */
   3442 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3443 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   3444 
   3445 		cur = &rxr->rx_base[i];
   3446 		staterr = le32toh(cur->wb.upper.status_error);
   3447 
   3448 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   3449 			break;
   3450 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   3451 			break;
   3452 
   3453 		count--;
   3454 		sendmp = NULL;
   3455 		nbuf = NULL;
   3456 		cur->wb.upper.status_error = 0;
   3457 		rbuf = &rxr->rx_buffers[i];
   3458 		mh = rbuf->m_head;
   3459 		mp = rbuf->m_pack;
   3460 
   3461 		plen = le16toh(cur->wb.upper.length);
   3462 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   3463 		    IXGBE_RXDADV_PKTTYPE_MASK;
   3464 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   3465 		vtag = le16toh(cur->wb.upper.vlan);
   3466 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   3467 
   3468 		/* Make sure all parts of a bad packet are discarded */
   3469 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   3470 		    (rxr->discard)) {
   3471 			ifp->if_ierrors++;
   3472 			rxr->rx_discarded.ev_count++;
   3473 			if (!eop)
   3474 				rxr->discard = TRUE;
   3475 			else
   3476 				rxr->discard = FALSE;
   3477 			ixv_rx_discard(rxr, i);
   3478 			goto next_desc;
   3479 		}
   3480 
   3481 		if (!eop) {
   3482 			nextp = i + 1;
   3483 			if (nextp == adapter->num_rx_desc)
   3484 				nextp = 0;
   3485 			nbuf = &rxr->rx_buffers[nextp];
   3486 			prefetch(nbuf);
   3487 		}
    3488 		/*
    3489 		** The header mbuf is ONLY used when header
    3490 		** split is enabled; otherwise we get the normal
    3491 		** behavior, i.e., both header and payload
    3492 		** are DMA'd into the payload buffer.
    3493 		**
    3494 		** Rather than using the fmp/lmp global pointers
    3495 		** we now keep the head of a packet chain in the
    3496 		** buffer struct and pass this along from one
    3497 		** descriptor to the next, until we get EOP.
    3498 		*/
   3499 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   3500 			/* This must be an initial descriptor */
   3501 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   3502 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   3503 			if (hlen > IXV_RX_HDR)
   3504 				hlen = IXV_RX_HDR;
   3505 			mh->m_len = hlen;
   3506 			mh->m_flags |= M_PKTHDR;
   3507 			mh->m_next = NULL;
   3508 			mh->m_pkthdr.len = mh->m_len;
   3509 			/* Null buf pointer so it is refreshed */
   3510 			rbuf->m_head = NULL;
    3511 			/*
    3512 			** Check the payload length; this
    3513 			** could be zero if it's a small
    3514 			** packet.
    3515 			*/
   3516 			if (plen > 0) {
   3517 				mp->m_len = plen;
   3518 				mp->m_next = NULL;
   3519 				mp->m_flags &= ~M_PKTHDR;
   3520 				mh->m_next = mp;
   3521 				mh->m_pkthdr.len += mp->m_len;
   3522 				/* Null buf pointer so it is refreshed */
   3523 				rbuf->m_pack = NULL;
   3524 				rxr->rx_split_packets.ev_count++;
   3525 			}
    3526 			/*
    3527 			** Now create the forward
    3528 			** chain so that when the packet
    3529 			** completes we won't have to.
    3530 			*/
    3531 			if (eop == 0) {
    3532 				/* stash the chain head */
    3533 				nbuf->fmp = mh;
    3534 				/* Make forward chain */
    3535 				if (plen)
    3536 					mp->m_next = nbuf->m_pack;
    3537 				else
    3538 					mh->m_next = nbuf->m_pack;
    3539 			} else {
    3540 				/* Singlet, prepare to send */
    3541 				sendmp = mh;
    3542 				if (VLAN_ATTACHED(&adapter->osdep.ec) &&
    3543 				    (staterr & IXGBE_RXD_STAT_VP)) {
    3544 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
    3545 					    printf("%s: could not apply VLAN "
    3546 						"tag", __func__));
    3547 				}
    3548 			}
   3549 		} else {
   3550 			/*
   3551 			** Either no header split, or a
   3552 			** secondary piece of a fragmented
   3553 			** split packet.
   3554 			*/
   3555 			mp->m_len = plen;
    3556 			/*
    3557 			** See if there is a stored head from a previous
    3558 			** descriptor; it tells us whether this is a continuation.
    3559 			*/
   3560 			sendmp = rbuf->fmp;
   3561 			rbuf->m_pack = rbuf->fmp = NULL;
   3562 
   3563 			if (sendmp != NULL) /* secondary frag */
   3564 				sendmp->m_pkthdr.len += mp->m_len;
   3565 			else {
   3566 				/* first desc of a non-ps chain */
   3567 				sendmp = mp;
   3568 				sendmp->m_flags |= M_PKTHDR;
   3569 				sendmp->m_pkthdr.len = mp->m_len;
   3570 				if (staterr & IXGBE_RXD_STAT_VP) {
   3571 					/* XXX Do something reasonable on
   3572 					 * error.
   3573 					 */
   3574 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3575 					    printf("%s: could not apply VLAN "
   3576 					        "tag", __func__));
   3577 				}
    3578 			}
   3579 			/* Pass the head pointer on */
   3580 			if (eop == 0) {
   3581 				nbuf->fmp = sendmp;
   3582 				sendmp = NULL;
   3583 				mp->m_next = nbuf->m_pack;
   3584 			}
   3585 		}
   3586 		++processed;
   3587 		/* Sending this frame? */
   3588 		if (eop) {
   3589 			m_set_rcvif(sendmp, ifp);
   3590 			ifp->if_ipackets++;
   3591 			rxr->rx_packets.ev_count++;
   3592 			/* capture data for AIM */
   3593 			rxr->bytes += sendmp->m_pkthdr.len;
   3594 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   3595 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   3596 				ixv_rx_checksum(staterr, sendmp, ptype,
   3597 				   &adapter->stats);
   3598 			}
   3599 #if __FreeBSD_version >= 800000
   3600 			sendmp->m_pkthdr.flowid = que->msix;
   3601 			sendmp->m_flags |= M_FLOWID;
   3602 #endif
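			/*
			 * Note: __FreeBSD_version is not defined on NetBSD,
			 * so the flowid block above is compiled out here.
			 */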
   3603 		}
   3604 next_desc:
   3605 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3606 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3607 
   3608 		/* Advance our pointers to the next descriptor. */
   3609 		if (++i == adapter->num_rx_desc)
   3610 			i = 0;
   3611 
   3612 		/* Now send to the stack or do LRO */
   3613 		if (sendmp != NULL)
   3614 			ixv_rx_input(rxr, ifp, sendmp, ptype);
   3615 
    3616 		/* Refresh the mbufs every 8 processed descriptors */
   3617 		if (processed == 8) {
   3618 			ixv_refresh_mbufs(rxr, i);
   3619 			processed = 0;
   3620 		}
   3621 	}
   3622 
   3623 	/* Refresh any remaining buf structs */
   3624 	if (ixv_rx_unrefreshed(rxr))
   3625 		ixv_refresh_mbufs(rxr, i);
   3626 
   3627 	rxr->next_to_check = i;
   3628 
   3629 #ifdef LRO
   3630 	/*
   3631 	 * Flush any outstanding LRO work
   3632 	 */
   3633 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   3634 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   3635 		tcp_lro_flush(lro, queued);
   3636 	}
   3637 #endif /* LRO */
   3638 
   3639 	IXV_RX_UNLOCK(rxr);
   3640 
    3641 	/*
    3642 	** Do we still have cleaning to do?
    3643 	** If so, schedule another interrupt.
    3644 	*/
   3645 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   3646 		ixv_rearm_queues(adapter, (u64)(1ULL << que->msix));
   3647 		return true;
   3648 	}
   3649 
   3650 	return false;
   3651 }
   3652 
   3653 
   3654 /*********************************************************************
   3655  *
   3656  *  Verify that the hardware indicated that the checksum is valid.
   3657  *  Inform the stack about the status of checksum so that stack
   3658  *  doesn't spend time verifying the checksum.
   3659  *
   3660  *********************************************************************/
   3661 static void
   3662 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   3663     struct ixgbevf_hw_stats *stats)
   3664 {
   3665 	u16	status = (u16) staterr;
   3666 	u8	errors = (u8) (staterr >> 24);
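	/*
	 * The casts above split the descriptor status/error word: the
	 * status bits sit in the low 16 bits and the error bits checked
	 * below (IPE, TCPE) sit in the top byte.
	 */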
   3667 #if 0
   3668 	bool	sctp = FALSE;
   3669 
   3670 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3671 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   3672 		sctp = TRUE;
   3673 #endif
   3674 	if (status & IXGBE_RXD_STAT_IPCS) {
   3675 		stats->ipcs.ev_count++;
   3676 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   3677 			/* IP Checksum Good */
   3678 			mp->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   3679 
   3680 		} else {
   3681 			stats->ipcs_bad.ev_count++;
   3682 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   3683 		}
   3684 	}
   3685 	if (status & IXGBE_RXD_STAT_L4CS) {
   3686 		stats->l4cs.ev_count++;
   3687 		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   3688 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   3689 			mp->m_pkthdr.csum_flags |= type;
   3690 		} else {
   3691 			stats->l4cs_bad.ev_count++;
   3692 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   3693 		}
   3694 	}
   3695 	return;
   3696 }
   3697 
   3698 static void
   3699 ixv_setup_vlan_support(struct adapter *adapter)
   3700 {
   3701 	struct ixgbe_hw *hw = &adapter->hw;
   3702 	u32		ctrl, vid, vfta, retry;
   3703 
   3704 
    3705 	/*
    3706 	** We get here through init_locked, meaning
    3707 	** a soft reset, which has already cleared
    3708 	** the VFTA and other state; so if no VLANs
    3709 	** have been registered, do nothing.
    3710 	*/
   3711 	if (adapter->num_vlans == 0)
   3712 		return;
   3713 
   3714 	/* Enable the queues */
   3715 	for (int i = 0; i < adapter->num_queues; i++) {
   3716 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3717 		ctrl |= IXGBE_RXDCTL_VME;
   3718 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   3719 	}
   3720 
    3721 	/*
    3722 	** A soft reset zeroes out the VFTA, so
    3723 	** we need to repopulate it now.
    3724 	*/
   3725 	for (int i = 0; i < VFTA_SIZE; i++) {
   3726 		if (ixv_shadow_vfta[i] == 0)
   3727 			continue;
   3728 		vfta = ixv_shadow_vfta[i];
    3729 		/*
    3730 		** Reconstruct the VLAN IDs from
    3731 		** the bits set in each word of
    3732 		** the shadow VFTA array.
    3733 		*/
    3734 		for (int j = 0; j < 32; j++) {
   3735 			retry = 0;
   3736 			if ((vfta & (1 << j)) == 0)
   3737 				continue;
   3738 			vid = (i * 32) + j;
   3739 			/* Call the shared code mailbox routine */
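			/*
			 * The request goes to the PF over the mailbox and
			 * may not succeed on the first attempt, so retry a
			 * handful of times before moving on to the next id.
			 */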
   3740 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   3741 				if (++retry > 5)
   3742 					break;
   3743 			}
   3744 		}
   3745 	}
   3746 }
   3747 
   3748 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
    3749 /*
    3750 ** This routine is run via a vlan config EVENT;
    3751 ** it enables us to use the HW filter table since
    3752 ** we can get the vlan id. This just creates the
    3753 ** entry in the soft version of the VFTA; init will
    3754 ** repopulate the real table.
    3755 */
   3756 static void
   3757 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3758 {
   3759 	struct adapter	*adapter = ifp->if_softc;
   3760 	u16		index, bit;
   3761 
   3762 	if (ifp->if_softc !=  arg)   /* Not our event */
   3763 		return;
   3764 
   3765 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3766 		return;
   3767 
   3768 	IXV_CORE_LOCK(adapter);
   3769 	index = (vtag >> 5) & 0x7F;
   3770 	bit = vtag & 0x1F;
   3771 	ixv_shadow_vfta[index] |= (1 << bit);
   3772 	/* Re-init to load the changes */
   3773 	ixv_init_locked(adapter);
   3774 	IXV_CORE_UNLOCK(adapter);
   3775 }
   3776 
    3777 /*
    3778 ** This routine is run via a vlan
    3779 ** unconfig EVENT; it removes our entry
    3780 ** from the soft VFTA.
    3781 */
   3782 static void
   3783 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3784 {
   3785 	struct adapter	*adapter = ifp->if_softc;
   3786 	u16		index, bit;
   3787 
   3788 	if (ifp->if_softc !=  arg)
   3789 		return;
   3790 
   3791 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3792 		return;
   3793 
   3794 	IXV_CORE_LOCK(adapter);
   3795 	index = (vtag >> 5) & 0x7F;
   3796 	bit = vtag & 0x1F;
   3797 	ixv_shadow_vfta[index] &= ~(1 << bit);
   3798 	/* Re-init to load the changes */
   3799 	ixv_init_locked(adapter);
   3800 	IXV_CORE_UNLOCK(adapter);
   3801 }
   3802 #endif
   3803 
   3804 static void
   3805 ixv_enable_intr(struct adapter *adapter)
   3806 {
   3807 	struct ixgbe_hw *hw = &adapter->hw;
   3808 	struct ix_queue *que = adapter->queues;
   3809 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   3810 
   3811 
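	/*
	 * Our reading of the shared code: VTEIMS unmasks the non-queue
	 * causes here, VTEIAC selects which causes auto-clear (everything
	 * except the mailbox/link cause), and the per-queue bits are then
	 * enabled one at a time via ixv_enable_queue() below.
	 */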
   3812 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   3813 
   3814 	mask = IXGBE_EIMS_ENABLE_MASK;
   3815 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   3816 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   3817 
    3818 	for (int i = 0; i < adapter->num_queues; i++, que++)
   3819 		ixv_enable_queue(adapter, que->msix);
   3820 
   3821 	IXGBE_WRITE_FLUSH(hw);
   3822 
   3823 	return;
   3824 }
   3825 
   3826 static void
   3827 ixv_disable_intr(struct adapter *adapter)
   3828 {
   3829 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   3830 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   3831 	IXGBE_WRITE_FLUSH(&adapter->hw);
   3832 	return;
   3833 }
   3834 
   3835 /*
   3836 ** Setup the correct IVAR register for a particular MSIX interrupt
   3837 **  - entry is the register array entry
   3838 **  - vector is the MSIX vector for this queue
   3839 **  - type is RX/TX/MISC
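**  Each VTIVAR register packs four 8-bit entries (RX and TX for a
**  pair of queues), which is where the (entry >> 1) register index and
**  the (16 * (entry & 1)) + (8 * type) bit offset below come from.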
   3840 */
   3841 static void
   3842 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3843 {
   3844 	struct ixgbe_hw *hw = &adapter->hw;
   3845 	u32 ivar, index;
   3846 
   3847 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3848 
   3849 	if (type == -1) { /* MISC IVAR */
   3850 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   3851 		ivar &= ~0xFF;
   3852 		ivar |= vector;
   3853 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   3854 	} else {	/* RX/TX IVARS */
   3855 		index = (16 * (entry & 1)) + (8 * type);
   3856 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   3857 		ivar &= ~(0xFF << index);
   3858 		ivar |= (vector << index);
   3859 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   3860 	}
   3861 }
   3862 
   3863 static void
   3864 ixv_configure_ivars(struct adapter *adapter)
   3865 {
   3866 	struct  ix_queue *que = adapter->queues;
   3867 
    3868 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    3869 		/* First the RX queue entry */
    3870 		ixv_set_ivar(adapter, i, que->msix, 0);
    3871 		/* ... and the TX */
    3872 		ixv_set_ivar(adapter, i, que->msix, 1);
    3873 		/* Set an initial value in EITR */
    3874 		IXGBE_WRITE_REG(&adapter->hw,
    3875 		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
    3876 	}
    3877 
    3878 	/* For the Link interrupt */
    3879 	ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
   3880 }
   3881 
   3882 
    3883 /*
    3884 ** Tasklet handler for MSIX mailbox interrupts
    3885 **  - run outside interrupt context since it might sleep
    3886 */
   3887 static void
   3888 ixv_handle_mbx(void *context)
   3889 {
   3890 	struct adapter  *adapter = context;
   3891 
   3892 	ixgbe_check_link(&adapter->hw,
   3893 	    &adapter->link_speed, &adapter->link_up, 0);
   3894 	ixv_update_link_status(adapter);
   3895 }
   3896 
    3897 /*
    3898 ** The VF stats registers never have a truly virgin
    3899 ** starting point, so this routine tries to make an
    3900 ** artificial one, marking ground zero at attach time,
    3901 ** as it were.
    3902 */
   3903 static void
   3904 ixv_save_stats(struct adapter *adapter)
   3905 {
   3906 	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
   3907 		adapter->stats.saved_reset_vfgprc +=
   3908 		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
   3909 		adapter->stats.saved_reset_vfgptc +=
   3910 		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
   3911 		adapter->stats.saved_reset_vfgorc +=
   3912 		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
   3913 		adapter->stats.saved_reset_vfgotc +=
   3914 		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
   3915 		adapter->stats.saved_reset_vfmprc +=
   3916 		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
   3917 	}
   3918 }
   3919 
   3920 static void
   3921 ixv_init_stats(struct adapter *adapter)
   3922 {
   3923 	struct ixgbe_hw *hw = &adapter->hw;
   3924 
   3925 	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   3926 	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   3927 	adapter->stats.last_vfgorc |=
   3928 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   3929 
   3930 	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   3931 	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   3932 	adapter->stats.last_vfgotc |=
   3933 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   3934 
   3935 	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   3936 
   3937 	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
   3938 	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
   3939 	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
   3940 	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
   3941 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
   3942 }
   3943 
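/*
** The VF counter registers are read-only and narrow (32 bits, or 36
** bits split across an LSB/MSB register pair).  These macros detect
** wrap-around (current < last) and splice the fresh low-order bits into
** a wider software counter so the totals keep growing monotonically.
*/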
   3944 #define UPDATE_STAT_32(reg, last, count)		\
   3945 {							\
   3946 	u32 current = IXGBE_READ_REG(hw, reg);		\
   3947 	if (current < last)				\
   3948 		count += 0x100000000LL;			\
   3949 	last = current;					\
   3950 	count &= 0xFFFFFFFF00000000LL;			\
   3951 	count |= current;				\
   3952 }
   3953 
   3954 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   3955 {							\
   3956 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   3957 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   3958 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   3959 	if (current < last)				\
   3960 		count += 0x1000000000LL;		\
   3961 	last = current;					\
   3962 	count &= 0xFFFFFFF000000000LL;			\
   3963 	count |= current;				\
   3964 }
   3965 
   3966 /*
   3967 ** ixv_update_stats - Update the board statistics counters.
   3968 */
   3969 void
   3970 ixv_update_stats(struct adapter *adapter)
   3971 {
    3972 	struct ixgbe_hw *hw = &adapter->hw;
    3973 
    3974 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
    3975 	    adapter->stats.vfgprc);
    3976 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
    3977 	    adapter->stats.vfgptc);
    3978 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
    3979 	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
    3980 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
    3981 	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
    3982 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
    3983 	    adapter->stats.vfmprc);
   3984 }
   3985 
   3986 /**********************************************************************
   3987  *
    3988  *  This routine is invoked from the stats sysctl handler
    3989  *  (ixv_sysctl_stats).  It provides a way to take a look at
    3990  *  important statistics maintained by the driver and hardware.
   3991  *
   3992  **********************************************************************/
   3993 static void
   3994 ixv_print_hw_stats(struct adapter * adapter)
   3995 {
   3996         device_t dev = adapter->dev;
   3997 
   3998         device_printf(dev,"Std Mbuf Failed = %"PRIu64"\n",
   3999                adapter->mbuf_defrag_failed.ev_count);
   4000         device_printf(dev,"Driver dropped packets = %"PRIu64"\n",
   4001                adapter->dropped_pkts.ev_count);
   4002         device_printf(dev, "watchdog timeouts = %"PRIu64"\n",
   4003                adapter->watchdog_events.ev_count);
   4004 
   4005         device_printf(dev,"Good Packets Rcvd = %lld\n",
   4006                (long long)adapter->stats.vfgprc);
   4007         device_printf(dev,"Good Packets Xmtd = %lld\n",
   4008                (long long)adapter->stats.vfgptc);
   4009         device_printf(dev,"TSO Transmissions = %"PRIu64"\n",
   4010                adapter->tso_tx.ev_count);
   4011 
   4012 }
   4013 
   4014 /**********************************************************************
   4015  *
    4016  *  This routine is invoked from the debug sysctl handler
    4017  *  (ixv_sysctl_debug).  It provides a way to take a look at
    4018  *  important statistics maintained by the driver and hardware.
   4019  *
   4020  **********************************************************************/
   4021 static void
   4022 ixv_print_debug_info(struct adapter *adapter)
   4023 {
   4024         device_t dev = adapter->dev;
   4025         struct ixgbe_hw         *hw = &adapter->hw;
   4026         struct ix_queue         *que = adapter->queues;
   4027         struct rx_ring          *rxr;
   4028         struct tx_ring          *txr;
   4029 #ifdef LRO
   4030         struct lro_ctrl         *lro;
   4031 #endif /* LRO */
   4032 
   4033         device_printf(dev,"Error Byte Count = %u \n",
   4034             IXGBE_READ_REG(hw, IXGBE_ERRBC));
   4035 
   4036         for (int i = 0; i < adapter->num_queues; i++, que++) {
   4037                 txr = que->txr;
   4038                 rxr = que->rxr;
   4039 #ifdef LRO
   4040                 lro = &rxr->lro;
   4041 #endif /* LRO */
   4042                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
   4043                     que->msix, (long)que->irqs);
   4044                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
   4045                     rxr->me, (long long)rxr->rx_packets.ev_count);
   4046                 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
   4047                     rxr->me, (long long)rxr->rx_split_packets.ev_count);
   4048                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
   4049                     rxr->me, (long)rxr->rx_bytes.ev_count);
   4050 #ifdef LRO
   4051                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
   4052                     rxr->me, lro->lro_queued);
   4053                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
   4054                     rxr->me, lro->lro_flushed);
   4055 #endif /* LRO */
   4056                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
   4057                     txr->me, (long)txr->total_packets.ev_count);
   4058                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
   4059                     txr->me, (long)txr->no_desc_avail.ev_count);
   4060         }
   4061 
   4062         device_printf(dev,"MBX IRQ Handled: %lu\n",
   4063             (long)adapter->mbx_irq.ev_count);
   4064         return;
   4065 }
   4066 
   4067 static int
   4068 ixv_sysctl_stats(SYSCTLFN_ARGS)
   4069 {
   4070 	struct sysctlnode node;
   4071 	int             error;
   4072 	int		result;
   4073 	struct adapter *adapter;
   4074 
   4075 	node = *rnode;
   4076 	adapter = (struct adapter *)node.sysctl_data;
   4077 	node.sysctl_data = &result;
   4078 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4079 	if (error != 0)
   4080 		return error;
   4081 
   4082 	if (result == 1)
   4083 		ixv_print_hw_stats(adapter);
   4084 
   4085 	return 0;
   4086 }
   4087 
   4088 static int
   4089 ixv_sysctl_debug(SYSCTLFN_ARGS)
   4090 {
   4091 	struct sysctlnode node;
   4092 	int error, result;
   4093 	struct adapter *adapter;
   4094 
   4095 	node = *rnode;
   4096 	adapter = (struct adapter *)node.sysctl_data;
   4097 	node.sysctl_data = &result;
   4098 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4099 
   4100 	if (error)
   4101 		return error;
   4102 
   4103 	if (result == 1)
   4104 		ixv_print_debug_info(adapter);
   4105 
   4106 	return 0;
   4107 }
   4108 
   4109 /*
   4110 ** Set flow control using sysctl:
   4111 ** Flow control values:
   4112 ** 	0 - off
   4113 **	1 - rx pause
   4114 **	2 - tx pause
   4115 **	3 - full
   4116 */
   4117 static int
   4118 ixv_set_flowcntl(SYSCTLFN_ARGS)
   4119 {
   4120 	struct sysctlnode node;
   4121 	int error;
   4122 	struct adapter *adapter;
   4123 
   4124 	node = *rnode;
   4125 	adapter = (struct adapter *)node.sysctl_data;
   4126 	node.sysctl_data = &ixv_flow_control;
   4127 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4128 
   4129 	if (error)
   4130 		return (error);
   4131 
   4132 	switch (ixv_flow_control) {
   4133 		case ixgbe_fc_rx_pause:
   4134 		case ixgbe_fc_tx_pause:
   4135 		case ixgbe_fc_full:
   4136 			adapter->hw.fc.requested_mode = ixv_flow_control;
   4137 			break;
   4138 		case ixgbe_fc_none:
   4139 		default:
   4140 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4141 	}
   4142 
   4143 	ixgbe_fc_enable(&adapter->hw);
   4144 	return error;
   4145 }
   4146 
   4147 const struct sysctlnode *
   4148 ixv_sysctl_instance(struct adapter *adapter)
   4149 {
   4150 	const char *dvname;
   4151 	struct sysctllog **log;
   4152 	int rc;
   4153 	const struct sysctlnode *rnode;
   4154 
   4155 	log = &adapter->sysctllog;
   4156 	dvname = device_xname(adapter->dev);
   4157 
   4158 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   4159 	    0, CTLTYPE_NODE, dvname,
   4160 	    SYSCTL_DESCR("ixv information and settings"),
   4161 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   4162 		goto err;
   4163 
   4164 	return rnode;
   4165 err:
   4166 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   4167 	return NULL;
   4168 }
   4169 
   4170 static void
   4171 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
   4172         const char *description, int *limit, int value)
   4173 {
   4174 	const struct sysctlnode *rnode, *cnode;
   4175 	struct sysctllog **log = &adapter->sysctllog;
   4176 
   4177         *limit = value;
   4178 
   4179 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL)
   4180 		aprint_error_dev(adapter->dev,
   4181 		    "could not create sysctl root\n");
   4182 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   4183 	    CTLFLAG_READWRITE,
   4184 	    CTLTYPE_INT,
   4185 	    name, SYSCTL_DESCR(description),
   4186 	    NULL, 0, limit, 0,
   4187 	    CTL_CREATE, CTL_EOL) != 0) {
   4188 		aprint_error_dev(adapter->dev, "%s: could not create sysctl",
   4189 		    __func__);
   4190 	}
   4191 }
   4192 
   4193