ixv.c revision 1.3
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2010, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: src/sys/dev/ixgbe/ixv.c,v 1.2 2011/03/23 13:10:15 jhb Exp $*/
     34 /*$NetBSD: ixv.c,v 1.3 2015/03/10 09:26:49 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 
     38 #include "ixv.h"
     39 
     40 /*********************************************************************
     41  *  Driver version
     42  *********************************************************************/
     43 char ixv_driver_version[] = "1.0.0";
     44 
     45 /*********************************************************************
     46  *  PCI Device ID Table
     47  *
      48  *  Used by probe to select which devices the driver attaches to
     49  *  Last field stores an index into ixv_strings
     50  *  Last entry must be all 0s
     51  *
     52  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     53  *********************************************************************/
     54 
     55 static ixv_vendor_info_t ixv_vendor_info_array[] =
     56 {
     57 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     58 	/* required last entry */
     59 	{0, 0, 0, 0, 0}
     60 };
     61 
     62 /*********************************************************************
     63  *  Table of branding strings
     64  *********************************************************************/
     65 
     66 static const char    *ixv_strings[] = {
     67 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     68 };
     69 
     70 /*********************************************************************
     71  *  Function prototypes
     72  *********************************************************************/
     73 static int      ixv_probe(device_t, cfdata_t, void *);
     74 static void      ixv_attach(device_t, device_t, void *);
     75 static int      ixv_detach(device_t, int);
     76 #if 0
     77 static int      ixv_shutdown(device_t);
     78 #endif
     79 #if __FreeBSD_version < 800000
     80 static void     ixv_start(struct ifnet *);
     81 static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
     82 #else
     83 static int	ixv_mq_start(struct ifnet *, struct mbuf *);
     84 static int	ixv_mq_start_locked(struct ifnet *,
     85 		    struct tx_ring *, struct mbuf *);
     86 static void	ixv_qflush(struct ifnet *);
     87 #endif
     88 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     89 static int	ixv_init(struct ifnet *);
     90 static void	ixv_init_locked(struct adapter *);
     91 static void     ixv_stop(void *);
     92 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     93 static int      ixv_media_change(struct ifnet *);
     94 static void     ixv_identify_hardware(struct adapter *);
     95 static int      ixv_allocate_pci_resources(struct adapter *,
     96 		    const struct pci_attach_args *);
     97 static int      ixv_allocate_msix(struct adapter *);
     98 static int	ixv_allocate_queues(struct adapter *);
     99 static int	ixv_setup_msix(struct adapter *);
    100 static void	ixv_free_pci_resources(struct adapter *);
    101 static void     ixv_local_timer(void *);
    102 static void     ixv_setup_interface(device_t, struct adapter *);
    103 static void     ixv_config_link(struct adapter *);
    104 
    105 static int      ixv_allocate_transmit_buffers(struct tx_ring *);
    106 static int	ixv_setup_transmit_structures(struct adapter *);
    107 static void	ixv_setup_transmit_ring(struct tx_ring *);
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_free_transmit_structures(struct adapter *);
    110 static void     ixv_free_transmit_buffers(struct tx_ring *);
    111 
    112 static int      ixv_allocate_receive_buffers(struct rx_ring *);
    113 static int      ixv_setup_receive_structures(struct adapter *);
    114 static int	ixv_setup_receive_ring(struct rx_ring *);
    115 static void     ixv_initialize_receive_units(struct adapter *);
    116 static void     ixv_free_receive_structures(struct adapter *);
    117 static void     ixv_free_receive_buffers(struct rx_ring *);
    118 
    119 static void     ixv_enable_intr(struct adapter *);
    120 static void     ixv_disable_intr(struct adapter *);
    121 static bool	ixv_txeof(struct tx_ring *);
    122 static bool	ixv_rxeof(struct ix_queue *, int);
    123 static void	ixv_rx_checksum(u32, struct mbuf *, u32,
    124 		    struct ixgbevf_hw_stats *);
    125 static void     ixv_set_multi(struct adapter *);
    126 static void     ixv_update_link_status(struct adapter *);
    127 static void	ixv_refresh_mbufs(struct rx_ring *, int);
    128 static int      ixv_xmit(struct tx_ring *, struct mbuf *);
    129 static int	ixv_sysctl_stats(SYSCTLFN_PROTO);
    130 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    131 static int	ixv_set_flowcntl(SYSCTLFN_PROTO);
    132 static int	ixv_dma_malloc(struct adapter *, bus_size_t,
    133 		    struct ixv_dma_alloc *, int);
    134 static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
    135 static void	ixv_add_rx_process_limit(struct adapter *, const char *,
    136 		    const char *, int *, int);
    137 static u32	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
    138 static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
    139 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    140 static void	ixv_configure_ivars(struct adapter *);
    141 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    142 
    143 static void	ixv_setup_vlan_support(struct adapter *);
    144 #if 0
    145 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    146 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    147 #endif
    148 
    149 static void	ixv_save_stats(struct adapter *);
    150 static void	ixv_init_stats(struct adapter *);
    151 static void	ixv_update_stats(struct adapter *);
    152 
    153 static __inline void ixv_rx_discard(struct rx_ring *, int);
    154 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
    155 		    struct mbuf *, u32);
    156 
    157 /* The MSI/X Interrupt handlers */
    158 static void	ixv_msix_que(void *);
    159 static void	ixv_msix_mbx(void *);
    160 
    161 /* Deferred interrupt tasklets */
    162 static void	ixv_handle_que(void *);
    163 static void	ixv_handle_mbx(void *);
    164 
    165 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    166 static ixv_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    167 
    168 /*********************************************************************
     169  *  Device Interface Entry Points
    170  *********************************************************************/
    171 
    172 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    173     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    174     DVF_DETACH_SHUTDOWN);
    175 
    176 # if 0
    177 static device_method_t ixv_methods[] = {
    178 	/* Device interface */
    179 	DEVMETHOD(device_probe, ixv_probe),
    180 	DEVMETHOD(device_attach, ixv_attach),
    181 	DEVMETHOD(device_detach, ixv_detach),
    182 	DEVMETHOD(device_shutdown, ixv_shutdown),
    183 	{0, 0}
    184 };
    185 #endif
    186 
    187 #if 0
    188 static driver_t ixv_driver = {
    189 	"ix", ixv_methods, sizeof(struct adapter),
    190 };
    191 
    192 extern devclass_t ixgbe_devclass;
    193 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
    194 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    195 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    196 #endif
    197 
    198 /*
    199 ** TUNEABLE PARAMETERS:
    200 */
    201 
    202 /*
    203 ** AIM: Adaptive Interrupt Moderation
    204 ** which means that the interrupt rate
    205 ** is varied over time based on the
    206 ** traffic for that interrupt vector
    207 */
    208 static int ixv_enable_aim = FALSE;
    209 #define	TUNABLE_INT(__x, __y)
    210 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
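         /*
          * Note: TUNABLE_INT() is defined to nothing just above, so these
          * FreeBSD-style "hw.ixv.*" loader tunables are no-ops in this port;
          * ixv_enable_aim is instead exposed read/write as the "enable_aim"
          * sysctl node created in ixv_sysctl_attach() below.
          */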
    211 
    212 /* How many packets rxeof tries to clean at a time */
    213 static int ixv_rx_process_limit = 128;
    214 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    215 
    216 /* Flow control setting, default to full */
    217 static int ixv_flow_control = ixgbe_fc_full;
    218 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
    219 
    220 /*
    221  * Header split: this causes the hardware to DMA
     222  * the header into a separate mbuf from the payload.
     223  * It can be a performance win in some workloads, but
     224  * in others it actually hurts; it's off by default.
    225  */
    226 static bool ixv_header_split = FALSE;
    227 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
    228 
    229 /*
    230 ** Number of TX descriptors per ring,
     231 ** set higher than RX as this seems
     232 ** to be the better-performing choice.
    233 */
    234 static int ixv_txd = DEFAULT_TXD;
    235 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    236 
    237 /* Number of RX descriptors per ring */
    238 static int ixv_rxd = DEFAULT_RXD;
    239 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    240 
    241 /*
     242 ** Shadow VFTA table; this is needed because
    243 ** the real filter table gets cleared during
    244 ** a soft reset and we need to repopulate it.
    245 */
    246 static u32 ixv_shadow_vfta[VFTA_SIZE];
    247 
     248 /* Keep a running count of ports for a sanity check */
    249 static int ixv_total_ports;
    250 
    251 /*********************************************************************
    252  *  Device identification routine
    253  *
     254  *  ixv_probe determines whether the driver should attach to this
     255  *  adapter, based on the adapter's PCI vendor/device ID.
     256 
     257  *  return nonzero on a match, 0 otherwise
    258  *********************************************************************/
    259 
    260 static int
    261 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    262 {
    263 	const struct pci_attach_args *pa = aux;
    264 
    265 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    266 }
    267 
    268 static ixv_vendor_info_t *
    269 ixv_lookup(const struct pci_attach_args *pa)
    270 {
    271 	pcireg_t subid;
    272 	ixv_vendor_info_t *ent;
    273 
    274 	INIT_DEBUGOUT("ixv_probe: begin");
    275 
    276 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    277 		return NULL;
    278 
    279 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    280 
    281 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    282 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    283 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    284 
    285 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    286 		     (ent->subvendor_id == 0)) &&
    287 
    288 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    289 		     (ent->subdevice_id == 0))) {
    290 			++ixv_total_ports;
    291 			return ent;
    292 		}
    293 	}
    294 	return NULL;
    295 }
    296 
    297 
    298 static void
    299 ixv_sysctl_attach(struct adapter *adapter)
    300 {
    301 	struct sysctllog **log;
    302 	const struct sysctlnode *rnode, *cnode;
    303 	device_t dev;
    304 
    305 	dev = adapter->dev;
    306 	log = &adapter->sysctllog;
    307 
    308 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    309 		aprint_error_dev(dev, "could not create sysctl root\n");
    310 		return;
    311 	}
    312 
    313 	if (sysctl_createv(log, 0, &rnode, &cnode,
    314 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    315 	    "stats", SYSCTL_DESCR("Statistics"),
    316 	    ixv_sysctl_stats, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    317 		aprint_error_dev(dev, "could not create sysctl\n");
    318 
    319 	if (sysctl_createv(log, 0, &rnode, &cnode,
    320 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    321 	    "debug", SYSCTL_DESCR("Debug Info"),
    322 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    323 		aprint_error_dev(dev, "could not create sysctl\n");
    324 
    325 	if (sysctl_createv(log, 0, &rnode, &cnode,
    326 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    327 	    "flow_control", SYSCTL_DESCR("Flow Control"),
    328 	    ixv_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    329 		aprint_error_dev(dev, "could not create sysctl\n");
    330 
    331 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    332 	 * XXX It's that way in the FreeBSD driver that this derives from.
    333 	 */
    334 	if (sysctl_createv(log, 0, &rnode, &cnode,
    335 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    336 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    337 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    338 		aprint_error_dev(dev, "could not create sysctl\n");
    339 }
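         /*
          * Usage sketch (illustrative; assumes ixv_sysctl_instance() roots
          * the per-device nodes at hw.<device_xname>, e.g. hw.ixv0):
          *
          *   sysctl -w hw.ixv0.debug=1          # run the debug handler
          *   sysctl -w hw.ixv0.flow_control=3   # full flow control, assuming
          *                                      # the usual 0..3 fc encoding
          *   sysctl hw.ixv0.enable_aim          # global AIM knob (note above)
          */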
    340 
    341 /*********************************************************************
    342  *  Device initialization routine
    343  *
    344  *  The attach entry point is called when the driver is being loaded.
    345  *  This routine identifies the type of hardware, allocates all resources
    346  *  and initializes the hardware.
    347  *
     348  *  No return value; attach failures are reported via aprint_error_dev().
    349  *********************************************************************/
    350 
    351 static void
    352 ixv_attach(device_t parent, device_t dev, void *aux)
    353 {
    354 	struct adapter *adapter;
    355 	struct ixgbe_hw *hw;
    356 	int             error = 0;
    357 	ixv_vendor_info_t *ent;
    358 	const struct pci_attach_args *pa = aux;
    359 
    360 	INIT_DEBUGOUT("ixv_attach: begin");
    361 
    362 	/* Allocate, clear, and link in our adapter structure */
    363 	adapter = device_private(dev);
    364 	adapter->dev = adapter->osdep.dev = dev;
    365 	hw = &adapter->hw;
    366 
    367 	ent = ixv_lookup(pa);
    368 
    369 	KASSERT(ent != NULL);
    370 
    371 	aprint_normal(": %s, Version - %s\n",
    372 	    ixv_strings[ent->index], ixv_driver_version);
    373 
    374 	/* Core Lock Init*/
    375 	IXV_CORE_LOCK_INIT(adapter, device_xname(dev));
    376 
    377 	/* SYSCTL APIs */
    378 	ixv_sysctl_attach(adapter);
    379 
    380 	/* Set up the timer callout */
    381 	callout_init(&adapter->timer, 0);
    382 
    383 	/* Determine hardware revision */
    384 	ixv_identify_hardware(adapter);
    385 
    386 	/* Do base PCI setup - map BAR0 */
    387 	if (ixv_allocate_pci_resources(adapter, pa)) {
    388 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    389 		error = ENXIO;
    390 		goto err_out;
    391 	}
    392 
    393 	/* Do descriptor calc and sanity checks */
    394 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    395 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    396 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    397 		adapter->num_tx_desc = DEFAULT_TXD;
    398 	} else
    399 		adapter->num_tx_desc = ixv_txd;
    400 
    401 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
     402 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    403 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    404 		adapter->num_rx_desc = DEFAULT_RXD;
    405 	} else
    406 		adapter->num_rx_desc = ixv_rxd;
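         	/*
         	 * Illustrative check of the math above (sizes assumed from
         	 * ixv.h): an advanced TX/RX descriptor is 16 bytes, so with
         	 * DBA_ALIGN == 128 any multiple of 8 descriptors keeps the ring
         	 * size a multiple of DBA_ALIGN; the DEFAULT_TXD/DEFAULT_RXD
         	 * values satisfy this as well as the MIN/MAX range checks.
         	 */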
    407 
    408 	/* Allocate our TX/RX Queues */
    409 	if (ixv_allocate_queues(adapter)) {
    410 		error = ENOMEM;
    411 		goto err_out;
    412 	}
    413 
    414 	/*
     415 	** Initialize the shared code; it's
     416 	** at this point that the mac type is set.
    417 	*/
    418 	error = ixgbe_init_shared_code(hw);
    419 	if (error) {
    420 		aprint_error_dev(dev,"Shared Code Initialization Failure\n");
    421 		error = EIO;
    422 		goto err_late;
    423 	}
    424 
    425 	/* Setup the mailbox */
    426 	ixgbe_init_mbx_params_vf(hw);
    427 
    428 	ixgbe_reset_hw(hw);
    429 
    430 	/* Get Hardware Flow Control setting */
    431 	hw->fc.requested_mode = ixgbe_fc_full;
    432 	hw->fc.pause_time = IXV_FC_PAUSE;
    433 	hw->fc.low_water = IXV_FC_LO;
    434 	hw->fc.high_water = IXV_FC_HI;
    435 	hw->fc.send_xon = TRUE;
    436 
    437 	error = ixgbe_init_hw(hw);
    438 	if (error) {
    439 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    440 		error = EIO;
    441 		goto err_late;
    442 	}
    443 
    444 	error = ixv_allocate_msix(adapter);
    445 	if (error)
    446 		goto err_late;
    447 
    448 	/* Setup OS specific network interface */
    449 	ixv_setup_interface(dev, adapter);
    450 
    451 	/* Sysctl for limiting the amount of work done in the taskqueue */
    452 	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
    453 	    "max number of rx packets to process", &adapter->rx_process_limit,
    454 	    ixv_rx_process_limit);
    455 
    456 	/* Do the stats setup */
    457 	ixv_save_stats(adapter);
    458 	ixv_init_stats(adapter);
    459 
    460 	/* Register for VLAN events */
    461 #if 0 /* XXX msaitoh delete after write? */
    462 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    463 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    464 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    465 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    466 #endif
    467 
    468 	INIT_DEBUGOUT("ixv_attach: end");
    469 	return;
    470 
    471 err_late:
    472 	ixv_free_transmit_structures(adapter);
    473 	ixv_free_receive_structures(adapter);
    474 err_out:
    475 	ixv_free_pci_resources(adapter);
    476 	return;
    477 
    478 }
    479 
    480 /*********************************************************************
    481  *  Device removal routine
    482  *
    483  *  The detach entry point is called when the driver is being removed.
    484  *  This routine stops the adapter and deallocates all the resources
    485  *  that were allocated for driver operation.
    486  *
    487  *  return 0 on success, positive on failure
    488  *********************************************************************/
    489 
    490 static int
    491 ixv_detach(device_t dev, int flags)
    492 {
    493 	struct adapter *adapter = device_private(dev);
    494 	struct ix_queue *que = adapter->queues;
    495 
    496 	INIT_DEBUGOUT("ixv_detach: begin");
    497 
    498 	/* Make sure VLANS are not using driver */
     499 	/* Make sure VLANs are not using the driver */
    500 		;	/* nothing to do: no VLANs */
    501 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    502 		vlan_ifdetach(adapter->ifp);
    503 	else {
    504 		aprint_error_dev(dev, "VLANs in use\n");
    505 		return EBUSY;
    506 	}
    507 
    508 	IXV_CORE_LOCK(adapter);
    509 	ixv_stop(adapter);
    510 	IXV_CORE_UNLOCK(adapter);
    511 
    512 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    513 		softint_disestablish(que->que_si);
    514 	}
    515 
    516 	/* Drain the Link queue */
    517 	softint_disestablish(adapter->mbx_si);
    518 
    519 	/* Unregister VLAN events */
    520 #if 0 /* XXX msaitoh delete after write? */
    521 	if (adapter->vlan_attach != NULL)
    522 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    523 	if (adapter->vlan_detach != NULL)
    524 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    525 #endif
    526 
    527 	ether_ifdetach(adapter->ifp);
    528 	callout_halt(&adapter->timer, NULL);
    529 	ixv_free_pci_resources(adapter);
    530 #if 0 /* XXX the NetBSD port is probably missing something here */
    531 	bus_generic_detach(dev);
    532 #endif
    533 	if_detach(adapter->ifp);
    534 
    535 	ixv_free_transmit_structures(adapter);
    536 	ixv_free_receive_structures(adapter);
    537 
    538 	IXV_CORE_LOCK_DESTROY(adapter);
    539 	return (0);
    540 }
    541 
    542 /*********************************************************************
    543  *
    544  *  Shutdown entry point
    545  *
    546  **********************************************************************/
    547 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    548 static int
    549 ixv_shutdown(device_t dev)
    550 {
    551 	struct adapter *adapter = device_private(dev);
    552 	IXV_CORE_LOCK(adapter);
    553 	ixv_stop(adapter);
    554 	IXV_CORE_UNLOCK(adapter);
    555 	return (0);
    556 }
    557 #endif
    558 
    559 #if __FreeBSD_version < 800000
    560 /*********************************************************************
    561  *  Transmit entry point
    562  *
    563  *  ixv_start is called by the stack to initiate a transmit.
    564  *  The driver will remain in this routine as long as there are
    565  *  packets to transmit and transmit resources are available.
     566  *  In case resources are not available, the stack is notified and
    567  *  the packet is requeued.
    568  **********************************************************************/
    569 static void
    570 ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
    571 {
    572 	int rc;
    573 	struct mbuf    *m_head;
    574 	struct adapter *adapter = txr->adapter;
    575 
    576 	IXV_TX_LOCK_ASSERT(txr);
    577 
    578 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
    579 	    IFF_RUNNING)
    580 		return;
    581 	if (!adapter->link_active)
    582 		return;
    583 
    584 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    585 
    586 		IFQ_POLL(&ifp->if_snd, m_head);
    587 		if (m_head == NULL)
    588 			break;
    589 
     590 		if ((rc = ixv_xmit(txr, m_head)) == EAGAIN) {
    591 			ifp->if_flags |= IFF_OACTIVE;
    592 			break;
    593 		}
    594 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
    595 		if (rc == EFBIG) {
    596 			struct mbuf *mtmp;
    597 
    598 			if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) {
    599 				m_head = mtmp;
    600 				rc = ixv_xmit(txr, m_head);
    601 				if (rc != 0)
    602 					adapter->efbig2_tx_dma_setup.ev_count++;
    603 			} else
    604 				adapter->m_defrag_failed.ev_count++;
    605 		}
    606 		if (rc != 0) {
    607 			m_freem(m_head);
    608 			continue;
    609 		}
    610 		/* Send a copy of the frame to the BPF listener */
    611 		bpf_mtap(ifp, m_head);
    612 
    613 		/* Set watchdog on */
    614 		txr->watchdog_check = TRUE;
    615 		getmicrotime(&txr->watchdog_time);
    616 	}
    617 	return;
    618 }
    619 
    620 /*
     621  * Legacy TX start - called by the stack; this
    622  * always uses the first tx ring, and should
    623  * not be used with multiqueue tx enabled.
    624  */
    625 static void
    626 ixv_start(struct ifnet *ifp)
    627 {
    628 	struct adapter *adapter = ifp->if_softc;
    629 	struct tx_ring	*txr = adapter->tx_rings;
    630 
    631 	if (ifp->if_flags & IFF_RUNNING) {
    632 		IXV_TX_LOCK(txr);
    633 		ixv_start_locked(txr, ifp);
    634 		IXV_TX_UNLOCK(txr);
    635 	}
    636 	return;
    637 }
    638 
    639 #else
    640 
    641 /*
    642 ** Multiqueue Transmit driver
    643 **
    644 */
    645 static int
    646 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
    647 {
    648 	struct adapter	*adapter = ifp->if_softc;
    649 	struct ix_queue	*que;
    650 	struct tx_ring	*txr;
    651 	int 		i = 0, err = 0;
    652 
    653 	/* Which queue to use */
    654 	if ((m->m_flags & M_FLOWID) != 0)
    655 		i = m->m_pkthdr.flowid % adapter->num_queues;
    656 
    657 	txr = &adapter->tx_rings[i];
    658 	que = &adapter->queues[i];
    659 
    660 	if (IXV_TX_TRYLOCK(txr)) {
    661 		err = ixv_mq_start_locked(ifp, txr, m);
    662 		IXV_TX_UNLOCK(txr);
    663 	} else {
    664 		err = drbr_enqueue(ifp, txr->br, m);
    665 		softint_schedule(que->que_si);
    666 	}
    667 
    668 	return (err);
    669 }
    670 
    671 static int
    672 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
    673 {
    674 	struct adapter  *adapter = txr->adapter;
    675         struct mbuf     *next;
    676         int             enqueued, err = 0;
    677 
    678 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
    679 	    IFF_RUNNING || adapter->link_active == 0) {
    680 		if (m != NULL)
    681 			err = drbr_enqueue(ifp, txr->br, m);
    682 		return (err);
    683 	}
    684 
    685 	/* Do a clean if descriptors are low */
    686 	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
    687 		ixv_txeof(txr);
    688 
    689 	enqueued = 0;
    690 	if (m == NULL) {
    691 		next = drbr_dequeue(ifp, txr->br);
    692 	} else if (drbr_needs_enqueue(ifp, txr->br)) {
    693 		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
    694 			return (err);
    695 		next = drbr_dequeue(ifp, txr->br);
    696 	} else
    697 		next = m;
    698 
    699 	/* Process the queue */
    700 	while (next != NULL) {
    701 		if ((err = ixv_xmit(txr, next)) != 0) {
    702 			if (next != NULL)
    703 				err = drbr_enqueue(ifp, txr->br, next);
    704 			break;
    705 		}
    706 		enqueued++;
    707 		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
    708 		/* Send a copy of the frame to the BPF listener */
    709 		ETHER_BPF_MTAP(ifp, next);
    710 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    711 			break;
    712 		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
    713 			ifp->if_flags |= IFF_OACTIVE;
    714 			break;
    715 		}
    716 		next = drbr_dequeue(ifp, txr->br);
    717 	}
    718 
    719 	if (enqueued > 0) {
    720 		/* Set watchdog on */
    721 		txr->watchdog_check = TRUE;
    722 		getmicrotime(&txr->watchdog_time);
    723 	}
    724 
    725 	return (err);
    726 }
    727 
    728 /*
    729 ** Flush all ring buffers
    730 */
    731 static void
    732 ixv_qflush(struct ifnet *ifp)
    733 {
    734 	struct adapter  *adapter = ifp->if_softc;
    735 	struct tx_ring  *txr = adapter->tx_rings;
    736 	struct mbuf     *m;
    737 
    738 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    739 		IXV_TX_LOCK(txr);
    740 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
    741 			m_freem(m);
    742 		IXV_TX_UNLOCK(txr);
    743 	}
    744 	if_qflush(ifp);
    745 }
    746 
    747 #endif
    748 
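         /*
         ** ixv_ifflags_cb: ethercom interface-flags change callback.
         **
         ** Called from ether_ioctl() when SIOCSIFFLAGS changes the flags of a
         ** running interface.  A change to anything outside the
         ** IFF_CANTCHANGE/IFF_DEBUG bits returns ENETRESET, which tells the
         ** caller to re-init the interface so the new flags take effect;
         ** otherwise the new flags are simply cached in adapter->if_flags.
         */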
    749 static int
    750 ixv_ifflags_cb(struct ethercom *ec)
    751 {
    752 	struct ifnet *ifp = &ec->ec_if;
    753 	struct adapter *adapter = ifp->if_softc;
    754 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    755 
    756 	IXV_CORE_LOCK(adapter);
    757 
    758 	if (change != 0)
    759 		adapter->if_flags = ifp->if_flags;
    760 
    761 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    762 		rc = ENETRESET;
    763 
    764 	IXV_CORE_UNLOCK(adapter);
    765 
    766 	return rc;
    767 }
    768 
    769 /*********************************************************************
    770  *  Ioctl entry point
    771  *
    772  *  ixv_ioctl is called when the user wants to configure the
    773  *  interface.
    774  *
    775  *  return 0 on success, positive on failure
    776  **********************************************************************/
    777 
    778 static int
    779 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    780 {
    781 	struct adapter	*adapter = ifp->if_softc;
    782 	struct ifcapreq *ifcr = data;
    783 	struct ifreq	*ifr = (struct ifreq *) data;
    784 	int             error = 0;
    785 	int l4csum_en;
    786 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    787 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    788 
    789 	switch (command) {
    790 	case SIOCSIFFLAGS:
    791 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    792 		break;
    793 	case SIOCADDMULTI:
    794 	case SIOCDELMULTI:
    795 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    796 		break;
    797 	case SIOCSIFMEDIA:
    798 	case SIOCGIFMEDIA:
    799 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    800 		break;
    801 	case SIOCSIFCAP:
    802 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    803 		break;
    804 	case SIOCSIFMTU:
    805 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    806 		break;
    807 	default:
    808 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
    809 		break;
    810 	}
    811 
    812 	switch (command) {
    813 	case SIOCSIFMEDIA:
    814 	case SIOCGIFMEDIA:
    815 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    816 	case SIOCSIFCAP:
    817 		/* Layer-4 Rx checksum offload has to be turned on and
    818 		 * off as a unit.
    819 		 */
    820 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    821 		if (l4csum_en != l4csum && l4csum_en != 0)
    822 			return EINVAL;
    823 		/*FALLTHROUGH*/
    824 	case SIOCADDMULTI:
    825 	case SIOCDELMULTI:
    826 	case SIOCSIFFLAGS:
    827 	case SIOCSIFMTU:
    828 	default:
    829 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    830 			return error;
    831 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    832 			;
    833 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    834 			IXV_CORE_LOCK(adapter);
    835 			ixv_init_locked(adapter);
    836 			IXV_CORE_UNLOCK(adapter);
    837 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    838 			/*
    839 			 * Multicast list has changed; set the hardware filter
    840 			 * accordingly.
    841 			 */
    842 			IXV_CORE_LOCK(adapter);
    843 			ixv_disable_intr(adapter);
    844 			ixv_set_multi(adapter);
    845 			ixv_enable_intr(adapter);
    846 			IXV_CORE_UNLOCK(adapter);
    847 		}
    848 		return 0;
    849 	}
    850 }
    851 
    852 /*********************************************************************
    853  *  Init entry point
    854  *
    855  *  This routine is used in two ways. It is used by the stack as
     856  *  the init entry point in the network interface structure. It is also used
    857  *  by the driver as a hw/sw initialization routine to get to a
    858  *  consistent state.
    859  *
    860  *  return 0 on success, positive on failure
    861  **********************************************************************/
    862 #define IXGBE_MHADD_MFS_SHIFT 16
    863 
    864 static void
    865 ixv_init_locked(struct adapter *adapter)
    866 {
    867 	struct ifnet	*ifp = adapter->ifp;
    868 	device_t 	dev = adapter->dev;
    869 	struct ixgbe_hw *hw = &adapter->hw;
    870 	u32		mhadd, gpie;
    871 
    872 	INIT_DEBUGOUT("ixv_init: begin");
    873 	KASSERT(mutex_owned(&adapter->core_mtx));
    874 	hw->adapter_stopped = FALSE;
    875 	ixgbe_stop_adapter(hw);
    876         callout_stop(&adapter->timer);
    877 
    878         /* reprogram the RAR[0] in case user changed it. */
    879         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    880 
    881 	/* Get the latest mac address, User can use a LAA */
    882 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    883 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    884         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    885 	hw->addr_ctrl.rar_used_count = 1;
    886 
    887 	/* Prepare transmit descriptors and buffers */
    888 	if (ixv_setup_transmit_structures(adapter)) {
    889 		aprint_error_dev(dev,"Could not setup transmit structures\n");
    890 		ixv_stop(adapter);
    891 		return;
    892 	}
    893 
    894 	ixgbe_reset_hw(hw);
    895 	ixv_initialize_transmit_units(adapter);
    896 
    897 	/* Setup Multicast table */
    898 	ixv_set_multi(adapter);
    899 
    900 	/*
    901 	** Determine the correct mbuf pool
    902 	** for doing jumbo/headersplit
    903 	*/
    904 	if (ifp->if_mtu > ETHERMTU)
    905 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    906 	else
    907 		adapter->rx_mbuf_sz = MCLBYTES;
    908 
    909 	/* Prepare receive descriptors and buffers */
    910 	if (ixv_setup_receive_structures(adapter)) {
    911 		device_printf(dev,"Could not setup receive structures\n");
    912 		ixv_stop(adapter);
    913 		return;
    914 	}
    915 
    916 	/* Configure RX settings */
    917 	ixv_initialize_receive_units(adapter);
    918 
    919 	/* Enable Enhanced MSIX mode */
    920 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    921 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    922 	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    923         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
    924 
    925 #if 0 /* XXX isn't it required? -- msaitoh  */
    926 	/* Set the various hardware offload abilities */
    927 	ifp->if_hwassist = 0;
    928 	if (ifp->if_capenable & IFCAP_TSO4)
    929 		ifp->if_hwassist |= CSUM_TSO;
    930 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    931 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    932 #if __FreeBSD_version >= 800000
    933 		ifp->if_hwassist |= CSUM_SCTP;
    934 #endif
    935 	}
    936 #endif
    937 
    938 	/* Set MTU size */
    939 	if (ifp->if_mtu > ETHERMTU) {
    940 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
    941 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
    942 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
    943 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    944 	}
    945 
    946 	/* Set up VLAN offload and filter */
    947 	ixv_setup_vlan_support(adapter);
    948 
    949 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    950 
    951 	/* Set up MSI/X routing */
    952 	ixv_configure_ivars(adapter);
    953 
    954 	/* Set up auto-mask */
    955 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    956 
    957         /* Set moderation on the Link interrupt */
    958         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
    959 
    960 	/* Stats init */
    961 	ixv_init_stats(adapter);
    962 
    963 	/* Config/Enable Link */
    964 	ixv_config_link(adapter);
    965 
    966 	/* And now turn on interrupts */
    967 	ixv_enable_intr(adapter);
    968 
    969 	/* Now inform the stack we're ready */
    970 	ifp->if_flags |= IFF_RUNNING;
    971 	ifp->if_flags &= ~IFF_OACTIVE;
    972 
    973 	return;
    974 }
    975 
    976 static int
    977 ixv_init(struct ifnet *ifp)
    978 {
    979 	struct adapter *adapter = ifp->if_softc;
    980 
    981 	IXV_CORE_LOCK(adapter);
    982 	ixv_init_locked(adapter);
    983 	IXV_CORE_UNLOCK(adapter);
    984 	return 0;
    985 }
    986 
    987 
    988 /*
    989 **
    990 ** MSIX Interrupt Handlers and Tasklets
    991 **
    992 */
    993 
    994 static inline void
    995 ixv_enable_queue(struct adapter *adapter, u32 vector)
    996 {
    997 	struct ixgbe_hw *hw = &adapter->hw;
    998 	u32	queue = 1 << vector;
    999 	u32	mask;
   1000 
   1001 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1002 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1003 }
   1004 
   1005 static inline void
   1006 ixv_disable_queue(struct adapter *adapter, u32 vector)
   1007 {
   1008 	struct ixgbe_hw *hw = &adapter->hw;
    1009 	u64	queue = (u64)1 << vector;
   1010 	u32	mask;
   1011 
   1012 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1013 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
   1014 }
   1015 
   1016 static inline void
   1017 ixv_rearm_queues(struct adapter *adapter, u64 queues)
   1018 {
   1019 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   1020 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
   1021 }
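         /*
          * Note on the three helpers above: bit N of the VTEIMS/VTEIMC/VTEICS
          * registers corresponds to MSI-X vector N, and masking with
          * IXGBE_EIMS_RTX_QUEUE restricts the write to the RX/TX queue bits so
          * the mailbox cause is left alone.  Writing the bit to VTEIMS enables
          * the interrupt, to VTEIMC masks it, and to VTEICS forces it, which
          * is how ixv_rearm_queues() re-triggers service for a pending queue.
          */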
   1022 
   1023 
   1024 static void
   1025 ixv_handle_que(void *context)
   1026 {
   1027 	struct ix_queue *que = context;
   1028 	struct adapter  *adapter = que->adapter;
   1029 	struct tx_ring  *txr = que->txr;
   1030 	struct ifnet    *ifp = adapter->ifp;
   1031 	bool		more;
   1032 
   1033 	if (ifp->if_flags & IFF_RUNNING) {
   1034 		more = ixv_rxeof(que, adapter->rx_process_limit);
   1035 		IXV_TX_LOCK(txr);
   1036 		ixv_txeof(txr);
   1037 #if __FreeBSD_version >= 800000
   1038 		if (!drbr_empty(ifp, txr->br))
   1039 			ixv_mq_start_locked(ifp, txr, NULL);
   1040 #else
   1041 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1042 			ixv_start_locked(txr, ifp);
   1043 #endif
   1044 		IXV_TX_UNLOCK(txr);
   1045 		if (more) {
   1046 			adapter->req.ev_count++;
   1047 			softint_schedule(que->que_si);
   1048 			return;
   1049 		}
   1050 	}
   1051 
   1052 	/* Reenable this interrupt */
   1053 	ixv_enable_queue(adapter, que->msix);
   1054 	return;
   1055 }
   1056 
   1057 /*********************************************************************
   1058  *
    1059  *  MSI-X Queue Interrupt Service routine
   1060  *
   1061  **********************************************************************/
   1062 void
   1063 ixv_msix_que(void *arg)
   1064 {
   1065 	struct ix_queue	*que = arg;
   1066 	struct adapter  *adapter = que->adapter;
   1067 	struct tx_ring	*txr = que->txr;
   1068 	struct rx_ring	*rxr = que->rxr;
   1069 	bool		more_tx, more_rx;
   1070 	u32		newitr = 0;
   1071 
   1072 	ixv_disable_queue(adapter, que->msix);
   1073 	++que->irqs;
   1074 
   1075 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1076 
   1077 	IXV_TX_LOCK(txr);
   1078 	more_tx = ixv_txeof(txr);
   1079 	IXV_TX_UNLOCK(txr);
   1080 
   1081 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1082 
   1083 	/* Do AIM now? */
   1084 
   1085 	if (ixv_enable_aim == FALSE)
   1086 		goto no_calc;
   1087 	/*
   1088 	** Do Adaptive Interrupt Moderation:
   1089         **  - Write out last calculated setting
   1090 	**  - Calculate based on average size over
   1091 	**    the last interval.
   1092 	*/
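         	/*
         	 * Illustrative numbers only: an average frame of ~1500 bytes
         	 * gives newitr = 1500 + 24 = 1524; that is under the 3000 cap
         	 * and above the 300..1200 "mid range", so it is halved to 762
         	 * before being mirrored into both halves of the EITR value.
         	 */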
   1093         if (que->eitr_setting)
   1094                 IXGBE_WRITE_REG(&adapter->hw,
   1095                     IXGBE_VTEITR(que->msix),
   1096 		    que->eitr_setting);
   1097 
   1098         que->eitr_setting = 0;
   1099 
   1100         /* Idle, do nothing */
   1101         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1102                 goto no_calc;
   1103 
   1104 	if ((txr->bytes) && (txr->packets))
   1105                	newitr = txr->bytes/txr->packets;
   1106 	if ((rxr->bytes) && (rxr->packets))
   1107 		newitr = max(newitr,
   1108 		    (rxr->bytes / rxr->packets));
   1109 	newitr += 24; /* account for hardware frame, crc */
   1110 
   1111 	/* set an upper boundary */
   1112 	newitr = min(newitr, 3000);
   1113 
   1114 	/* Be nice to the mid range */
   1115 	if ((newitr > 300) && (newitr < 1200))
   1116 		newitr = (newitr / 3);
   1117 	else
   1118 		newitr = (newitr / 2);
   1119 
   1120 	newitr |= newitr << 16;
   1121 
   1122         /* save for next interrupt */
   1123         que->eitr_setting = newitr;
   1124 
   1125         /* Reset state */
   1126         txr->bytes = 0;
   1127         txr->packets = 0;
   1128         rxr->bytes = 0;
   1129         rxr->packets = 0;
   1130 
   1131 no_calc:
   1132 	if (more_tx || more_rx)
   1133 		softint_schedule(que->que_si);
   1134 	else /* Reenable this interrupt */
   1135 		ixv_enable_queue(adapter, que->msix);
   1136 	return;
   1137 }
   1138 
   1139 static void
   1140 ixv_msix_mbx(void *arg)
   1141 {
   1142 	struct adapter	*adapter = arg;
   1143 	struct ixgbe_hw *hw = &adapter->hw;
   1144 	u32		reg;
   1145 
   1146 	++adapter->mbx_irq.ev_count;
   1147 
   1148 	/* First get the cause */
   1149 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
   1150 	/* Clear interrupt with write */
   1151 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
   1152 
   1153 	/* Link status change */
   1154 	if (reg & IXGBE_EICR_LSC)
   1155 		softint_schedule(adapter->mbx_si);
   1156 
   1157 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1158 	return;
   1159 }
   1160 
   1161 /*********************************************************************
   1162  *
   1163  *  Media Ioctl callback
   1164  *
   1165  *  This routine is called whenever the user queries the status of
   1166  *  the interface using ifconfig.
   1167  *
   1168  **********************************************************************/
   1169 static void
   1170 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1171 {
   1172 	struct adapter *adapter = ifp->if_softc;
   1173 
   1174 	INIT_DEBUGOUT("ixv_media_status: begin");
   1175 	IXV_CORE_LOCK(adapter);
   1176 	ixv_update_link_status(adapter);
   1177 
   1178 	ifmr->ifm_status = IFM_AVALID;
   1179 	ifmr->ifm_active = IFM_ETHER;
   1180 
   1181 	if (!adapter->link_active) {
   1182 		IXV_CORE_UNLOCK(adapter);
   1183 		return;
   1184 	}
   1185 
   1186 	ifmr->ifm_status |= IFM_ACTIVE;
   1187 
   1188 	switch (adapter->link_speed) {
   1189 		case IXGBE_LINK_SPEED_1GB_FULL:
   1190 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1191 			break;
   1192 		case IXGBE_LINK_SPEED_10GB_FULL:
   1193 			ifmr->ifm_active |= IFM_FDX;
   1194 			break;
   1195 	}
   1196 
   1197 	IXV_CORE_UNLOCK(adapter);
   1198 
   1199 	return;
   1200 }
   1201 
   1202 /*********************************************************************
   1203  *
   1204  *  Media Ioctl callback
   1205  *
   1206  *  This routine is called when the user changes speed/duplex using
    1207  *  the media/mediaopt options with ifconfig.
   1208  *
   1209  **********************************************************************/
   1210 static int
   1211 ixv_media_change(struct ifnet * ifp)
   1212 {
   1213 	struct adapter *adapter = ifp->if_softc;
   1214 	struct ifmedia *ifm = &adapter->media;
   1215 
   1216 	INIT_DEBUGOUT("ixv_media_change: begin");
   1217 
   1218 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1219 		return (EINVAL);
   1220 
   1221         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1222         case IFM_AUTO:
   1223                 break;
   1224         default:
   1225                 device_printf(adapter->dev, "Only auto media type\n");
   1226 		return (EINVAL);
   1227         }
   1228 
   1229 	return (0);
   1230 }
   1231 
   1232 /*********************************************************************
   1233  *
   1234  *  This routine maps the mbufs to tx descriptors, allowing the
   1235  *  TX engine to transmit the packets.
   1236  *  	- return 0 on success, positive on failure
   1237  *
   1238  **********************************************************************/
   1239 
   1240 static int
   1241 ixv_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1242 {
   1243 	struct m_tag *mtag;
   1244 	struct adapter  *adapter = txr->adapter;
   1245 	struct ethercom *ec = &adapter->osdep.ec;
   1246 	u32		olinfo_status = 0, cmd_type_len;
   1247 	u32		paylen = 0;
    1248 	int             i, j, error;
   1249 	int		first, last = 0;
   1250 	bus_dmamap_t	map;
   1251 	struct ixv_tx_buf *txbuf;
   1252 	union ixgbe_adv_tx_desc *txd = NULL;
   1253 
   1254 	/* Basic descriptor defines */
   1255         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1256 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1257 
   1258 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1259         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1260 
   1261         /*
   1262          * Important to capture the first descriptor
   1263          * used because it will contain the index of
   1264          * the one we tell the hardware to report back
   1265          */
   1266         first = txr->next_avail_desc;
   1267 	txbuf = &txr->tx_buffers[first];
   1268 	map = txbuf->map;
   1269 
   1270 	/*
   1271 	 * Map the packet for DMA.
   1272 	 */
   1273 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1274 	    m_head, BUS_DMA_NOWAIT);
   1275 
   1276 	switch (error) {
   1277 	case EAGAIN:
   1278 		adapter->eagain_tx_dma_setup.ev_count++;
   1279 		return EAGAIN;
   1280 	case ENOMEM:
   1281 		adapter->enomem_tx_dma_setup.ev_count++;
   1282 		return EAGAIN;
   1283 	case EFBIG:
   1284 		adapter->efbig_tx_dma_setup.ev_count++;
   1285 		return error;
   1286 	case EINVAL:
   1287 		adapter->einval_tx_dma_setup.ev_count++;
   1288 		return error;
   1289 	default:
   1290 		adapter->other_tx_dma_setup.ev_count++;
   1291 		return error;
   1292 	case 0:
   1293 		break;
   1294 	}
   1295 
   1296 	/* Make certain there are enough descriptors */
    1297 	if (map->dm_nsegs > txr->tx_avail - 2) {
   1298 		txr->no_desc_avail.ev_count++;
   1299 		/* XXX s/ixgbe/ixv/ */
   1300 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1301 		return EAGAIN;
   1302 	}
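         	/*
         	 * The "- 2" above leaves headroom for the offload/TSO context
         	 * descriptor set up below (an assumption; the constant is
         	 * inherited unchanged from the FreeBSD driver).
         	 */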
   1303 
   1304 	/*
    1305 	** Set up the appropriate offload context;
   1306 	** this becomes the first descriptor of
   1307 	** a packet.
   1308 	*/
   1309 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1310 		if (ixv_tso_setup(txr, m_head, &paylen)) {
   1311 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1312 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1313 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1314 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1315 			++adapter->tso_tx.ev_count;
   1316 		} else {
   1317 			++adapter->tso_err.ev_count;
   1318 			/* XXX unload DMA map! --dyoung -> easy? --msaitoh */
   1319 			return (ENXIO);
   1320 		}
   1321 	} else
   1322 		olinfo_status |= ixv_tx_ctx_setup(txr, m_head);
   1323 
   1324         /* Record payload length */
   1325 	if (paylen == 0)
   1326         	olinfo_status |= m_head->m_pkthdr.len <<
   1327 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1328 
   1329 	i = txr->next_avail_desc;
   1330 	for (j = 0; j < map->dm_nsegs; j++) {
   1331 		bus_size_t seglen;
   1332 		bus_addr_t segaddr;
   1333 
   1334 		txbuf = &txr->tx_buffers[i];
   1335 		txd = &txr->tx_base[i];
   1336 		seglen = map->dm_segs[j].ds_len;
   1337 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1338 
   1339 		txd->read.buffer_addr = segaddr;
   1340 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1341 		    cmd_type_len |seglen);
   1342 		txd->read.olinfo_status = htole32(olinfo_status);
   1343 		last = i; /* descriptor that will get completion IRQ */
   1344 
   1345 		if (++i == adapter->num_tx_desc)
   1346 			i = 0;
   1347 
   1348 		txbuf->m_head = NULL;
   1349 		txbuf->eop_index = -1;
   1350 	}
   1351 
   1352 	txd->read.cmd_type_len |=
   1353 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1354 	txr->tx_avail -= map->dm_nsegs;
   1355 	txr->next_avail_desc = i;
   1356 
   1357 	txbuf->m_head = m_head;
   1358 	/* We exchange the maps instead of copying because otherwise
   1359 	 * we end up with many pointers to the same map and we free
   1360 	 * one map twice in ixgbe_free_transmit_structures().  Who
   1361 	 * knows what other problems this caused.  --dyoung
   1362 	 */
   1363 	txbuf->map = map;
   1364 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1365 	    BUS_DMASYNC_PREWRITE);
   1366 
   1367         /* Set the index of the descriptor that will be marked done */
   1368         txbuf = &txr->tx_buffers[first];
   1369 	txbuf->eop_index = last;
   1370 
   1371 	/* XXX s/ixgbe/ixg/ */
   1372         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1373             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1374 	/*
   1375 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
   1376 	 * hardware that this frame is available to transmit.
   1377 	 */
   1378 	++txr->total_packets.ev_count;
   1379 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
   1380 
   1381 	return 0;
   1382 }
   1383 
   1384 
   1385 /*********************************************************************
   1386  *  Multicast Update
   1387  *
   1388  *  This routine is called whenever multicast address list is updated.
   1389  *
   1390  **********************************************************************/
   1391 #define IXGBE_RAR_ENTRIES 16
   1392 
   1393 static void
   1394 ixv_set_multi(struct adapter *adapter)
   1395 {
   1396 	struct ether_multi *enm;
   1397 	struct ether_multistep step;
   1398 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1399 	u8	*update_ptr;
   1400 	int	mcnt = 0;
   1401 	struct ethercom *ec = &adapter->osdep.ec;
   1402 
   1403 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1404 
   1405 	ETHER_FIRST_MULTI(step, ec, enm);
   1406 	while (enm != NULL) {
   1407 		bcopy(enm->enm_addrlo,
   1408 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1409 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1410 		mcnt++;
   1411 		/* XXX This might be required --msaitoh */
   1412 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1413 			break;
   1414 		ETHER_NEXT_MULTI(step, enm);
   1415 	}
   1416 
   1417 	update_ptr = mta;
   1418 
   1419 	ixgbe_update_mc_addr_list(&adapter->hw,
   1420 	    update_ptr, mcnt, ixv_mc_array_itr);
   1421 
   1422 	return;
   1423 }
   1424 
   1425 /*
   1426  * This is an iterator function now needed by the multicast
   1427  * shared code. It simply feeds the shared code routine the
   1428  * addresses in the array of ixv_set_multi() one by one.
   1429  */
   1430 static u8 *
   1431 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1432 {
   1433 	u8 *addr = *update_ptr;
   1434 	u8 *newptr;
   1435 	*vmdq = 0;
   1436 
   1437 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1438 	*update_ptr = newptr;
   1439 	return addr;
   1440 }
   1441 
   1442 /*********************************************************************
   1443  *  Timer routine
   1444  *
    1445  *  This routine checks for link status, updates statistics,
   1446  *  and runs the watchdog check.
   1447  *
   1448  **********************************************************************/
   1449 
   1450 static void
   1451 ixv_local_timer1(void *arg)
   1452 {
   1453 	struct adapter	*adapter = arg;
   1454 	device_t	dev = adapter->dev;
   1455 	struct tx_ring	*txr = adapter->tx_rings;
   1456 	int		i;
   1457 	struct timeval now, elapsed;
   1458 
   1459 	KASSERT(mutex_owned(&adapter->core_mtx));
   1460 
   1461 	ixv_update_link_status(adapter);
   1462 
   1463 	/* Stats Update */
   1464 	ixv_update_stats(adapter);
   1465 
   1466 	/*
   1467 	 * If the interface has been paused
   1468 	 * then don't do the watchdog check
   1469 	 */
   1470 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   1471 		goto out;
   1472 	/*
   1473 	** Check for time since any descriptor was cleaned
   1474 	*/
   1475         for (i = 0; i < adapter->num_queues; i++, txr++) {
   1476 		IXV_TX_LOCK(txr);
   1477 		if (txr->watchdog_check == FALSE) {
   1478 			IXV_TX_UNLOCK(txr);
   1479 			continue;
   1480 		}
   1481 		getmicrotime(&now);
   1482 		timersub(&now, &txr->watchdog_time, &elapsed);
   1483 		if (tvtohz(&elapsed) > IXV_WATCHDOG)
   1484 			goto hung;
   1485 		IXV_TX_UNLOCK(txr);
   1486 	}
   1487 out:
   1488        	ixv_rearm_queues(adapter, adapter->que_mask);
   1489 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1490 	return;
   1491 
   1492 hung:
   1493 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1494 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
   1495 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
   1496 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
   1497 	device_printf(dev,"TX(%d) desc avail = %d,"
   1498 	    "Next TX to Clean = %d\n",
   1499 	    txr->me, txr->tx_avail, txr->next_to_clean);
   1500 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1501 	adapter->watchdog_events.ev_count++;
   1502 	IXV_TX_UNLOCK(txr);
   1503 	ixv_init_locked(adapter);
   1504 }
   1505 
   1506 static void
   1507 ixv_local_timer(void *arg)
   1508 {
   1509 	struct adapter *adapter = arg;
   1510 
   1511 	IXV_CORE_LOCK(adapter);
   1512 	ixv_local_timer1(adapter);
   1513 	IXV_CORE_UNLOCK(adapter);
   1514 }
   1515 
   1516 /*
    1517 ** Note: this routine updates the OS on the link state;
   1518 **	the real check of the hardware only happens with
   1519 **	a link interrupt.
   1520 */
   1521 static void
   1522 ixv_update_link_status(struct adapter *adapter)
   1523 {
   1524 	struct ifnet	*ifp = adapter->ifp;
   1525 	struct tx_ring *txr = adapter->tx_rings;
   1526 	device_t dev = adapter->dev;
   1527 
   1528 
   1529 	if (adapter->link_up){
   1530 		if (adapter->link_active == FALSE) {
   1531 			if (bootverbose)
   1532 				device_printf(dev,"Link is up %d Gbps %s \n",
   1533 				    ((adapter->link_speed == 128)? 10:1),
   1534 				    "Full Duplex");
   1535 			adapter->link_active = TRUE;
   1536 			if_link_state_change(ifp, LINK_STATE_UP);
   1537 		}
   1538 	} else { /* Link down */
   1539 		if (adapter->link_active == TRUE) {
   1540 			if (bootverbose)
   1541 				device_printf(dev,"Link is Down\n");
   1542 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1543 			adapter->link_active = FALSE;
   1544 			for (int i = 0; i < adapter->num_queues;
   1545 			    i++, txr++)
   1546 				txr->watchdog_check = FALSE;
   1547 		}
   1548 	}
   1549 
   1550 	return;
   1551 }
   1552 
   1553 
   1554 static void
   1555 ixv_ifstop(struct ifnet *ifp, int disable)
   1556 {
   1557 	struct adapter *adapter = ifp->if_softc;
   1558 
   1559 	IXV_CORE_LOCK(adapter);
   1560 	ixv_stop(adapter);
   1561 	IXV_CORE_UNLOCK(adapter);
   1562 }
   1563 
   1564 /*********************************************************************
   1565  *
   1566  *  This routine disables all traffic on the adapter by issuing a
   1567  *  global reset on the MAC and deallocates TX/RX buffers.
   1568  *
   1569  **********************************************************************/
   1570 
   1571 static void
   1572 ixv_stop(void *arg)
   1573 {
   1574 	struct ifnet   *ifp;
   1575 	struct adapter *adapter = arg;
   1576 	struct ixgbe_hw *hw = &adapter->hw;
   1577 	ifp = adapter->ifp;
   1578 
   1579 	KASSERT(mutex_owned(&adapter->core_mtx));
   1580 
   1581 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1582 	ixv_disable_intr(adapter);
   1583 
   1584 	/* Tell the stack that the interface is no longer active */
   1585 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1586 
   1587 	ixgbe_reset_hw(hw);
   1588 	adapter->hw.adapter_stopped = FALSE;
   1589 	ixgbe_stop_adapter(hw);
   1590 	callout_stop(&adapter->timer);
   1591 
   1592 	/* reprogram the RAR[0] in case user changed it. */
   1593 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1594 
   1595 	return;
   1596 }
   1597 
   1598 
   1599 /*********************************************************************
   1600  *
   1601  *  Determine hardware revision.
   1602  *
   1603  **********************************************************************/
   1604 static void
   1605 ixv_identify_hardware(struct adapter *adapter)
   1606 {
   1607 	u16		pci_cmd_word;
   1608 	pcitag_t tag;
   1609 	pci_chipset_tag_t pc;
   1610 	pcireg_t subid, id;
   1611 	struct ixgbe_hw *hw = &adapter->hw;
   1612 
   1613 	pc = adapter->osdep.pc;
   1614 	tag = adapter->osdep.tag;
   1615 
    1616 	/*
    1617 	** Make sure BUSMASTER is set; on a VM under
    1618 	** KVM it may not be, which breaks things.
    1619 	*/
   1620 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1621 	if (!((pci_cmd_word & PCI_COMMAND_MASTER_ENABLE) &&
   1622 	    (pci_cmd_word & PCI_COMMAND_MEM_ENABLE))) {
   1623 		INIT_DEBUGOUT("Memory Access and/or Bus Master "
   1624 		    "bits were not set!\n");
   1625 		pci_cmd_word |=
   1626 		    (PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE);
   1627 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1628 	}
   1629 
   1630 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1631 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1632 
   1633 	/* Save off the information about this board */
   1634 	hw->vendor_id = PCI_VENDOR(id);
   1635 	hw->device_id = PCI_PRODUCT(id);
   1636 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1637 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1638 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1639 
   1640 	return;
   1641 }
   1642 
   1643 /*********************************************************************
   1644  *
   1645  *  Setup MSIX Interrupt resources and handlers
   1646  *
   1647  **********************************************************************/
   1648 static int
   1649 ixv_allocate_msix(struct adapter *adapter)
   1650 {
   1651 #if !defined(NETBSD_MSI_OR_MSIX)
   1652 	return 0;
   1653 #else
   1654 	device_t        dev = adapter->dev;
   1655 	struct 		ix_queue *que = adapter->queues;
   1656 	int 		error, rid, vector = 0;
   1657 	pcitag_t tag;
   1658 	pci_chipset_tag_t pc;
   1659 
   1660 	pc = adapter->osdep.pc;
   1661 	tag = adapter->osdep.tag;
   1662 
   1663 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   1664 		rid = vector + 1;
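		/*
		** Under FreeBSD-style newbus (this block is only compiled
		** when NETBSD_MSI_OR_MSIX is defined), MSI-X IRQ resource
		** IDs start at 1, so vector N maps to rid N + 1.
		*/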
   1665 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
   1666 		    RF_SHAREABLE | RF_ACTIVE);
   1667 		if (que->res == NULL) {
   1668 			aprint_error_dev(dev,"Unable to allocate"
   1669 		    	    " bus resource: que interrupt [%d]\n", vector);
   1670 			return (ENXIO);
   1671 		}
   1672 		/* Set the handler function */
   1673 		error = bus_setup_intr(dev, que->res,
   1674 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   1675 		    ixv_msix_que, que, &que->tag);
   1676 		if (error) {
   1677 			que->res = NULL;
   1678 			aprint_error_dev(dev,
   1679 			    "Failed to register QUE handler");
   1680 			return (error);
   1681 		}
   1682 #if __FreeBSD_version >= 800504
   1683 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
   1684 #endif
   1685 		que->msix = vector;
   1686         	adapter->que_mask |= (u64)(1 << que->msix);
   1687 		/*
   1688 		** Bind the msix vector, and thus the
   1689 		** ring to the corresponding cpu.
   1690 		*/
   1691 		if (adapter->num_queues > 1)
   1692 			bus_bind_intr(dev, que->res, i);
   1693 
   1694 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1695 		    que);
   1696 	}
   1697 
   1698 	/* and Mailbox */
   1699 	rid = vector + 1;
   1700 	adapter->res = bus_alloc_resource_any(dev,
   1701     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
   1702 	if (!adapter->res) {
   1703 		aprint_error_dev(dev,"Unable to allocate"
   1704     	    " bus resource: MBX interrupt [%d]\n", rid);
   1705 		return (ENXIO);
   1706 	}
   1707 	/* Set the mbx handler function */
   1708 	error = bus_setup_intr(dev, adapter->res,
   1709 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   1710 	    ixv_msix_mbx, adapter, &adapter->tag);
   1711 	if (error) {
   1712 		adapter->res = NULL;
   1713 		aprint_error_dev(dev, "Failed to register LINK handler");
   1714 		return (error);
   1715 	}
   1716 #if __FreeBSD_version >= 800504
   1717 	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
   1718 #endif
   1719 	adapter->mbxvec = vector;
    1720 	/* Soft interrupt for the Mailbox */
   1721 	adapter->mbx_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1722 	    adapter);
    1723 	/*
    1724 	** Due to a broken design, QEMU will fail to properly
    1725 	** enable the guest for MSIX unless all the vectors in
    1726 	** the table are set up, so we must rewrite the ENABLE
    1727 	** bit in the MSIX control register again at this point
    1728 	** to make it initialize us successfully.
    1729 	*/
   1730 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1731 		int msix_ctrl;
   1732 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid);
   1733 		rid += PCI_MSIX_CTL;
   1734 		msix_ctrl = pci_read_config(pc, tag, rid);
   1735 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   1736 		pci_conf_write(pc, tag, msix_ctrl);
   1737 	}
   1738 
   1739 	return (0);
   1740 #endif
   1741 }
   1742 
    1743 /*
    1744  * Set up MSIX resources; note that the VF
    1745  * device MUST use MSIX, there is no fallback.
    1746  */
   1747 static int
   1748 ixv_setup_msix(struct adapter *adapter)
   1749 {
   1750 #if !defined(NETBSD_MSI_OR_MSIX)
   1751 	return 0;
   1752 #else
   1753 	device_t dev = adapter->dev;
   1754 	int rid, vectors, want = 2;
   1755 
   1756 
   1757 	/* First try MSI/X */
   1758 	rid = PCIR_BAR(3);
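	/*
	** Note: the MSI-X table for this VF is assumed to live
	** behind BAR 3, hence PCIR_BAR(3) here.
	*/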
   1759 	adapter->msix_mem = bus_alloc_resource_any(dev,
   1760 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   1761        	if (!adapter->msix_mem) {
   1762 		device_printf(adapter->dev,
   1763 		    "Unable to map MSIX table \n");
   1764 		goto out;
   1765 	}
   1766 
   1767 	vectors = pci_msix_count(dev);
   1768 	if (vectors < 2) {
   1769 		bus_release_resource(dev, SYS_RES_MEMORY,
   1770 		    rid, adapter->msix_mem);
   1771 		adapter->msix_mem = NULL;
   1772 		goto out;
   1773 	}
   1774 
   1775 	/*
   1776 	** Want two vectors: one for a queue,
   1777 	** plus an additional for mailbox.
   1778 	*/
   1779 	if (pci_alloc_msix(dev, &want) == 0) {
   1780                	device_printf(adapter->dev,
   1781 		    "Using MSIX interrupts with %d vectors\n", want);
   1782 		return (want);
   1783 	}
   1784 out:
   1785 	device_printf(adapter->dev,"MSIX config error\n");
   1786 	return (ENXIO);
   1787 #endif
   1788 }
   1789 
   1790 
   1791 static int
   1792 ixv_allocate_pci_resources(struct adapter *adapter,
   1793     const struct pci_attach_args *pa)
   1794 {
   1795 	pcireg_t	memtype;
   1796 	device_t        dev = adapter->dev;
   1797 	bus_addr_t addr;
   1798 	int flags;
   1799 
   1800 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1801 
   1802 	switch (memtype) {
   1803 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1804 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1805 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1806 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1807 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1808 			goto map_err;
   1809 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1810 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1811 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1812 		}
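		/*
		** Device registers must be mapped non-prefetchable so
		** accesses are not combined or reordered on the bus;
		** that is why the prefetchable flag is cleared above.
		*/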
   1813 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1814 		     adapter->osdep.mem_size, flags,
   1815 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1816 map_err:
   1817 			adapter->osdep.mem_size = 0;
   1818 			aprint_error_dev(dev, "unable to map BAR0\n");
   1819 			return ENXIO;
   1820 		}
   1821 		break;
   1822 	default:
   1823 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1824 		return ENXIO;
   1825 	}
   1826 
   1827 	adapter->num_queues = 1;
   1828 	adapter->hw.back = &adapter->osdep;
   1829 
    1830 	/*
    1831 	** Now set up MSI/X, which should
    1832 	** return the number of
    1833 	** configured vectors.
    1834 	*/
   1835 	adapter->msix = ixv_setup_msix(adapter);
   1836 	if (adapter->msix == ENXIO)
   1837 		return (ENXIO);
   1838 	else
   1839 		return (0);
   1840 }
   1841 
   1842 static void
   1843 ixv_free_pci_resources(struct adapter * adapter)
   1844 {
   1845 #if defined(NETBSD_MSI_OR_MSIX)
   1846 	struct 		ix_queue *que = adapter->queues;
   1847 	device_t	dev = adapter->dev;
   1848 	int		rid, memrid;
   1849 
   1850 	memrid = PCI_BAR(MSIX_BAR);
   1851 
    1852 	/*
    1853 	** There is a slight possibility of a failure mode
    1854 	** in attach that will result in entering this function
    1855 	** before interrupt resources have been initialized, and
    1856 	** in that case we do not want to execute the loops below.
    1857 	** We can detect this reliably by the state of the adapter
    1858 	** res pointer.
    1859 	*/
   1860 	if (adapter->res == NULL)
   1861 		goto mem;
   1862 
   1863 	/*
   1864 	**  Release all msix queue resources:
   1865 	*/
   1866 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1867 		rid = que->msix + 1;
   1868 		if (que->tag != NULL) {
   1869 			bus_teardown_intr(dev, que->res, que->tag);
   1870 			que->tag = NULL;
   1871 		}
   1872 		if (que->res != NULL)
   1873 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
   1874 	}
   1875 
   1876 
   1877 	/* Clean the Legacy or Link interrupt last */
   1878 	if (adapter->mbxvec) /* we are doing MSIX */
   1879 		rid = adapter->mbxvec + 1;
   1880 	else
    1881 		rid = (adapter->msix != 0) ? 1 : 0;
   1882 
   1883 	if (adapter->tag != NULL) {
   1884 		bus_teardown_intr(dev, adapter->res, adapter->tag);
   1885 		adapter->tag = NULL;
   1886 	}
   1887 	if (adapter->res != NULL)
   1888 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
   1889 
   1890 mem:
   1891 	if (adapter->msix)
   1892 		pci_release_msi(dev);
   1893 
   1894 	if (adapter->msix_mem != NULL)
   1895 		bus_release_resource(dev, SYS_RES_MEMORY,
   1896 		    memrid, adapter->msix_mem);
   1897 
   1898 	if (adapter->pci_mem != NULL)
   1899 		bus_release_resource(dev, SYS_RES_MEMORY,
   1900 		    PCIR_BAR(0), adapter->pci_mem);
   1901 
   1902 #endif
   1903 	return;
   1904 }
   1905 
   1906 /*********************************************************************
   1907  *
   1908  *  Setup networking device structure and register an interface.
   1909  *
   1910  **********************************************************************/
   1911 static void
   1912 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1913 {
   1914 	struct ethercom *ec = &adapter->osdep.ec;
   1915 	struct ifnet   *ifp;
   1916 
   1917 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1918 
   1919 	ifp = adapter->ifp = &ec->ec_if;
   1920 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1921 	ifp->if_mtu = ETHERMTU;
   1922 	ifp->if_baudrate = 1000000000;
   1923 	ifp->if_init = ixv_init;
   1924 	ifp->if_stop = ixv_ifstop;
   1925 	ifp->if_softc = adapter;
   1926 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1927 	ifp->if_ioctl = ixv_ioctl;
   1928 #if __FreeBSD_version >= 800000
   1929 	ifp->if_transmit = ixv_mq_start;
   1930 	ifp->if_qflush = ixv_qflush;
   1931 #else
   1932 	ifp->if_start = ixv_start;
   1933 #endif
   1934 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1935 
   1936 	if_attach(ifp);
   1937 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1938 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1939 
   1940 	adapter->max_frame_size =
   1941 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1942 
   1943 	/*
   1944 	 * Tell the upper layer(s) we support long frames.
   1945 	 */
   1946 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1947 
   1948 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1949 	ifp->if_capenable = 0;
   1950 
   1951 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1952 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
   1953 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1954 	ec->ec_capenable = ec->ec_capabilities;
   1955 
   1956 	/* Don't enable LRO by default */
   1957 	ifp->if_capabilities |= IFCAP_LRO;
   1958 
    1959 	/*
    1960 	** Don't turn this on by default: if vlans are
    1961 	** created on another pseudo device (e.g. lagg),
    1962 	** then vlan events are not passed through, breaking
    1963 	** operation, but with HW FILTER off it works. If
    1964 	** using vlans directly on this driver you can
    1965 	** enable this and get full hardware tag filtering.
    1966 	*/
   1967 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1968 
   1969 	/*
   1970 	 * Specify the media types supported by this adapter and register
   1971 	 * callbacks to update media and link information
   1972 	 */
   1973 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1974 		     ixv_media_status);
   1975 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
   1976 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1977 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1978 
   1979 	return;
   1980 }
   1981 
   1982 static void
   1983 ixv_config_link(struct adapter *adapter)
   1984 {
   1985 	struct ixgbe_hw *hw = &adapter->hw;
   1986 	u32	autoneg, err = 0;
   1987 	bool	negotiate = TRUE;
   1988 
   1989 	if (hw->mac.ops.check_link)
   1990 		err = hw->mac.ops.check_link(hw, &autoneg,
   1991 		    &adapter->link_up, FALSE);
   1992 	if (err)
   1993 		goto out;
   1994 
   1995 	if (hw->mac.ops.setup_link)
   1996                	err = hw->mac.ops.setup_link(hw, autoneg,
   1997 		    negotiate, adapter->link_up);
   1998 out:
   1999 	return;
   2000 }
   2001 
   2002 /********************************************************************
   2003  * Manage DMA'able memory.
   2004  *******************************************************************/
   2005 
   2006 static int
   2007 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
   2008 		struct ixv_dma_alloc *dma, int mapflags)
   2009 {
   2010 	device_t dev = adapter->dev;
   2011 	int             r, rsegs;
   2012 
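	/*
	** Standard NetBSD bus_dma sequence: create a tag, allocate
	** raw DMA-safe memory, map it into kernel VA, create a map,
	** then load the map; the fail_* labels below unwind these
	** steps in reverse order.
	*/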
   2013 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2014 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2015 			       size,	/* maxsize */
   2016 			       1,	/* nsegments */
   2017 			       size,	/* maxsegsize */
   2018 			       BUS_DMA_ALLOCNOW,	/* flags */
   2019 			       &dma->dma_tag);
   2020 	if (r != 0) {
   2021 		aprint_error_dev(dev,
   2022 		    "ixv_dma_malloc: bus_dma_tag_create failed; error %u\n", r);
   2023 		goto fail_0;
   2024 	}
   2025 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2026 		size,
   2027 		dma->dma_tag->dt_alignment,
   2028 		dma->dma_tag->dt_boundary,
   2029 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2030 	if (r != 0) {
   2031 		aprint_error_dev(dev,
   2032 		    "%s: bus_dmamem_alloc failed; error %u\n", __func__, r);
   2033 		goto fail_1;
   2034 	}
   2035 
   2036 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2037 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2038 	if (r != 0) {
   2039 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2040 		    __func__, r);
   2041 		goto fail_2;
   2042 	}
   2043 
   2044 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2045 	if (r != 0) {
    2046 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
   2047 		    __func__, r);
   2048 		goto fail_3;
   2049 	}
   2050 
   2051 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2052 			    size,
   2053 			    NULL,
   2054 			    mapflags | BUS_DMA_NOWAIT);
   2055 	if (r != 0) {
   2056 		aprint_error_dev(dev,"%s: bus_dmamap_load failed; error %u\n",
   2057 		    __func__, r);
   2058 		goto fail_4;
   2059 	}
   2060 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2061 	dma->dma_size = size;
   2062 	return 0;
   2063 fail_4:
   2064 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2065 fail_3:
   2066 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2067 fail_2:
   2068 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2069 fail_1:
   2070 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2071 fail_0:
   2072 	dma->dma_map = NULL;
   2073 	dma->dma_tag = NULL;
   2074 	return (r);
   2075 }
   2076 
   2077 static void
   2078 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
   2079 {
   2080 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2081 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2082 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2083 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2084 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2085 }
   2086 
   2087 
   2088 /*********************************************************************
   2089  *
   2090  *  Allocate memory for the transmit and receive rings, and then
   2091  *  the descriptors associated with each, called only once at attach.
   2092  *
   2093  **********************************************************************/
   2094 static int
   2095 ixv_allocate_queues(struct adapter *adapter)
   2096 {
   2097 	device_t	dev = adapter->dev;
   2098 	struct ix_queue	*que;
   2099 	struct tx_ring	*txr;
   2100 	struct rx_ring	*rxr;
   2101 	int rsize, tsize, error = 0;
   2102 	int txconf = 0, rxconf = 0;
   2103 
   2104         /* First allocate the top level queue structs */
   2105         if (!(adapter->queues =
   2106             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2107             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2108                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2109                 error = ENOMEM;
   2110                 goto fail;
   2111         }
   2112 
   2113 	/* First allocate the TX ring struct memory */
   2114 	if (!(adapter->tx_rings =
   2115 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2116 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2117 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2118 		error = ENOMEM;
   2119 		goto tx_fail;
   2120 	}
   2121 
   2122 	/* Next allocate the RX */
   2123 	if (!(adapter->rx_rings =
   2124 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2125 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2126 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2127 		error = ENOMEM;
   2128 		goto rx_fail;
   2129 	}
   2130 
   2131 	/* For the ring itself */
   2132 	tsize = roundup2(adapter->num_tx_desc *
   2133 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
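	/*
	** The descriptor ring size is rounded up to DBA_ALIGN because
	** the hardware expects the descriptor base address and length
	** to be aligned to that boundary.
	*/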
   2134 
    2135 	/*
    2136 	 * Now set up the TX queues; txconf is needed to handle the
    2137 	 * possibility that things fail midcourse and we need to
    2138 	 * undo the memory allocations gracefully.
    2139 	 */
   2140 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2141 		/* Set up some basics */
   2142 		txr = &adapter->tx_rings[i];
   2143 		txr->adapter = adapter;
   2144 		txr->me = i;
   2145 
   2146 		/* Initialize the TX side lock */
   2147 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2148 		    device_xname(dev), txr->me);
   2149 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2150 
   2151 		if (ixv_dma_malloc(adapter, tsize,
   2152 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2153 			aprint_error_dev(dev,
   2154 			    "Unable to allocate TX Descriptor memory\n");
   2155 			error = ENOMEM;
   2156 			goto err_tx_desc;
   2157 		}
   2158 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2159 		bzero((void *)txr->tx_base, tsize);
   2160 
   2161         	/* Now allocate transmit buffers for the ring */
   2162         	if (ixv_allocate_transmit_buffers(txr)) {
   2163 			aprint_error_dev(dev,
   2164 			    "Critical Failure setting up transmit buffers\n");
   2165 			error = ENOMEM;
   2166 			goto err_tx_desc;
   2167         	}
   2168 #if __FreeBSD_version >= 800000
   2169 		/* Allocate a buf ring */
   2170 		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
   2171 		    M_WAITOK, &txr->tx_mtx);
   2172 		if (txr->br == NULL) {
   2173 			aprint_error_dev(dev,
   2174 			    "Critical Failure setting up buf ring\n");
   2175 			error = ENOMEM;
   2176 			goto err_tx_desc;
   2177 		}
   2178 #endif
   2179 	}
   2180 
   2181 	/*
   2182 	 * Next the RX queues...
   2183 	 */
   2184 	rsize = roundup2(adapter->num_rx_desc *
   2185 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2186 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2187 		rxr = &adapter->rx_rings[i];
   2188 		/* Set up some basics */
   2189 		rxr->adapter = adapter;
   2190 		rxr->me = i;
   2191 
   2192 		/* Initialize the RX side lock */
   2193 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2194 		    device_xname(dev), rxr->me);
   2195 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2196 
   2197 		if (ixv_dma_malloc(adapter, rsize,
   2198 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2199 			aprint_error_dev(dev,
   2200 			    "Unable to allocate RxDescriptor memory\n");
   2201 			error = ENOMEM;
   2202 			goto err_rx_desc;
   2203 		}
   2204 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2205 		bzero((void *)rxr->rx_base, rsize);
   2206 
    2207         	/* Allocate receive buffers for the ring */
   2208 		if (ixv_allocate_receive_buffers(rxr)) {
   2209 			aprint_error_dev(dev,
   2210 			    "Critical Failure setting up receive buffers\n");
   2211 			error = ENOMEM;
   2212 			goto err_rx_desc;
   2213 		}
   2214 	}
   2215 
   2216 	/*
   2217 	** Finally set up the queue holding structs
   2218 	*/
   2219 	for (int i = 0; i < adapter->num_queues; i++) {
   2220 		que = &adapter->queues[i];
   2221 		que->adapter = adapter;
   2222 		que->txr = &adapter->tx_rings[i];
   2223 		que->rxr = &adapter->rx_rings[i];
   2224 	}
   2225 
   2226 	return (0);
   2227 
   2228 err_rx_desc:
   2229 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2230 		ixv_dma_free(adapter, &rxr->rxdma);
   2231 err_tx_desc:
   2232 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2233 		ixv_dma_free(adapter, &txr->txdma);
   2234 	free(adapter->rx_rings, M_DEVBUF);
   2235 rx_fail:
   2236 	free(adapter->tx_rings, M_DEVBUF);
   2237 tx_fail:
   2238 	free(adapter->queues, M_DEVBUF);
   2239 fail:
   2240 	return (error);
   2241 }
   2242 
   2243 
   2244 /*********************************************************************
   2245  *
   2246  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2247  *  the information needed to transmit a packet on the wire. This is
    2248  *  called only once at attach; setup is done on every reset.
   2249  *
   2250  **********************************************************************/
   2251 static int
   2252 ixv_allocate_transmit_buffers(struct tx_ring *txr)
   2253 {
   2254 	struct adapter *adapter = txr->adapter;
   2255 	device_t dev = adapter->dev;
   2256 	struct ixv_tx_buf *txbuf;
   2257 	int error, i;
   2258 
   2259 	/*
   2260 	 * Setup DMA descriptor areas.
   2261 	 */
   2262 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2263 			       1, 0,		/* alignment, bounds */
   2264 			       IXV_TSO_SIZE,		/* maxsize */
   2265 			       32,			/* nsegments */
   2266 			       PAGE_SIZE,		/* maxsegsize */
   2267 			       0,			/* flags */
   2268 			       &txr->txtag))) {
   2269 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   2270 		goto fail;
   2271 	}
   2272 
   2273 	if (!(txr->tx_buffers =
   2274 	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
   2275 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2276 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   2277 		error = ENOMEM;
   2278 		goto fail;
   2279 	}
   2280 
   2281         /* Create the descriptor buffer dma maps */
   2282 	txbuf = txr->tx_buffers;
   2283 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2284 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   2285 		if (error != 0) {
   2286 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   2287 			goto fail;
   2288 		}
   2289 	}
   2290 
   2291 	return 0;
   2292 fail:
    2293 	/* We free everything; this handles the case where we failed partway through */
   2294 	ixv_free_transmit_structures(adapter);
   2295 	return (error);
   2296 }
   2297 
   2298 /*********************************************************************
   2299  *
   2300  *  Initialize a transmit ring.
   2301  *
   2302  **********************************************************************/
   2303 static void
   2304 ixv_setup_transmit_ring(struct tx_ring *txr)
   2305 {
   2306 	struct adapter *adapter = txr->adapter;
   2307 	struct ixv_tx_buf *txbuf;
   2308 	int i;
   2309 
   2310 	/* Clear the old ring contents */
   2311 	IXV_TX_LOCK(txr);
   2312 	bzero((void *)txr->tx_base,
   2313 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   2314 	/* Reset indices */
   2315 	txr->next_avail_desc = 0;
   2316 	txr->next_to_clean = 0;
   2317 
   2318 	/* Free any existing tx buffers. */
   2319         txbuf = txr->tx_buffers;
   2320 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2321 		if (txbuf->m_head != NULL) {
   2322 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   2323 			    0, txbuf->m_head->m_pkthdr.len,
   2324 			    BUS_DMASYNC_POSTWRITE);
   2325 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   2326 			m_freem(txbuf->m_head);
   2327 			txbuf->m_head = NULL;
   2328 		}
   2329 		/* Clear the EOP index */
   2330 		txbuf->eop_index = -1;
   2331         }
   2332 
   2333 	/* Set number of descriptors available */
   2334 	txr->tx_avail = adapter->num_tx_desc;
   2335 
   2336 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2337 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2338 	IXV_TX_UNLOCK(txr);
   2339 }
   2340 
   2341 /*********************************************************************
   2342  *
   2343  *  Initialize all transmit rings.
   2344  *
   2345  **********************************************************************/
   2346 static int
   2347 ixv_setup_transmit_structures(struct adapter *adapter)
   2348 {
   2349 	struct tx_ring *txr = adapter->tx_rings;
   2350 
   2351 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   2352 		ixv_setup_transmit_ring(txr);
   2353 
   2354 	return (0);
   2355 }
   2356 
   2357 /*********************************************************************
   2358  *
   2359  *  Enable transmit unit.
   2360  *
   2361  **********************************************************************/
   2362 static void
   2363 ixv_initialize_transmit_units(struct adapter *adapter)
   2364 {
   2365 	struct tx_ring	*txr = adapter->tx_rings;
   2366 	struct ixgbe_hw	*hw = &adapter->hw;
   2367 
   2368 
   2369 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2370 		u64	tdba = txr->txdma.dma_paddr;
   2371 		u32	txctrl, txdctl;
   2372 
   2373 		/* Set WTHRESH to 8, burst writeback */
   2374 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2375 		txdctl |= (8 << 16);
   2376 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2377 		/* Now enable */
   2378 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2379 		txdctl |= IXGBE_TXDCTL_ENABLE;
   2380 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2381 
   2382 		/* Set the HW Tx Head and Tail indices */
   2383 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   2384 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   2385 
   2386 		/* Setup Transmit Descriptor Cmd Settings */
   2387 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   2388 		txr->watchdog_check = FALSE;
   2389 
   2390 		/* Set Ring parameters */
   2391 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   2392 		       (tdba & 0x00000000ffffffffULL));
   2393 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   2394 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   2395 		    adapter->num_tx_desc *
   2396 		    sizeof(struct ixgbe_legacy_tx_desc));
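		/*
		** Using the legacy descriptor size here is harmless:
		** legacy and advanced TX descriptors are both 16 bytes,
		** so the ring length in bytes comes out the same.
		*/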
   2397 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   2398 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
   2399 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   2400 		break;
   2401 	}
   2402 
   2403 	return;
   2404 }
   2405 
   2406 /*********************************************************************
   2407  *
   2408  *  Free all transmit rings.
   2409  *
   2410  **********************************************************************/
   2411 static void
   2412 ixv_free_transmit_structures(struct adapter *adapter)
   2413 {
   2414 	struct tx_ring *txr = adapter->tx_rings;
   2415 
   2416 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2417 		ixv_free_transmit_buffers(txr);
   2418 		ixv_dma_free(adapter, &txr->txdma);
   2419 		IXV_TX_LOCK_DESTROY(txr);
   2420 	}
   2421 	free(adapter->tx_rings, M_DEVBUF);
   2422 }
   2423 
   2424 /*********************************************************************
   2425  *
   2426  *  Free transmit ring related data structures.
   2427  *
   2428  **********************************************************************/
   2429 static void
   2430 ixv_free_transmit_buffers(struct tx_ring *txr)
   2431 {
   2432 	struct adapter *adapter = txr->adapter;
   2433 	struct ixv_tx_buf *tx_buffer;
   2434 	int             i;
   2435 
   2436 	INIT_DEBUGOUT("free_transmit_ring: begin");
   2437 
   2438 	if (txr->tx_buffers == NULL)
   2439 		return;
   2440 
   2441 	tx_buffer = txr->tx_buffers;
   2442 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   2443 		if (tx_buffer->m_head != NULL) {
   2444 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   2445 			    0, tx_buffer->m_head->m_pkthdr.len,
   2446 			    BUS_DMASYNC_POSTWRITE);
   2447 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2448 			m_freem(tx_buffer->m_head);
   2449 			tx_buffer->m_head = NULL;
   2450 			if (tx_buffer->map != NULL) {
   2451 				ixgbe_dmamap_destroy(txr->txtag,
   2452 				    tx_buffer->map);
   2453 				tx_buffer->map = NULL;
   2454 			}
   2455 		} else if (tx_buffer->map != NULL) {
   2456 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2457 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   2458 			tx_buffer->map = NULL;
   2459 		}
   2460 	}
   2461 #if __FreeBSD_version >= 800000
   2462 	if (txr->br != NULL)
   2463 		buf_ring_free(txr->br, M_DEVBUF);
   2464 #endif
   2465 	if (txr->tx_buffers != NULL) {
   2466 		free(txr->tx_buffers, M_DEVBUF);
   2467 		txr->tx_buffers = NULL;
   2468 	}
   2469 	if (txr->txtag != NULL) {
   2470 		ixgbe_dma_tag_destroy(txr->txtag);
   2471 		txr->txtag = NULL;
   2472 	}
   2473 	return;
   2474 }
   2475 
   2476 /*********************************************************************
   2477  *
   2478  *  Advanced Context Descriptor setup for VLAN or L4 CSUM
   2479  *
   2480  **********************************************************************/
   2481 
   2482 static u32
   2483 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   2484 {
   2485 	struct m_tag *mtag;
   2486 	struct adapter *adapter = txr->adapter;
   2487 	struct ethercom *ec = &adapter->osdep.ec;
   2488 	struct ixgbe_adv_tx_context_desc *TXD;
   2489 	struct ixv_tx_buf        *tx_buffer;
   2490 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2491 	struct ether_vlan_header *eh;
   2492 	struct ip ip;
   2493 	struct ip6_hdr ip6;
   2494 	int  ehdrlen, ip_hlen = 0;
   2495 	u16	etype;
   2496 	u8	ipproto = 0;
   2497 	bool	offload;
   2498 	int ctxd = txr->next_avail_desc;
   2499 	u16 vtag = 0;
   2500 
   2501 
   2502 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   2503 
   2504 	tx_buffer = &txr->tx_buffers[ctxd];
   2505 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2506 
   2507 	/*
   2508 	** In advanced descriptors the vlan tag must
   2509 	** be placed into the descriptor itself.
   2510 	*/
   2511 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2512 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2513 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2514 	} else if (!offload)
   2515 		return 0;
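	/*
	** Past this point either a VLAN tag or a checksum offload
	** (or both) was requested, so a context descriptor is built.
	*/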
   2516 
   2517 	/*
   2518 	 * Determine where frame payload starts.
   2519 	 * Jump over vlan headers if already present,
   2520 	 * helpful for QinQ too.
   2521 	 */
   2522 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   2523 	eh = mtod(mp, struct ether_vlan_header *);
   2524 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   2525 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   2526 		etype = ntohs(eh->evl_proto);
   2527 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2528 	} else {
   2529 		etype = ntohs(eh->evl_encap_proto);
   2530 		ehdrlen = ETHER_HDR_LEN;
   2531 	}
   2532 
   2533 	/* Set the ether header length */
   2534 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2535 
   2536 	switch (etype) {
   2537 	case ETHERTYPE_IP:
   2538 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   2539 		ip_hlen = ip.ip_hl << 2;
   2540 		ipproto = ip.ip_p;
   2541 #if 0
   2542 		ip.ip_sum = 0;
   2543 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   2544 #else
   2545 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   2546 		    ip.ip_sum == 0);
   2547 #endif
   2548 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2549 		break;
   2550 	case ETHERTYPE_IPV6:
   2551 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   2552 		ip_hlen = sizeof(ip6);
   2553 		ipproto = ip6.ip6_nxt;
   2554 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   2555 		break;
   2556 	default:
   2557 		break;
   2558 	}
   2559 
   2560 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   2561 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   2562 
   2563 	vlan_macip_lens |= ip_hlen;
   2564 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2565 
   2566 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   2567 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2568 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2569 		KASSERT(ipproto == IPPROTO_TCP);
   2570 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   2571 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   2572 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2573 		KASSERT(ipproto == IPPROTO_UDP);
   2574 	}
   2575 
   2576 	/* Now copy bits into descriptor */
   2577 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2578 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2579 	TXD->seqnum_seed = htole32(0);
   2580 	TXD->mss_l4len_idx = htole32(0);
   2581 
   2582 	tx_buffer->m_head = NULL;
   2583 	tx_buffer->eop_index = -1;
   2584 
   2585 	/* We've consumed the first desc, adjust counters */
   2586 	if (++ctxd == adapter->num_tx_desc)
   2587 		ctxd = 0;
   2588 	txr->next_avail_desc = ctxd;
   2589 	--txr->tx_avail;
   2590 
   2591         return olinfo;
   2592 }
   2593 
   2594 /**********************************************************************
   2595  *
   2596  *  Setup work for hardware segmentation offload (TSO) on
   2597  *  adapters using advanced tx descriptors
   2598  *
   2599  **********************************************************************/
   2600 static bool
   2601 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   2602 {
   2603 	struct m_tag *mtag;
   2604 	struct adapter *adapter = txr->adapter;
   2605 	struct ethercom *ec = &adapter->osdep.ec;
   2606 	struct ixgbe_adv_tx_context_desc *TXD;
   2607 	struct ixv_tx_buf        *tx_buffer;
   2608 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2609 	u32 mss_l4len_idx = 0;
   2610 	u16 vtag = 0;
   2611 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
   2612 	struct ether_vlan_header *eh;
   2613 	struct ip *ip;
   2614 	struct tcphdr *th;
   2615 
   2616 
   2617 	/*
   2618 	 * Determine where frame payload starts.
   2619 	 * Jump over vlan headers if already present
   2620 	 */
   2621 	eh = mtod(mp, struct ether_vlan_header *);
   2622 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   2623 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2624 	else
   2625 		ehdrlen = ETHER_HDR_LEN;
   2626 
   2627         /* Ensure we have at least the IP+TCP header in the first mbuf. */
   2628         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   2629 		return FALSE;
   2630 
   2631 	ctxd = txr->next_avail_desc;
   2632 	tx_buffer = &txr->tx_buffers[ctxd];
   2633 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2634 
   2635 	ip = (struct ip *)(mp->m_data + ehdrlen);
   2636 	if (ip->ip_p != IPPROTO_TCP)
   2637 		return FALSE;   /* 0 */
   2638 	ip->ip_sum = 0;
   2639 	ip_hlen = ip->ip_hl << 2;
   2640 	th = (struct tcphdr *)((char *)ip + ip_hlen);
   2641 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   2642 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   2643 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
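	/*
	** For TSO the TCP checksum field is seeded with the
	** pseudo-header checksum (addresses and protocol, no length)
	** so the hardware can complete the checksum per segment.
	*/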
   2644 	tcp_hlen = th->th_off << 2;
   2645 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   2646 
   2647 	/* This is used in the transmit desc in encap */
   2648 	*paylen = mp->m_pkthdr.len - hdrlen;
   2649 
   2650 	/* VLAN MACLEN IPLEN */
   2651 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2652 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2653                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2654 	}
   2655 
   2656 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2657 	vlan_macip_lens |= ip_hlen;
   2658 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2659 
   2660 	/* ADV DTYPE TUCMD */
   2661 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2662 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2663 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2664 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2665 
   2666 
   2667 	/* MSS L4LEN IDX */
   2668 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   2669 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   2670 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   2671 
   2672 	TXD->seqnum_seed = htole32(0);
   2673 	tx_buffer->m_head = NULL;
   2674 	tx_buffer->eop_index = -1;
   2675 
   2676 	if (++ctxd == adapter->num_tx_desc)
   2677 		ctxd = 0;
   2678 
   2679 	txr->tx_avail--;
   2680 	txr->next_avail_desc = ctxd;
   2681 	return TRUE;
   2682 }
   2683 
   2684 
   2685 /**********************************************************************
   2686  *
   2687  *  Examine each tx_buffer in the used queue. If the hardware is done
   2688  *  processing the packet then free associated resources. The
   2689  *  tx_buffer is put back on the free queue.
   2690  *
   2691  **********************************************************************/
   2692 static bool
   2693 ixv_txeof(struct tx_ring *txr)
   2694 {
   2695 	struct adapter	*adapter = txr->adapter;
   2696 	struct ifnet	*ifp = adapter->ifp;
   2697 	u32	first, last, done;
   2698 	struct ixv_tx_buf *tx_buffer;
   2699 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   2700 
   2701 	KASSERT(mutex_owned(&txr->tx_mtx));
   2702 
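	/* If every descriptor is free there is nothing to clean. */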
   2703 	if (txr->tx_avail == adapter->num_tx_desc)
   2704 		return false;
   2705 
   2706 	first = txr->next_to_clean;
   2707 	tx_buffer = &txr->tx_buffers[first];
   2708 	/* For cleanup we just use legacy struct */
   2709 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2710 	last = tx_buffer->eop_index;
   2711 	if (last == -1)
   2712 		return false;
   2713 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2714 
   2715 	/*
   2716 	** Get the index of the first descriptor
   2717 	** BEYOND the EOP and call that 'done'.
   2718 	** I do this so the comparison in the
    2719 	** inner while loop below can be simple.
   2720 	*/
   2721 	if (++last == adapter->num_tx_desc) last = 0;
   2722 	done = last;
   2723 
   2724         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2725             BUS_DMASYNC_POSTREAD);
   2726 	/*
   2727 	** Only the EOP descriptor of a packet now has the DD
   2728 	** bit set, this is what we look for...
   2729 	*/
   2730 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   2731 		/* We clean the range of the packet */
   2732 		while (first != done) {
   2733 			tx_desc->upper.data = 0;
   2734 			tx_desc->lower.data = 0;
   2735 			tx_desc->buffer_addr = 0;
   2736 			++txr->tx_avail;
   2737 
   2738 			if (tx_buffer->m_head) {
   2739 				bus_dmamap_sync(txr->txtag->dt_dmat,
   2740 				    tx_buffer->map,
   2741 				    0, tx_buffer->m_head->m_pkthdr.len,
   2742 				    BUS_DMASYNC_POSTWRITE);
   2743 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2744 				m_freem(tx_buffer->m_head);
   2745 				tx_buffer->m_head = NULL;
   2746 				tx_buffer->map = NULL;
   2747 			}
   2748 			tx_buffer->eop_index = -1;
   2749 			getmicrotime(&txr->watchdog_time);
   2750 
   2751 			if (++first == adapter->num_tx_desc)
   2752 				first = 0;
   2753 
   2754 			tx_buffer = &txr->tx_buffers[first];
   2755 			tx_desc =
   2756 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2757 		}
   2758 		++ifp->if_opackets;
   2759 		/* See if there is more work now */
   2760 		last = tx_buffer->eop_index;
   2761 		if (last != -1) {
   2762 			eop_desc =
   2763 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2764 			/* Get next done point */
   2765 			if (++last == adapter->num_tx_desc) last = 0;
   2766 			done = last;
   2767 		} else
   2768 			break;
   2769 	}
   2770 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2771 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2772 
   2773 	txr->next_to_clean = first;
   2774 
   2775 	/*
   2776 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   2777 	 * it is OK to send packets. If there are no pending descriptors,
   2778 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   2779 	 * restart the timeout.
   2780 	 */
   2781 	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
   2782 		ifp->if_flags &= ~IFF_OACTIVE;
   2783 		if (txr->tx_avail == adapter->num_tx_desc) {
   2784 			txr->watchdog_check = FALSE;
   2785 			return false;
   2786 		}
   2787 	}
   2788 
   2789 	return true;
   2790 }
   2791 
   2792 /*********************************************************************
   2793  *
   2794  *  Refresh mbuf buffers for RX descriptor rings
    2795  *   - now keeps its own state, so discards due to resource
    2796  *     exhaustion are unnecessary; if an mbuf cannot be obtained
    2797  *     it just returns, keeping its placeholder, so it can simply
    2798  *     be called again later to try once more.
   2799  *
   2800  **********************************************************************/
   2801 static void
   2802 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
   2803 {
   2804 	struct adapter		*adapter = rxr->adapter;
   2805 	struct ixv_rx_buf	*rxbuf;
   2806 	struct mbuf		*mh, *mp;
   2807 	int			i, j, error;
   2808 	bool			refreshed = false;
   2809 
   2810 	i = j = rxr->next_to_refresh;
   2811 	/* Control the loop with one beyond */
   2812 	if (++j == adapter->num_rx_desc)
   2813 		j = 0;
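	/*
	** 'i' is the slot being refreshed and 'j' runs one ahead of
	** it, so the loop stops before 'j' reaches 'limit'.
	*/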
   2814 	while (j != limit) {
   2815 		rxbuf = &rxr->rx_buffers[i];
   2816 		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
   2817 			mh = m_gethdr(M_DONTWAIT, MT_DATA);
   2818 			if (mh == NULL)
   2819 				goto update;
    2820 			mh->m_pkthdr.len = mh->m_len = MHLEN;
   2822 			mh->m_flags |= M_PKTHDR;
   2823 			m_adj(mh, ETHER_ALIGN);
   2824 			/* Get the memory mapping */
   2825 			error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   2826 			    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   2827 			if (error != 0) {
   2828 				printf("GET BUF: dmamap load"
   2829 				    " failure - %d\n", error);
   2830 				m_free(mh);
   2831 				goto update;
   2832 			}
   2833 			rxbuf->m_head = mh;
   2834 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2835 			    BUS_DMASYNC_PREREAD);
   2836 			rxr->rx_base[i].read.hdr_addr =
   2837 			    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   2838 		}
   2839 
   2840 		if (rxbuf->m_pack == NULL) {
   2841 			mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   2842 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   2843 			if (mp == NULL) {
   2844 				rxr->no_jmbuf.ev_count++;
   2845 				goto update;
   2846 			}
   2847 			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   2848 			/* Get the memory mapping */
   2849 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   2850 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   2851 			if (error != 0) {
   2852 				printf("GET BUF: dmamap load"
   2853 				    " failure - %d\n", error);
   2854 				m_free(mp);
   2855 				goto update;
   2856 			}
   2857 			rxbuf->m_pack = mp;
   2858 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2859 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   2860 			rxr->rx_base[i].read.pkt_addr =
   2861 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   2862 		}
   2863 
   2864 		refreshed = true;
   2865 		rxr->next_to_refresh = i = j;
   2866 		/* Calculate next index */
   2867 		if (++j == adapter->num_rx_desc)
   2868 			j = 0;
   2869 	}
   2870 update:
   2871 	if (refreshed) /* If we refreshed some, bump tail */
   2872 		IXGBE_WRITE_REG(&adapter->hw,
   2873 		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
   2874 	return;
   2875 }
   2876 
   2877 /*********************************************************************
   2878  *
   2879  *  Allocate memory for rx_buffer structures. Since we use one
    2880  *  rx_buffer per received packet, the maximum number of rx_buffers
   2881  *  that we'll need is equal to the number of receive descriptors
   2882  *  that we've allocated.
   2883  *
   2884  **********************************************************************/
   2885 static int
   2886 ixv_allocate_receive_buffers(struct rx_ring *rxr)
   2887 {
   2888 	struct	adapter 	*adapter = rxr->adapter;
   2889 	device_t 		dev = adapter->dev;
   2890 	struct ixv_rx_buf 	*rxbuf;
   2891 	int             	i, bsize, error;
   2892 
   2893 	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
   2894 	if (!(rxr->rx_buffers =
   2895 	    (struct ixv_rx_buf *) malloc(bsize,
   2896 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2897 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   2898 		error = ENOMEM;
   2899 		goto fail;
   2900 	}
   2901 
   2902 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2903 				   1, 0,	/* alignment, bounds */
   2904 				   MSIZE,		/* maxsize */
   2905 				   1,			/* nsegments */
   2906 				   MSIZE,		/* maxsegsize */
   2907 				   0,			/* flags */
   2908 				   &rxr->htag))) {
   2909 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2910 		goto fail;
   2911 	}
   2912 
   2913 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2914 				   1, 0,	/* alignment, bounds */
   2915 				   MJUMPAGESIZE,	/* maxsize */
   2916 				   1,			/* nsegments */
   2917 				   MJUMPAGESIZE,	/* maxsegsize */
   2918 				   0,			/* flags */
   2919 				   &rxr->ptag))) {
   2920 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2921 		goto fail;
   2922 	}
   2923 
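	/*
	** Two DMA tags were created above: 'htag' maps the small
	** header mbufs used when header split is enabled, and 'ptag'
	** maps the jumbo payload clusters; each rx_buffer gets one
	** map from each tag.
	*/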
   2924 	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
   2925 		rxbuf = &rxr->rx_buffers[i];
   2926 		error = ixgbe_dmamap_create(rxr->htag,
   2927 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   2928 		if (error) {
   2929 			aprint_error_dev(dev, "Unable to create RX head map\n");
   2930 			goto fail;
   2931 		}
   2932 		error = ixgbe_dmamap_create(rxr->ptag,
   2933 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   2934 		if (error) {
   2935 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   2936 			goto fail;
   2937 		}
   2938 	}
   2939 
   2940 	return (0);
   2941 
   2942 fail:
   2943 	/* Frees all, but can handle partial completion */
   2944 	ixv_free_receive_structures(adapter);
   2945 	return (error);
   2946 }
   2947 
   2948 static void
   2949 ixv_free_receive_ring(struct rx_ring *rxr)
   2950 {
   2951 	struct  adapter         *adapter;
   2952 	struct ixv_rx_buf       *rxbuf;
   2953 	int i;
   2954 
   2955 	adapter = rxr->adapter;
   2956 	for (i = 0; i < adapter->num_rx_desc; i++) {
   2957 		rxbuf = &rxr->rx_buffers[i];
   2958 		if (rxbuf->m_head != NULL) {
   2959 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2960 			    BUS_DMASYNC_POSTREAD);
   2961 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   2962 			rxbuf->m_head->m_flags |= M_PKTHDR;
   2963 			m_freem(rxbuf->m_head);
   2964 		}
   2965 		if (rxbuf->m_pack != NULL) {
   2966 			/* XXX not ixgbe_ ? */
   2967 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2968 			    0, rxbuf->m_pack->m_pkthdr.len,
   2969 			    BUS_DMASYNC_POSTREAD);
   2970 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   2971 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   2972 			m_freem(rxbuf->m_pack);
   2973 		}
   2974 		rxbuf->m_head = NULL;
   2975 		rxbuf->m_pack = NULL;
   2976 	}
   2977 }
   2978 
   2979 
   2980 /*********************************************************************
   2981  *
   2982  *  Initialize a receive ring and its buffers.
   2983  *
   2984  **********************************************************************/
   2985 static int
   2986 ixv_setup_receive_ring(struct rx_ring *rxr)
   2987 {
   2988 	struct	adapter 	*adapter;
   2989 	struct ixv_rx_buf	*rxbuf;
   2990 #ifdef LRO
   2991 	struct ifnet		*ifp;
   2992 	struct lro_ctrl		*lro = &rxr->lro;
   2993 #endif /* LRO */
   2994 	int			rsize, error = 0;
   2995 
   2996 	adapter = rxr->adapter;
   2997 #ifdef LRO
   2998 	ifp = adapter->ifp;
   2999 #endif /* LRO */
   3000 
   3001 	/* Clear the ring contents */
   3002 	IXV_RX_LOCK(rxr);
   3003 	rsize = roundup2(adapter->num_rx_desc *
   3004 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3005 	bzero((void *)rxr->rx_base, rsize);
   3006 
   3007 	/* Free current RX buffer structs and their mbufs */
   3008 	ixv_free_receive_ring(rxr);
   3009 
   3010 	IXV_RX_UNLOCK(rxr);
   3011 
   3012 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3013 	 * or size of jumbo mbufs may have changed.
   3014 	 */
   3015 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3016 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
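	/*
	** The pool is sized to twice the ring size, presumably to
	** leave headroom for clusters still loaned out to the stack.
	*/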
   3017 
   3018 	IXV_RX_LOCK(rxr);
   3019 
   3020 	/* Configure header split? */
   3021 	if (ixv_header_split)
   3022 		rxr->hdr_split = TRUE;
   3023 
   3024 	/* Now replenish the mbufs */
   3025 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3026 		struct mbuf	*mh, *mp;
   3027 
   3028 		rxbuf = &rxr->rx_buffers[j];
    3029 		/*
    3030 		** Don't allocate header mbufs if we're not
    3031 		** doing header split; it's wasteful.
    3032 		*/
   3033 		if (rxr->hdr_split == FALSE)
   3034 			goto skip_head;
   3035 
   3036 		/* First the header */
   3037 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3038 		if (rxbuf->m_head == NULL) {
   3039 			error = ENOBUFS;
   3040 			goto fail;
   3041 		}
   3042 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3043 		mh = rxbuf->m_head;
   3044 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3045 		mh->m_flags |= M_PKTHDR;
   3046 		/* Get the memory mapping */
   3047 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3048 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3049 		if (error != 0) /* Nothing elegant to do here */
   3050 			goto fail;
   3051 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3052 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3053 		/* Update descriptor */
   3054 		rxr->rx_base[j].read.hdr_addr =
   3055 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3056 
   3057 skip_head:
   3058 		/* Now the payload cluster */
   3059 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3060 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3061 		if (rxbuf->m_pack == NULL) {
   3062 			error = ENOBUFS;
   3063                         goto fail;
   3064 		}
   3065 		mp = rxbuf->m_pack;
   3066 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3067 		/* Get the memory mapping */
   3068 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3069 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3070 		if (error != 0)
   3071                         goto fail;
   3072 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3073 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3074 		/* Update descriptor */
   3075 		rxr->rx_base[j].read.pkt_addr =
   3076 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3077 	}
   3078 
   3079 
   3080 	/* Setup our descriptor indices */
   3081 	rxr->next_to_check = 0;
   3082 	rxr->next_to_refresh = 0;
   3083 	rxr->lro_enabled = FALSE;
   3084 	rxr->rx_split_packets.ev_count = 0;
   3085 	rxr->rx_bytes.ev_count = 0;
   3086 
   3087 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3088 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3089 
   3090 #ifdef LRO
   3091 	/*
   3092 	** Now set up the LRO interface:
   3093 	*/
   3094 	if (ifp->if_capenable & IFCAP_LRO) {
   3095 		device_t dev = adapter->dev;
   3096 		int err = tcp_lro_init(lro);
   3097 		if (err) {
   3098 			device_printf(dev, "LRO Initialization failed!\n");
   3099 			goto fail;
   3100 		}
   3101 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   3102 		rxr->lro_enabled = TRUE;
   3103 		lro->ifp = adapter->ifp;
   3104 	}
   3105 #endif /* LRO */
   3106 
   3107 	IXV_RX_UNLOCK(rxr);
   3108 	return (0);
   3109 
   3110 fail:
   3111 	ixv_free_receive_ring(rxr);
   3112 	IXV_RX_UNLOCK(rxr);
   3113 	return (error);
   3114 }
   3115 
   3116 /*********************************************************************
   3117  *
   3118  *  Initialize all receive rings.
   3119  *
   3120  **********************************************************************/
   3121 static int
   3122 ixv_setup_receive_structures(struct adapter *adapter)
   3123 {
   3124 	struct rx_ring *rxr = adapter->rx_rings;
   3125 	int j;
   3126 
   3127 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   3128 		if (ixv_setup_receive_ring(rxr))
   3129 			goto fail;
   3130 
   3131 	return (0);
   3132 fail:
    3133 	/*
    3134 	 * Free RX buffers allocated so far; we will only handle
    3135 	 * the rings that completed, since the failing case will have
    3136 	 * cleaned up after itself. 'j' failed, so it's the terminus.
    3137 	 */
   3138 	for (int i = 0; i < j; ++i) {
   3139 		rxr = &adapter->rx_rings[i];
   3140 		ixv_free_receive_ring(rxr);
   3141 	}
   3142 
   3143 	return (ENOBUFS);
   3144 }
   3145 
   3146 /*********************************************************************
   3147  *
   3148  *  Setup receive registers and features.
   3149  *
   3150  **********************************************************************/
   3151 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   3152 
   3153 static void
   3154 ixv_initialize_receive_units(struct adapter *adapter)
   3155 {
   3156 	int i;
   3157 	struct	rx_ring	*rxr = adapter->rx_rings;
   3158 	struct ixgbe_hw	*hw = &adapter->hw;
   3159 	struct ifnet   *ifp = adapter->ifp;
   3160 	u32		bufsz, fctrl, rxcsum, hlreg;
   3161 
   3162 
   3163 	/* Enable broadcasts */
   3164 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   3165 	fctrl |= IXGBE_FCTRL_BAM;
   3166 	fctrl |= IXGBE_FCTRL_DPF;
   3167 	fctrl |= IXGBE_FCTRL_PMCF;
   3168 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   3169 
   3170 	/* Set for Jumbo Frames? */
   3171 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   3172 	if (ifp->if_mtu > ETHERMTU) {
   3173 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   3174 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3175 	} else {
   3176 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   3177 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3178 	}
   3179 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
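	/*
	** 'bufsz' is expressed in the 1 KB units that the SRRCTL
	** BSIZEPKT field expects (hence the shift above): 4 KB
	** clusters for jumbo frames, 2 KB otherwise.
	*/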
   3180 
   3181 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   3182 		u64 rdba = rxr->rxdma.dma_paddr;
   3183 		u32 reg, rxdctl;
   3184 
   3185 		/* Do the queue enabling first */
   3186 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3187 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3188 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   3189 		for (int k = 0; k < 10; k++) {
   3190 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   3191 			    IXGBE_RXDCTL_ENABLE)
   3192 				break;
   3193 			else
   3194 				msec_delay(1);
   3195 		}
   3196 		wmb();
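		/*
		** The queue is enabled first and polled until the
		** hardware reflects RXDCTL.ENABLE; the write barrier
		** keeps the enable ordered ahead of the ring setup
		** that follows.
		*/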
   3197 
   3198 		/* Setup the Base and Length of the Rx Descriptor Ring */
   3199 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   3200 		    (rdba & 0x00000000ffffffffULL));
   3201 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   3202 		    (rdba >> 32));
   3203 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   3204 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   3205 
   3206 		/* Set up the SRRCTL register */
   3207 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   3208 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   3209 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   3210 		reg |= bufsz;
   3211 		if (rxr->hdr_split) {
   3212 			/* Use a standard mbuf for the header */
   3213 			reg |= ((IXV_RX_HDR <<
   3214 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   3215 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   3216 			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   3217 		} else
   3218 			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   3219 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   3220 
   3221 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   3222 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   3223 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   3224 		    adapter->num_rx_desc - 1);
   3225 	}
   3226 
   3227 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   3228 
   3229 	if (ifp->if_capenable & IFCAP_RXCSUM)
   3230 		rxcsum |= IXGBE_RXCSUM_PCSD;
   3231 
   3232 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   3233 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   3234 
   3235 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   3236 
   3237 	return;
   3238 }
   3239 
   3240 /*********************************************************************
   3241  *
   3242  *  Free all receive rings.
   3243  *
   3244  **********************************************************************/
   3245 static void
   3246 ixv_free_receive_structures(struct adapter *adapter)
   3247 {
   3248 	struct rx_ring *rxr = adapter->rx_rings;
   3249 
   3250 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   3251 #ifdef LRO
   3252 		struct lro_ctrl		*lro = &rxr->lro;
   3253 #endif /* LRO */
   3254 		ixv_free_receive_buffers(rxr);
   3255 #ifdef LRO
   3256 		/* Free LRO memory */
   3257 		tcp_lro_free(lro);
   3258 #endif /* LRO */
   3259 		/* Free the ring memory as well */
   3260 		ixv_dma_free(adapter, &rxr->rxdma);
   3261 		IXV_RX_LOCK_DESTROY(rxr);
   3262 	}
   3263 
   3264 	free(adapter->rx_rings, M_DEVBUF);
   3265 }
   3266 
   3267 
   3268 /*********************************************************************
   3269  *
   3270  *  Free receive ring data structures
   3271  *
   3272  **********************************************************************/
   3273 static void
   3274 ixv_free_receive_buffers(struct rx_ring *rxr)
   3275 {
   3276 	struct adapter		*adapter = rxr->adapter;
   3277 	struct ixv_rx_buf	*rxbuf;
   3278 
   3279 	INIT_DEBUGOUT("ixv_free_receive_buffers: begin");
   3280 
   3281 	/* Cleanup any existing buffers */
   3282 	if (rxr->rx_buffers != NULL) {
   3283 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   3284 			rxbuf = &rxr->rx_buffers[i];
   3285 			if (rxbuf->m_head != NULL) {
   3286 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3287 				    BUS_DMASYNC_POSTREAD);
   3288 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3289 				rxbuf->m_head->m_flags |= M_PKTHDR;
   3290 				m_freem(rxbuf->m_head);
   3291 			}
   3292 			if (rxbuf->m_pack != NULL) {
   3293 				/* XXX not ixgbe_* ? */
   3294 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3295 				    0, rxbuf->m_pack->m_pkthdr.len,
   3296 				    BUS_DMASYNC_POSTREAD);
   3297 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3298 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   3299 				m_freem(rxbuf->m_pack);
   3300 			}
   3301 			rxbuf->m_head = NULL;
   3302 			rxbuf->m_pack = NULL;
   3303 			if (rxbuf->hmap != NULL) {
   3304 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   3305 				rxbuf->hmap = NULL;
   3306 			}
   3307 			if (rxbuf->pmap != NULL) {
   3308 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   3309 				rxbuf->pmap = NULL;
   3310 			}
   3311 		}
   3312 		if (rxr->rx_buffers != NULL) {
   3313 			free(rxr->rx_buffers, M_DEVBUF);
   3314 			rxr->rx_buffers = NULL;
   3315 		}
   3316 	}
   3317 
   3318 	if (rxr->htag != NULL) {
   3319 		ixgbe_dma_tag_destroy(rxr->htag);
   3320 		rxr->htag = NULL;
   3321 	}
   3322 	if (rxr->ptag != NULL) {
   3323 		ixgbe_dma_tag_destroy(rxr->ptag);
   3324 		rxr->ptag = NULL;
   3325 	}
   3326 
   3327 	return;
   3328 }
   3329 
   3330 static __inline void
   3331 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   3332 {
   3333 	int s;
   3334 
   3335 #ifdef LRO
   3336 	struct adapter	*adapter = ifp->if_softc;
   3337 	struct ethercom *ec = &adapter->osdep.ec;
   3338 
   3339         /*
   3340          * At the moment LRO is only done for IPv4/TCP packets whose
   3341          * TCP checksum has been verified by hardware, and which carry
   3342          * no VLAN tag in the ethernet header.
   3343          */
   3344         if (rxr->lro_enabled &&
   3345             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   3346             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3347             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   3348             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
   3349             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   3350             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   3351                 /*
   3352                  * Try LRO first; fall through and send the
   3353                  * packet to the stack below if there are no
   3354                  * LRO resources available or if the LRO
   3355                  * enqueue fails.
   3356                  */
   3357                 if (rxr->lro.lro_cnt != 0)
   3358                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   3359                                 return;
   3360         }
   3361 #endif /* LRO */
   3362 
   3363 	IXV_RX_UNLOCK(rxr);
   3364 
   3365 	s = splnet();
   3366 	/* Pass this up to any BPF listeners. */
   3367 	bpf_mtap(ifp, m);
   3368         (*ifp->if_input)(ifp, m);
   3369 	splx(s);
   3370 
   3371 	IXV_RX_LOCK(rxr);
   3372 }
   3373 
   3374 static __inline void
   3375 ixv_rx_discard(struct rx_ring *rxr, int i)
   3376 {
   3377 	struct adapter		*adapter = rxr->adapter;
   3378 	struct ixv_rx_buf	*rbuf;
   3379 	struct mbuf		*mh, *mp;
   3380 
   3381 	rbuf = &rxr->rx_buffers[i];
   3382         if (rbuf->fmp != NULL) /* Partial chain ? */
   3383                 m_freem(rbuf->fmp);
   3384 
   3385 	mh = rbuf->m_head;
   3386 	mp = rbuf->m_pack;
   3387 
   3388 	/* Reuse loaded DMA map and just update mbuf chain */
   3389 	mh->m_len = MHLEN;
   3390 	mh->m_flags |= M_PKTHDR;
   3391 	mh->m_next = NULL;
   3392 
   3393 	mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
   3394 	mp->m_data = mp->m_ext.ext_buf;
   3395 	mp->m_next = NULL;
   3396 	return;
   3397 }
   3398 
   3399 
   3400 /*********************************************************************
   3401  *
   3402  *  This routine executes in interrupt context. It replenishes
   3403  *  the mbufs in the descriptor ring and passes data that has been
   3404  *  DMA'd into host memory up to the upper layer.
   3405  *
   3406  *  We loop at most count times if count is > 0, or until done if
   3407  *  count < 0.
   3408  *
   3409  *  Return TRUE for more work, FALSE for all clean.
   3410  *********************************************************************/
   3411 static bool
   3412 ixv_rxeof(struct ix_queue *que, int count)
   3413 {
   3414 	struct adapter		*adapter = que->adapter;
   3415 	struct rx_ring		*rxr = que->rxr;
   3416 	struct ifnet		*ifp = adapter->ifp;
   3417 #ifdef LRO
   3418 	struct lro_ctrl		*lro = &rxr->lro;
   3419 	struct lro_entry	*queued;
   3420 #endif /* LRO */
   3421 	int			i, nextp, processed = 0;
   3422 	u32			staterr = 0;
   3423 	union ixgbe_adv_rx_desc	*cur;
   3424 	struct ixv_rx_buf	*rbuf, *nbuf;
   3425 
   3426 	IXV_RX_LOCK(rxr);
   3427 
   3428 	for (i = rxr->next_to_check; count != 0;) {
   3429 		struct mbuf	*sendmp, *mh, *mp;
   3430 		u32		ptype;
   3431 		u16		hlen, plen, hdr, vtag;
   3432 		bool		eop;
   3433 
   3434 		/* Sync the ring. */
   3435 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3436 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   3437 
   3438 		cur = &rxr->rx_base[i];
   3439 		staterr = le32toh(cur->wb.upper.status_error);
   3440 
   3441 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   3442 			break;
   3443 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   3444 			break;
   3445 
   3446 		count--;
   3447 		sendmp = NULL;
   3448 		nbuf = NULL;
   3449 		cur->wb.upper.status_error = 0;
   3450 		rbuf = &rxr->rx_buffers[i];
   3451 		mh = rbuf->m_head;
   3452 		mp = rbuf->m_pack;
   3453 
   3454 		plen = le16toh(cur->wb.upper.length);
   3455 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   3456 		    IXGBE_RXDADV_PKTTYPE_MASK;
   3457 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   3458 		vtag = le16toh(cur->wb.upper.vlan);
   3459 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   3460 
   3461 		/* Make sure all parts of a bad packet are discarded */
   3462 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   3463 		    (rxr->discard)) {
   3464 			ifp->if_ierrors++;
   3465 			rxr->rx_discarded.ev_count++;
   3466 			if (!eop)
   3467 				rxr->discard = TRUE;
   3468 			else
   3469 				rxr->discard = FALSE;
   3470 			ixv_rx_discard(rxr, i);
   3471 			goto next_desc;
   3472 		}
   3473 
   3474 		if (!eop) {
   3475 			nextp = i + 1;
   3476 			if (nextp == adapter->num_rx_desc)
   3477 				nextp = 0;
   3478 			nbuf = &rxr->rx_buffers[nextp];
   3479 			prefetch(nbuf);
   3480 		}
   3481 		/*
   3482 		** The header mbuf is ONLY used when header
   3483 		** split is enabled, otherwise we get normal
   3484 		** behavior, i.e., both header and payload
   3485 		** are DMA'd into the payload buffer.
   3486 		**
   3487 		** Rather than using the fmp/lmp global pointers
   3488 		** we now keep the head of a packet chain in the
   3489 		** buffer struct and pass this along from one
   3490 		** descriptor to the next, until we get EOP.
   3491 		*/
   3492 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   3493 			/* This must be an initial descriptor */
   3494 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   3495 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   3496 			if (hlen > IXV_RX_HDR)
   3497 				hlen = IXV_RX_HDR;
   3498 			mh->m_len = hlen;
   3499 			mh->m_flags |= M_PKTHDR;
   3500 			mh->m_next = NULL;
   3501 			mh->m_pkthdr.len = mh->m_len;
   3502 			/* Null buf pointer so it is refreshed */
   3503 			rbuf->m_head = NULL;
   3504 			/*
   3505 			** Check the payload length; this
   3506 			** could be zero if it's a small
   3507 			** packet.
   3508 			*/
   3509 			if (plen > 0) {
   3510 				mp->m_len = plen;
   3511 				mp->m_next = NULL;
   3512 				mp->m_flags &= ~M_PKTHDR;
   3513 				mh->m_next = mp;
   3514 				mh->m_pkthdr.len += mp->m_len;
   3515 				/* Null buf pointer so it is refreshed */
   3516 				rbuf->m_pack = NULL;
   3517 				rxr->rx_split_packets.ev_count++;
   3518 			}
   3519 			/*
   3520 			** Now create the forward
   3521 			** chain so that when the packet
   3522 			** completes we won't have to.
   3523 			*/
   3524                         if (eop == 0) {
   3525 				/* stash the chain head */
   3526                                 nbuf->fmp = mh;
   3527 				/* Make forward chain */
   3528                                 if (plen)
   3529                                         mp->m_next = nbuf->m_pack;
   3530                                 else
   3531                                         mh->m_next = nbuf->m_pack;
   3532                         } else {
   3533 				/* Singlet, prepare to send */
   3534                                 sendmp = mh;
   3535                                 if (VLAN_ATTACHED(&adapter->osdep.ec) &&
   3536 				  (staterr & IXGBE_RXD_STAT_VP)) {
   3537 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3538 					    printf("%s: could not apply VLAN "
   3539 					        "tag", __func__));
   3540                                 }
   3541                         }
   3542 		} else {
   3543 			/*
   3544 			** Either no header split, or a
   3545 			** secondary piece of a fragmented
   3546 			** split packet.
   3547 			*/
   3548 			mp->m_len = plen;
   3549 			/*
   3550 			** See if there is a stored head that
   3551 			** marks this as a continuation fragment.
   3552 			*/
   3553 			sendmp = rbuf->fmp;
   3554 			rbuf->m_pack = rbuf->fmp = NULL;
   3555 
   3556 			if (sendmp != NULL) /* secondary frag */
   3557 				sendmp->m_pkthdr.len += mp->m_len;
   3558 			else {
   3559 				/* first desc of a non-ps chain */
   3560 				sendmp = mp;
   3561 				sendmp->m_flags |= M_PKTHDR;
   3562 				sendmp->m_pkthdr.len = mp->m_len;
   3563 				if (staterr & IXGBE_RXD_STAT_VP) {
   3564 					/* XXX Do something reasonable on
   3565 					 * error.
   3566 					 */
   3567 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3568 					    printf("%s: could not apply VLAN "
   3569 					        "tag", __func__));
   3570 				}
   3571                         }
   3572 			/* Pass the head pointer on */
   3573 			if (eop == 0) {
   3574 				nbuf->fmp = sendmp;
   3575 				sendmp = NULL;
   3576 				mp->m_next = nbuf->m_pack;
   3577 			}
   3578 		}
   3579 		++processed;
   3580 		/* Sending this frame? */
   3581 		if (eop) {
   3582 			sendmp->m_pkthdr.rcvif = ifp;
   3583 			ifp->if_ipackets++;
   3584 			rxr->rx_packets.ev_count++;
   3585 			/* capture data for AIM */
   3586 			rxr->bytes += sendmp->m_pkthdr.len;
   3587 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   3588 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   3589 				ixv_rx_checksum(staterr, sendmp, ptype,
   3590 				   &adapter->stats);
   3591 			}
   3592 #if __FreeBSD_version >= 800000
   3593 			sendmp->m_pkthdr.flowid = que->msix;
   3594 			sendmp->m_flags |= M_FLOWID;
   3595 #endif
   3596 		}
   3597 next_desc:
   3598 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3599 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3600 
   3601 		/* Advance our pointers to the next descriptor. */
   3602 		if (++i == adapter->num_rx_desc)
   3603 			i = 0;
   3604 
   3605 		/* Now send to the stack or do LRO */
   3606 		if (sendmp != NULL)
   3607 			ixv_rx_input(rxr, ifp, sendmp, ptype);
   3608 
   3609 		/* Refresh the mbufs every 8 processed descriptors */
   3610 		if (processed == 8) {
   3611 			ixv_refresh_mbufs(rxr, i);
   3612 			processed = 0;
   3613 		}
   3614 	}
   3615 
   3616 	/* Refresh any remaining buf structs */
   3617 	if (processed != 0) {
   3618 		ixv_refresh_mbufs(rxr, i);
   3619 		processed = 0;
   3620 	}
   3621 
   3622 	rxr->next_to_check = i;
   3623 
   3624 #ifdef LRO
   3625 	/*
   3626 	 * Flush any outstanding LRO work
   3627 	 */
   3628 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   3629 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   3630 		tcp_lro_flush(lro, queued);
   3631 	}
   3632 #endif /* LRO */
   3633 
   3634 	IXV_RX_UNLOCK(rxr);
   3635 
   3636 	/*
   3637 	** Is there still cleaning to do?
   3638 	** If so, schedule another interrupt.
   3639 	*/
   3640 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   3641 		ixv_rearm_queues(adapter, (u64)(1ULL << que->msix));
   3642 		return true;
   3643 	}
   3644 
   3645 	return false;
   3646 }
   3647 
   3648 
   3649 /*********************************************************************
   3650  *
   3651  *  Verify that the hardware indicated that the checksum is valid.
   3652  *  Inform the stack about the status of the checksum so that the
   3653  *  stack doesn't spend time verifying it again.
   3654  *
   3655  *********************************************************************/
   3656 static void
   3657 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   3658     struct ixgbevf_hw_stats *stats)
   3659 {
   3660 	u16	status = (u16) staterr;
   3661 	u8	errors = (u8) (staterr >> 24);
   3662 #if 0
   3663 	bool	sctp = FALSE;
   3664 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3665 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   3666 		sctp = TRUE;
   3667 #endif
   3668 	if (status & IXGBE_RXD_STAT_IPCS) {
   3669 		stats->ipcs.ev_count++;
   3670 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   3671 			/* IP Checksum Good */
   3672 			mp->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   3673 
   3674 		} else {
   3675 			stats->ipcs_bad.ev_count++;
   3676 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   3677 		}
   3678 	}
   3679 	if (status & IXGBE_RXD_STAT_L4CS) {
   3680 		stats->l4cs.ev_count++;
   3681 		u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
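        		/*
        		 * Rather than decoding ptype to tell TCP from UDP (or
        		 * v4 from v6), this code marks all four variants and
        		 * lets the error bit below qualify the result.
        		 */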
   3682 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   3683 			mp->m_pkthdr.csum_flags |= type;
   3684 		} else {
   3685 			stats->l4cs_bad.ev_count++;
   3686 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   3687 		}
   3688 	}
   3689 	return;
   3690 }
   3691 
   3692 static void
   3693 ixv_setup_vlan_support(struct adapter *adapter)
   3694 {
   3695 	struct ixgbe_hw *hw = &adapter->hw;
   3696 	u32		ctrl, vid, vfta, retry;
   3697 
   3698 
   3699 	/*
   3700 	** We get here through init_locked, meaning
   3701 	** a soft reset; that has already cleared
   3702 	** the VFTA and other state, so if no
   3703 	** VLANs have been registered do nothing.
   3704 	*/
   3705 	if (adapter->num_vlans == 0)
   3706 		return;
   3707 
   3708 	/* Enable the queues */
   3709 	for (int i = 0; i < adapter->num_queues; i++) {
   3710 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3711 		ctrl |= IXGBE_RXDCTL_VME;
   3712 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   3713 	}
   3714 
   3715 	/*
   3716 	** A soft reset zeroes out the VFTA, so
   3717 	** we need to repopulate it now.
   3718 	*/
   3719 	for (int i = 0; i < VFTA_SIZE; i++) {
   3720 		if (ixv_shadow_vfta[i] == 0)
   3721 			continue;
   3722 		vfta = ixv_shadow_vfta[i];
   3723 		/*
   3724 		** Reconstruct the VLAN IDs
   3725 		** based on the bits set in each
   3726 		** of the 32-bit array entries.
   3727 		*/
   3728 		for (int j = 0; j < 32; j++) {
   3729 			retry = 0;
   3730 			if ((vfta & (1 << j)) == 0)
   3731 				continue;
   3732 			vid = (i * 32) + j;
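        			/* e.g. bit 4 set in shadow_vfta[3] reconstructs VLAN ID 3*32+4 = 100 */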
   3733 			/* Call the shared code mailbox routine */
   3734 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   3735 				if (++retry > 5)
   3736 					break;
   3737 			}
   3738 		}
   3739 	}
   3740 }
   3741 
   3742 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   3743 /*
   3744 ** This routine is run via a vlan config EVENT;
   3745 ** it enables us to use the HW filter table since
   3746 ** we can get the vlan id. This just creates the
   3747 ** entry in the soft version of the VFTA; init will
   3748 ** repopulate the real table.
   3749 */
   3750 static void
   3751 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3752 {
   3753 	struct adapter	*adapter = ifp->if_softc;
   3754 	u16		index, bit;
   3755 
   3756 	if (ifp->if_softc !=  arg)   /* Not our event */
   3757 		return;
   3758 
   3759 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3760 		return;
   3761 
   3762 	index = (vtag >> 5) & 0x7F;
   3763 	bit = vtag & 0x1F;
   3764 	ixv_shadow_vfta[index] |= (1 << bit);
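        	/* e.g. vtag 100 -> index (100 >> 5) & 0x7F = 3, bit 100 & 0x1F = 4 */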
   3765 	/* Re-init to load the changes */
   3766 	ixv_init(adapter);
   3767 }
   3768 
   3769 /*
   3770 ** This routine is run via a vlan
   3771 ** unconfig EVENT; remove our entry
   3772 ** from the soft VFTA.
   3773 */
   3774 static void
   3775 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3776 {
   3777 	struct adapter	*adapter = ifp->if_softc;
   3778 	u16		index, bit;
   3779 
   3780 	if (ifp->if_softc !=  arg)
   3781 		return;
   3782 
   3783 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3784 		return;
   3785 
   3786 	index = (vtag >> 5) & 0x7F;
   3787 	bit = vtag & 0x1F;
   3788 	ixv_shadow_vfta[index] &= ~(1 << bit);
   3789 	/* Re-init to load the changes */
   3790 	ixv_init(adapter);
   3791 }
   3792 #endif
   3793 
   3794 static void
   3795 ixv_enable_intr(struct adapter *adapter)
   3796 {
   3797 	struct ixgbe_hw *hw = &adapter->hw;
   3798 	struct ix_queue *que = adapter->queues;
   3799 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   3800 
   3801 
   3802 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   3803 
   3804 	mask = IXGBE_EIMS_ENABLE_MASK;
   3805 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   3806 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
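        	/*
        	 * Assuming VTEIAC is the VF interrupt auto-clear register (as
        	 * it is named in the shared code), only the queue causes are
        	 * set to auto-clear here; the mailbox/link cause is left to be
        	 * cleared by its handler.
        	 */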
   3807 
   3808         for (int i = 0; i < adapter->num_queues; i++, que++)
   3809 		ixv_enable_queue(adapter, que->msix);
   3810 
   3811 	IXGBE_WRITE_FLUSH(hw);
   3812 
   3813 	return;
   3814 }
   3815 
   3816 static void
   3817 ixv_disable_intr(struct adapter *adapter)
   3818 {
   3819 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   3820 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   3821 	IXGBE_WRITE_FLUSH(&adapter->hw);
   3822 	return;
   3823 }
   3824 
   3825 /*
   3826 ** Setup the correct IVAR register for a particular MSIX interrupt
   3827 **  - entry is the register array entry
   3828 **  - vector is the MSIX vector for this queue
   3829 **  - type is RX/TX/MISC
   3830 */
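        /*
        ** Worked example, derived from the arithmetic below (not from the
        ** datasheet): queue entry 3, TX (type 1) is programmed into
        ** VTIVAR(3 >> 1) = VTIVAR(1) at bit offset 16*(3 & 1) + 8*1 = 24,
        ** i.e. the top byte of that register.
        */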
   3831 static void
   3832 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3833 {
   3834 	struct ixgbe_hw *hw = &adapter->hw;
   3835 	u32 ivar, index;
   3836 
   3837 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3838 
   3839 	if (type == -1) { /* MISC IVAR */
   3840 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   3841 		ivar &= ~0xFF;
   3842 		ivar |= vector;
   3843 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   3844 	} else {	/* RX/TX IVARS */
   3845 		index = (16 * (entry & 1)) + (8 * type);
   3846 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   3847 		ivar &= ~(0xFF << index);
   3848 		ivar |= (vector << index);
   3849 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   3850 	}
   3851 }
   3852 
   3853 static void
   3854 ixv_configure_ivars(struct adapter *adapter)
   3855 {
   3856 	struct  ix_queue *que = adapter->queues;
   3857 
   3858         for (int i = 0; i < adapter->num_queues; i++, que++) {
   3859 		/* First the RX queue entry */
   3860                 ixv_set_ivar(adapter, i, que->msix, 0);
   3861 		/* ... and the TX */
   3862 		ixv_set_ivar(adapter, i, que->msix, 1);
   3863 		/* Set an initial value in EITR */
   3864                 IXGBE_WRITE_REG(&adapter->hw,
   3865                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   3866 	}
   3867 
   3868 	/* For the Link interrupt */
   3869         ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
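        	/* (the 'entry' argument is ignored by ixv_set_ivar for the -1/misc case) */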
   3870 }
   3871 
   3872 
   3873 /*
   3874 ** Tasklet handler for MSIX MBX interrupts
   3875 **  - done outside of interrupt context since it might sleep
   3876 */
   3877 static void
   3878 ixv_handle_mbx(void *context)
   3879 {
   3880 	struct adapter  *adapter = context;
   3881 
   3882 	ixgbe_check_link(&adapter->hw,
   3883 	    &adapter->link_speed, &adapter->link_up, 0);
   3884 	ixv_update_link_status(adapter);
   3885 }
   3886 
   3887 /*
   3888 ** The VF stats registers never have a truly virgin
   3889 ** starting point, so this routine tries to make an
   3890 ** artificial one, marking ground zero at attach time,
   3891 ** as it were.
   3892 */
   3893 static void
   3894 ixv_save_stats(struct adapter *adapter)
   3895 {
   3896 	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
   3897 		adapter->stats.saved_reset_vfgprc +=
   3898 		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
   3899 		adapter->stats.saved_reset_vfgptc +=
   3900 		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
   3901 		adapter->stats.saved_reset_vfgorc +=
   3902 		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
   3903 		adapter->stats.saved_reset_vfgotc +=
   3904 		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
   3905 		adapter->stats.saved_reset_vfmprc +=
   3906 		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
   3907 	}
   3908 }
   3909 
   3910 static void
   3911 ixv_init_stats(struct adapter *adapter)
   3912 {
   3913 	struct ixgbe_hw *hw = &adapter->hw;
   3914 
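        	/*
        	 * Snapshot the current register values; they become both the
        	 * "last seen" values used for wrap detection and the
        	 * attach-time baselines used by ixv_save_stats().
        	 */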
   3915 	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   3916 	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   3917 	adapter->stats.last_vfgorc |=
   3918 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   3919 
   3920 	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   3921 	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   3922 	adapter->stats.last_vfgotc |=
   3923 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   3924 
   3925 	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   3926 
   3927 	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
   3928 	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
   3929 	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
   3930 	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
   3931 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
   3932 }
   3933 
   3934 #define UPDATE_STAT_32(reg, last, count)		\
   3935 {							\
   3936 	u32 current = IXGBE_READ_REG(hw, reg);		\
   3937 	if (current < last)				\
   3938 		count += 0x100000000LL;			\
   3939 	last = current;					\
   3940 	count &= 0xFFFFFFFF00000000LL;			\
   3941 	count |= current;				\
   3942 }
   3943 
   3944 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   3945 {							\
   3946 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   3947 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   3948 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   3949 	if (current < last)				\
   3950 		count += 0x1000000000LL;		\
   3951 	last = current;					\
   3952 	count &= 0xFFFFFFF000000000LL;			\
   3953 	count |= current;				\
   3954 }
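        /*
        ** The macros above fold a fresh hardware reading into a wider soft
        ** counter: the VF registers are only 32 (or 36) bits wide, so when
        ** the current reading is smaller than the previous one the register
        ** has wrapped and the soft counter is advanced by 2^32 (or 2^36)
        ** before its low bits are replaced.  E.g. last = 0xFFFFFFF0 and
        ** current = 0x10 adds one full 32-bit wrap to the running count.
        */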
   3955 
   3956 /*
   3957 ** ixv_update_stats - Update the board statistics counters.
   3958 */
   3959 void
   3960 ixv_update_stats(struct adapter *adapter)
   3961 {
   3962         struct ixgbe_hw *hw = &adapter->hw;
   3963 
   3964         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
   3965 	    adapter->stats.vfgprc);
   3966         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
   3967 	    adapter->stats.vfgptc);
   3968         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
   3969 	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
   3970         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
   3971 	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
   3972         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
   3973 	    adapter->stats.vfmprc);
   3974 }
   3975 
   3976 /**********************************************************************
   3977  *
   3978  *  This routine is called from the stats sysctl handler (ixv_sysctl_stats).
   3979  *  This routine provides a way to take a look at important statistics
   3980  *  maintained by the driver and hardware.
   3981  *
   3982  **********************************************************************/
   3983 static void
   3984 ixv_print_hw_stats(struct adapter * adapter)
   3985 {
   3986         device_t dev = adapter->dev;
   3987 
   3988         device_printf(dev, "Std Mbuf Failed = %llu\n",
   3989                (long long)adapter->mbuf_defrag_failed.ev_count);
   3990         device_printf(dev, "Driver dropped packets = %llu\n",
   3991                (long long)adapter->dropped_pkts.ev_count);
   3992         device_printf(dev, "Watchdog timeouts = %llu\n",
   3993                (long long)adapter->watchdog_events.ev_count);
   3994 
   3995         device_printf(dev,"Good Packets Rcvd = %llu\n",
   3996                (long long)adapter->stats.vfgprc);
   3997         device_printf(dev,"Good Packets Xmtd = %llu\n",
   3998                (long long)adapter->stats.vfgptc);
   3999         device_printf(dev, "TSO Transmissions = %llu\n",
   4000                (long long)adapter->tso_tx.ev_count);
   4001 
   4002 }
   4003 
   4004 /**********************************************************************
   4005  *
   4006  *  This routine is called from the debug sysctl handler (ixv_sysctl_debug).
   4007  *  This routine provides a way to take a look at important statistics
   4008  *  maintained by the driver and hardware.
   4009  *
   4010  **********************************************************************/
   4011 static void
   4012 ixv_print_debug_info(struct adapter *adapter)
   4013 {
   4014         device_t dev = adapter->dev;
   4015         struct ixgbe_hw         *hw = &adapter->hw;
   4016         struct ix_queue         *que = adapter->queues;
   4017         struct rx_ring          *rxr;
   4018         struct tx_ring          *txr;
   4019 #ifdef LRO
   4020         struct lro_ctrl         *lro;
   4021 #endif /* LRO */
   4022 
   4023         device_printf(dev,"Error Byte Count = %u \n",
   4024             IXGBE_READ_REG(hw, IXGBE_ERRBC));
   4025 
   4026         for (int i = 0; i < adapter->num_queues; i++, que++) {
   4027                 txr = que->txr;
   4028                 rxr = que->rxr;
   4029 #ifdef LRO
   4030                 lro = &rxr->lro;
   4031 #endif /* LRO */
   4032                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
   4033                     que->msix, (long)que->irqs);
   4034                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
   4035                     rxr->me, (long long)rxr->rx_packets.ev_count);
   4036                 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
   4037                     rxr->me, (long long)rxr->rx_split_packets.ev_count);
   4038                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
   4039                     rxr->me, (long)rxr->rx_bytes.ev_count);
   4040 #ifdef LRO
   4041                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
   4042                     rxr->me, lro->lro_queued);
   4043                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
   4044                     rxr->me, lro->lro_flushed);
   4045 #endif /* LRO */
   4046                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
   4047                     txr->me, (long)txr->total_packets.ev_count);
   4048                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
   4049                     txr->me, (long)txr->no_desc_avail.ev_count);
   4050         }
   4051 
   4052         device_printf(dev,"MBX IRQ Handled: %lu\n",
   4053             (long)adapter->mbx_irq.ev_count);
   4054         return;
   4055 }
   4056 
   4057 static int
   4058 ixv_sysctl_stats(SYSCTLFN_ARGS)
   4059 {
   4060 	struct sysctlnode node;
   4061 	int             error;
   4062 	int		result;
   4063 	struct adapter *adapter;
   4064 
   4065 	node = *rnode;
   4066 	adapter = (struct adapter *)node.sysctl_data;
   4067 	node.sysctl_data = &result;
   4068 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4069 	if (error != 0)
   4070 		return error;
   4071 
   4072 	if (result == 1)
   4073 		ixv_print_hw_stats(adapter);
   4074 
   4075 	return 0;
   4076 }
   4077 
   4078 static int
   4079 ixv_sysctl_debug(SYSCTLFN_ARGS)
   4080 {
   4081 	struct sysctlnode node;
   4082 	int error, result;
   4083 	struct adapter *adapter;
   4084 
   4085 	node = *rnode;
   4086 	adapter = (struct adapter *)node.sysctl_data;
   4087 	node.sysctl_data = &result;
   4088 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4089 
   4090 	if (error)
   4091 		return error;
   4092 
   4093 	if (result == 1)
   4094 		ixv_print_debug_info(adapter);
   4095 
   4096 	return 0;
   4097 }
   4098 
   4099 /*
   4100 ** Set flow control using sysctl:
   4101 ** Flow control values:
   4102 ** 	0 - off
   4103 **	1 - rx pause
   4104 **	2 - tx pause
   4105 **	3 - full
   4106 */
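        /*
        ** (The numeric values above line up with the ixgbe_fc_none,
        ** ixgbe_fc_rx_pause, ixgbe_fc_tx_pause and ixgbe_fc_full cases
        ** handled in the switch below.)
        */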
   4107 static int
   4108 ixv_set_flowcntl(SYSCTLFN_ARGS)
   4109 {
   4110 	struct sysctlnode node;
   4111 	int error;
   4112 	struct adapter *adapter;
   4113 
   4114 	node = *rnode;
   4115 	adapter = (struct adapter *)node.sysctl_data;
   4116 	node.sysctl_data = &ixv_flow_control;
   4117 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4118 
   4119 	if (error)
   4120 		return (error);
   4121 
   4122 	switch (ixv_flow_control) {
   4123 		case ixgbe_fc_rx_pause:
   4124 		case ixgbe_fc_tx_pause:
   4125 		case ixgbe_fc_full:
   4126 			adapter->hw.fc.requested_mode = ixv_flow_control;
   4127 			break;
   4128 		case ixgbe_fc_none:
   4129 		default:
   4130 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4131 	}
   4132 
   4133 	ixgbe_fc_enable(&adapter->hw, 0);
   4134 	return error;
   4135 }
   4136 
   4137 const struct sysctlnode *
   4138 ixv_sysctl_instance(struct adapter *adapter)
   4139 {
   4140 	const char *dvname;
   4141 	struct sysctllog **log;
   4142 	int rc;
   4143 	const struct sysctlnode *rnode;
   4144 
   4145 	log = &adapter->sysctllog;
   4146 	dvname = device_xname(adapter->dev);
   4147 
   4148 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   4149 	    0, CTLTYPE_NODE, dvname,
   4150 	    SYSCTL_DESCR("ixv information and settings"),
   4151 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   4152 		goto err;
   4153 
   4154 	return rnode;
   4155 err:
   4156 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   4157 	return NULL;
   4158 }
   4159 
   4160 static void
   4161 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
   4162         const char *description, int *limit, int value)
   4163 {
   4164 	const struct sysctlnode *rnode, *cnode;
   4165 	struct sysctllog **log = &adapter->sysctllog;
   4166 
   4167         *limit = value;
   4168 
   4169 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL)
   4170 		aprint_error_dev(adapter->dev,
   4171 		    "could not create sysctl root\n");
   4172 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   4173 	    CTLFLAG_READWRITE,
   4174 	    CTLTYPE_INT,
   4175 	    name, SYSCTL_DESCR(description),
   4176 	    NULL, 0, limit, 0,
   4177 	    CTL_CREATE, CTL_EOL) != 0) {
   4178 		aprint_error_dev(adapter->dev, "%s: could not create sysctl",
   4179 		    __func__);
   4180 	}
   4181 }
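        /*
        ** The node created above shows up as a read/write integer under the
        ** per-device node from ixv_sysctl_instance(), so with a hypothetical
        ** device ixv0 and a hypothetical leaf name "rx_process_limit" it
        ** could be tuned with something like:
        **	sysctl -w hw.ixv0.rx_process_limit=256
        */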
   4182 
   4183