      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2011, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: src/sys/dev/ixgbe/ixv.c,v 1.2 2011/03/23 13:10:15 jhb Exp $*/
     34 /*$NetBSD: ixv.c,v 1.4 2015/03/19 14:22:23 msaitoh Exp $*/
     35 
     36 #include "opt_inet.h"
     37 #include "opt_inet6.h"
     38 
     39 #include "ixv.h"
     40 
     41 /*********************************************************************
     42  *  Driver version
     43  *********************************************************************/
     44 char ixv_driver_version[] = "1.0.1";
     45 
     46 /*********************************************************************
     47  *  PCI Device ID Table
     48  *
      49  *  Used by probe to select which devices the driver should attach to
     50  *  Last field stores an index into ixv_strings
     51  *  Last entry must be all 0s
     52  *
     53  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     54  *********************************************************************/
     55 
     56 static ixv_vendor_info_t ixv_vendor_info_array[] =
     57 {
     58 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     59 	/* required last entry */
     60 	{0, 0, 0, 0, 0}
     61 };
     62 
     63 /*********************************************************************
     64  *  Table of branding strings
     65  *********************************************************************/
     66 
     67 static const char    *ixv_strings[] = {
     68 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     69 };
     70 
     71 /*********************************************************************
     72  *  Function prototypes
     73  *********************************************************************/
     74 static int      ixv_probe(device_t, cfdata_t, void *);
     75 static void      ixv_attach(device_t, device_t, void *);
     76 static int      ixv_detach(device_t, int);
     77 #if 0
     78 static int      ixv_shutdown(device_t);
     79 #endif
     80 #if __FreeBSD_version < 800000
     81 static void     ixv_start(struct ifnet *);
     82 static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
     83 #else
     84 static int	ixv_mq_start(struct ifnet *, struct mbuf *);
     85 static int	ixv_mq_start_locked(struct ifnet *,
     86 		    struct tx_ring *, struct mbuf *);
     87 static void	ixv_qflush(struct ifnet *);
     88 #endif
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void     ixv_stop(void *);
     93 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     94 static int      ixv_media_change(struct ifnet *);
     95 static void     ixv_identify_hardware(struct adapter *);
     96 static int      ixv_allocate_pci_resources(struct adapter *,
     97 		    const struct pci_attach_args *);
     98 static int      ixv_allocate_msix(struct adapter *);
     99 static int	ixv_allocate_queues(struct adapter *);
    100 static int	ixv_setup_msix(struct adapter *);
    101 static void	ixv_free_pci_resources(struct adapter *);
    102 static void     ixv_local_timer(void *);
    103 static void     ixv_setup_interface(device_t, struct adapter *);
    104 static void     ixv_config_link(struct adapter *);
    105 
    106 static int      ixv_allocate_transmit_buffers(struct tx_ring *);
    107 static int	ixv_setup_transmit_structures(struct adapter *);
    108 static void	ixv_setup_transmit_ring(struct tx_ring *);
    109 static void     ixv_initialize_transmit_units(struct adapter *);
    110 static void     ixv_free_transmit_structures(struct adapter *);
    111 static void     ixv_free_transmit_buffers(struct tx_ring *);
    112 
    113 static int      ixv_allocate_receive_buffers(struct rx_ring *);
    114 static int      ixv_setup_receive_structures(struct adapter *);
    115 static int	ixv_setup_receive_ring(struct rx_ring *);
    116 static void     ixv_initialize_receive_units(struct adapter *);
    117 static void     ixv_free_receive_structures(struct adapter *);
    118 static void     ixv_free_receive_buffers(struct rx_ring *);
    119 
    120 static void     ixv_enable_intr(struct adapter *);
    121 static void     ixv_disable_intr(struct adapter *);
    122 static bool	ixv_txeof(struct tx_ring *);
    123 static bool	ixv_rxeof(struct ix_queue *, int);
    124 static void	ixv_rx_checksum(u32, struct mbuf *, u32,
    125 		    struct ixgbevf_hw_stats *);
    126 static void     ixv_set_multi(struct adapter *);
    127 static void     ixv_update_link_status(struct adapter *);
    128 static void	ixv_refresh_mbufs(struct rx_ring *, int);
    129 static int      ixv_xmit(struct tx_ring *, struct mbuf *);
    130 static int	ixv_sysctl_stats(SYSCTLFN_PROTO);
    131 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    132 static int	ixv_set_flowcntl(SYSCTLFN_PROTO);
    133 static int	ixv_dma_malloc(struct adapter *, bus_size_t,
    134 		    struct ixv_dma_alloc *, int);
    135 static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
    136 static void	ixv_add_rx_process_limit(struct adapter *, const char *,
    137 		    const char *, int *, int);
    138 static u32	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
    139 static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
    140 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    141 static void	ixv_configure_ivars(struct adapter *);
    142 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    143 
    144 static void	ixv_setup_vlan_support(struct adapter *);
    145 #if 0
    146 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    147 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    148 #endif
    149 
    150 static void	ixv_save_stats(struct adapter *);
    151 static void	ixv_init_stats(struct adapter *);
    152 static void	ixv_update_stats(struct adapter *);
    153 
    154 static __inline void ixv_rx_discard(struct rx_ring *, int);
    155 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
    156 		    struct mbuf *, u32);
    157 
    158 /* The MSI/X Interrupt handlers */
    159 static void	ixv_msix_que(void *);
    160 static void	ixv_msix_mbx(void *);
    161 
    162 /* Deferred interrupt tasklets */
    163 static void	ixv_handle_que(void *);
    164 static void	ixv_handle_mbx(void *);
    165 
    166 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    167 static ixv_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    168 
    169 /*********************************************************************
    170  *  FreeBSD Device Interface Entry Points
    171  *********************************************************************/
    172 
    173 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    174     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    175     DVF_DETACH_SHUTDOWN);
    176 
    177 # if 0
    178 static device_method_t ixv_methods[] = {
    179 	/* Device interface */
    180 	DEVMETHOD(device_probe, ixv_probe),
    181 	DEVMETHOD(device_attach, ixv_attach),
    182 	DEVMETHOD(device_detach, ixv_detach),
    183 	DEVMETHOD(device_shutdown, ixv_shutdown),
    184 	{0, 0}
    185 };
    186 #endif
    187 
    188 #if 0
    189 static driver_t ixv_driver = {
    190 	"ix", ixv_methods, sizeof(struct adapter),
    191 };
    192 
    193 extern devclass_t ixgbe_devclass;
    194 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
    195 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    196 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    197 #endif
    198 
    199 /*
    200 ** TUNEABLE PARAMETERS:
    201 */
    202 
    203 /*
    204 ** AIM: Adaptive Interrupt Moderation
    205 ** which means that the interrupt rate
    206 ** is varied over time based on the
    207 ** traffic for that interrupt vector
    208 */
    209 static int ixv_enable_aim = FALSE;
     210 #define	TUNABLE_INT(__x, __y)	/* FreeBSD tunable mechanism; a no-op on NetBSD */
    211 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    212 
    213 /* How many packets rxeof tries to clean at a time */
    214 static int ixv_rx_process_limit = 128;
    215 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    216 
    217 /* Flow control setting, default to full */
    218 static int ixv_flow_control = ixgbe_fc_full;
    219 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
    220 
    221 /*
    222  * Header split: this causes the hardware to DMA
     223  * the header into a separate mbuf from the payload.
     224  * It can be a performance win in some workloads, but
     225  * in others it actually hurts, so it's off by default.
    226  */
    227 static int ixv_header_split = FALSE;
    228 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
    229 
    230 /*
     231 ** Number of TX descriptors per ring;
     232 ** set higher than RX as this seems to be
     233 ** the better-performing choice.
    234 */
    235 static int ixv_txd = DEFAULT_TXD;
    236 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    237 
    238 /* Number of RX descriptors per ring */
    239 static int ixv_rxd = DEFAULT_RXD;
    240 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    241 
    242 /*
     243 ** Shadow VFTA table: this is needed because
    244 ** the real filter table gets cleared during
    245 ** a soft reset and we need to repopulate it.
    246 */
    247 static u32 ixv_shadow_vfta[VFTA_SIZE];
    248 
     249 /* Keep a running tab on them for a sanity check */
    250 static int ixv_total_ports;
    251 
    252 /*********************************************************************
    253  *  Device identification routine
    254  *
    255  *  ixv_probe determines if the driver should be loaded on
    256  *  adapter based on PCI vendor/device id of the adapter.
    257  *
    258  *  return 1 on success, 0 on failure
    259  *********************************************************************/
    260 
    261 static int
    262 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    263 {
    264 	const struct pci_attach_args *pa = aux;
    265 
    266 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    267 }
    268 
    269 static ixv_vendor_info_t *
    270 ixv_lookup(const struct pci_attach_args *pa)
    271 {
    272 	pcireg_t subid;
    273 	ixv_vendor_info_t *ent;
    274 
    275 	INIT_DEBUGOUT("ixv_probe: begin");
    276 
    277 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    278 		return NULL;
    279 
    280 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    281 
    282 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    283 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    284 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    285 
    286 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    287 		     (ent->subvendor_id == 0)) &&
    288 
    289 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    290 		     (ent->subdevice_id == 0))) {
    291 			++ixv_total_ports;
    292 			return ent;
    293 		}
    294 	}
    295 	return NULL;
    296 }
    297 
    298 
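         /*
          * ixv_sysctl_attach: create the driver's sysctl nodes (stats,
          * debug, flow_control and enable_aim) under this device's
          * sysctl subtree.
          */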
    299 static void
    300 ixv_sysctl_attach(struct adapter *adapter)
    301 {
    302 	struct sysctllog **log;
    303 	const struct sysctlnode *rnode, *cnode;
    304 	device_t dev;
    305 
    306 	dev = adapter->dev;
    307 	log = &adapter->sysctllog;
    308 
    309 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    310 		aprint_error_dev(dev, "could not create sysctl root\n");
    311 		return;
    312 	}
    313 
    314 	if (sysctl_createv(log, 0, &rnode, &cnode,
    315 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    316 	    "stats", SYSCTL_DESCR("Statistics"),
    317 	    ixv_sysctl_stats, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    318 		aprint_error_dev(dev, "could not create sysctl\n");
    319 
    320 	if (sysctl_createv(log, 0, &rnode, &cnode,
    321 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    322 	    "debug", SYSCTL_DESCR("Debug Info"),
    323 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    324 		aprint_error_dev(dev, "could not create sysctl\n");
    325 
    326 	if (sysctl_createv(log, 0, &rnode, &cnode,
    327 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    328 	    "flow_control", SYSCTL_DESCR("Flow Control"),
    329 	    ixv_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    330 		aprint_error_dev(dev, "could not create sysctl\n");
    331 
    332 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    333 	 * XXX It's that way in the FreeBSD driver that this derives from.
    334 	 */
    335 	if (sysctl_createv(log, 0, &rnode, &cnode,
    336 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    337 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    338 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    339 		aprint_error_dev(dev, "could not create sysctl\n");
    340 }
    341 
    342 /*********************************************************************
    343  *  Device initialization routine
    344  *
    345  *  The attach entry point is called when the driver is being loaded.
    346  *  This routine identifies the type of hardware, allocates all resources
    347  *  and initializes the hardware.
    348  *
     349  *  return: none (errors are reported via aprint_error_dev)
    350  *********************************************************************/
    351 
    352 static void
    353 ixv_attach(device_t parent, device_t dev, void *aux)
    354 {
    355 	struct adapter *adapter;
    356 	struct ixgbe_hw *hw;
    357 	int             error = 0;
    358 	ixv_vendor_info_t *ent;
    359 	const struct pci_attach_args *pa = aux;
    360 
    361 	INIT_DEBUGOUT("ixv_attach: begin");
    362 
    363 	/* Allocate, clear, and link in our adapter structure */
    364 	adapter = device_private(dev);
    365 	adapter->dev = adapter->osdep.dev = dev;
    366 	hw = &adapter->hw;
    367 
    368 	ent = ixv_lookup(pa);
    369 
    370 	KASSERT(ent != NULL);
    371 
    372 	aprint_normal(": %s, Version - %s\n",
    373 	    ixv_strings[ent->index], ixv_driver_version);
    374 
    375 	/* Core Lock Init*/
    376 	IXV_CORE_LOCK_INIT(adapter, device_xname(dev));
    377 
    378 	/* SYSCTL APIs */
    379 	ixv_sysctl_attach(adapter);
    380 
    381 	/* Set up the timer callout */
    382 	callout_init(&adapter->timer, 0);
    383 
    384 	/* Determine hardware revision */
    385 	ixv_identify_hardware(adapter);
    386 
    387 	/* Do base PCI setup - map BAR0 */
    388 	if (ixv_allocate_pci_resources(adapter, pa)) {
    389 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    390 		error = ENXIO;
    391 		goto err_out;
    392 	}
    393 
    394 	/* Do descriptor calc and sanity checks */
    395 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    396 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    397 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    398 		adapter->num_tx_desc = DEFAULT_TXD;
    399 	} else
    400 		adapter->num_tx_desc = ixv_txd;
    401 
    402 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
     403 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    404 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    405 		adapter->num_rx_desc = DEFAULT_RXD;
    406 	} else
    407 		adapter->num_rx_desc = ixv_rxd;
    408 
    409 	/* Allocate our TX/RX Queues */
    410 	if (ixv_allocate_queues(adapter)) {
    411 		error = ENOMEM;
    412 		goto err_out;
    413 	}
    414 
    415 	/*
     416 	** Initialize the shared code: it's
     417 	** at this point that the MAC type is set.
    418 	*/
    419 	error = ixgbe_init_shared_code(hw);
    420 	if (error) {
    421 		aprint_error_dev(dev,"Shared Code Initialization Failure\n");
    422 		error = EIO;
    423 		goto err_late;
    424 	}
    425 
    426 	/* Setup the mailbox */
    427 	ixgbe_init_mbx_params_vf(hw);
    428 
    429 	ixgbe_reset_hw(hw);
    430 
     431 	/* Set the default Hardware Flow Control settings */
    432 	hw->fc.requested_mode = ixgbe_fc_full;
    433 	hw->fc.pause_time = IXV_FC_PAUSE;
    434 	hw->fc.low_water = IXV_FC_LO;
    435 	hw->fc.high_water = IXV_FC_HI;
    436 	hw->fc.send_xon = TRUE;
    437 
    438 	error = ixgbe_init_hw(hw);
    439 	if (error) {
    440 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    441 		error = EIO;
    442 		goto err_late;
    443 	}
    444 
    445 	error = ixv_allocate_msix(adapter);
    446 	if (error)
    447 		goto err_late;
    448 
    449 	/* Setup OS specific network interface */
    450 	ixv_setup_interface(dev, adapter);
    451 
    452 	/* Sysctl for limiting the amount of work done in the taskqueue */
    453 	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
    454 	    "max number of rx packets to process", &adapter->rx_process_limit,
    455 	    ixv_rx_process_limit);
    456 
    457 	/* Do the stats setup */
    458 	ixv_save_stats(adapter);
    459 	ixv_init_stats(adapter);
    460 
    461 	/* Register for VLAN events */
    462 #if 0 /* XXX msaitoh delete after write? */
    463 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    464 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    465 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    466 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    467 #endif
    468 
    469 	INIT_DEBUGOUT("ixv_attach: end");
    470 	return;
    471 
    472 err_late:
    473 	ixv_free_transmit_structures(adapter);
    474 	ixv_free_receive_structures(adapter);
    475 err_out:
    476 	ixv_free_pci_resources(adapter);
    477 	return;
    478 
    479 }
    480 
    481 /*********************************************************************
    482  *  Device removal routine
    483  *
    484  *  The detach entry point is called when the driver is being removed.
    485  *  This routine stops the adapter and deallocates all the resources
    486  *  that were allocated for driver operation.
    487  *
    488  *  return 0 on success, positive on failure
    489  *********************************************************************/
    490 
    491 static int
    492 ixv_detach(device_t dev, int flags)
    493 {
    494 	struct adapter *adapter = device_private(dev);
    495 	struct ix_queue *que = adapter->queues;
    496 
    497 	INIT_DEBUGOUT("ixv_detach: begin");
    498 
    499 	/* Make sure VLANS are not using driver */
    500 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    501 		;	/* nothing to do: no VLANs */
    502 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    503 		vlan_ifdetach(adapter->ifp);
    504 	else {
    505 		aprint_error_dev(dev, "VLANs in use\n");
    506 		return EBUSY;
    507 	}
    508 
    509 	IXV_CORE_LOCK(adapter);
    510 	ixv_stop(adapter);
    511 	IXV_CORE_UNLOCK(adapter);
    512 
    513 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    514 		softint_disestablish(que->que_si);
    515 	}
    516 
    517 	/* Drain the Link queue */
    518 	softint_disestablish(adapter->mbx_si);
    519 
    520 	/* Unregister VLAN events */
    521 #if 0 /* XXX msaitoh delete after write? */
    522 	if (adapter->vlan_attach != NULL)
    523 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    524 	if (adapter->vlan_detach != NULL)
    525 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    526 #endif
    527 
    528 	ether_ifdetach(adapter->ifp);
    529 	callout_halt(&adapter->timer, NULL);
    530 	ixv_free_pci_resources(adapter);
    531 #if 0 /* XXX the NetBSD port is probably missing something here */
    532 	bus_generic_detach(dev);
    533 #endif
    534 	if_detach(adapter->ifp);
    535 
    536 	ixv_free_transmit_structures(adapter);
    537 	ixv_free_receive_structures(adapter);
    538 
    539 	IXV_CORE_LOCK_DESTROY(adapter);
    540 	return (0);
    541 }
    542 
    543 /*********************************************************************
    544  *
    545  *  Shutdown entry point
    546  *
    547  **********************************************************************/
    548 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    549 static int
    550 ixv_shutdown(device_t dev)
    551 {
    552 	struct adapter *adapter = device_private(dev);
    553 	IXV_CORE_LOCK(adapter);
    554 	ixv_stop(adapter);
    555 	IXV_CORE_UNLOCK(adapter);
    556 	return (0);
    557 }
    558 #endif
    559 
    560 #if __FreeBSD_version < 800000
    561 /*********************************************************************
    562  *  Transmit entry point
    563  *
    564  *  ixv_start is called by the stack to initiate a transmit.
    565  *  The driver will remain in this routine as long as there are
    566  *  packets to transmit and transmit resources are available.
     567  *  In case resources are not available, the stack is notified and
    568  *  the packet is requeued.
    569  **********************************************************************/
    570 static void
    571 ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
    572 {
    573 	int rc;
    574 	struct mbuf    *m_head;
    575 	struct adapter *adapter = txr->adapter;
    576 
    577 	IXV_TX_LOCK_ASSERT(txr);
    578 
    579 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
    580 	    IFF_RUNNING)
    581 		return;
    582 	if (!adapter->link_active)
    583 		return;
    584 
    585 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    586 
    587 		IFQ_POLL(&ifp->if_snd, m_head);
    588 		if (m_head == NULL)
    589 			break;
    590 
     591 		if ((rc = ixv_xmit(txr, m_head)) == EAGAIN) {
    592 			ifp->if_flags |= IFF_OACTIVE;
    593 			break;
    594 		}
    595 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
    596 		if (rc == EFBIG) {
    597 			struct mbuf *mtmp;
    598 
    599 			if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) {
    600 				m_head = mtmp;
    601 				rc = ixv_xmit(txr, m_head);
    602 				if (rc != 0)
    603 					adapter->efbig2_tx_dma_setup.ev_count++;
    604 			} else
    605 				adapter->m_defrag_failed.ev_count++;
    606 		}
    607 		if (rc != 0) {
    608 			m_freem(m_head);
    609 			continue;
    610 		}
    611 		/* Send a copy of the frame to the BPF listener */
    612 		bpf_mtap(ifp, m_head);
    613 
    614 		/* Set watchdog on */
    615 		txr->watchdog_check = TRUE;
    616 		getmicrotime(&txr->watchdog_time);
    617 	}
    618 	return;
    619 }
    620 
    621 /*
    622  * Legacy TX start - called by the stack, this
    623  * always uses the first tx ring, and should
    624  * not be used with multiqueue tx enabled.
    625  */
    626 static void
    627 ixv_start(struct ifnet *ifp)
    628 {
    629 	struct adapter *adapter = ifp->if_softc;
    630 	struct tx_ring	*txr = adapter->tx_rings;
    631 
    632 	if (ifp->if_flags & IFF_RUNNING) {
    633 		IXV_TX_LOCK(txr);
    634 		ixv_start_locked(txr, ifp);
    635 		IXV_TX_UNLOCK(txr);
    636 	}
    637 	return;
    638 }
    639 
    640 #else
    641 
    642 /*
    643 ** Multiqueue Transmit driver
    644 **
    645 */
    646 static int
    647 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
    648 {
    649 	struct adapter	*adapter = ifp->if_softc;
    650 	struct ix_queue	*que;
    651 	struct tx_ring	*txr;
    652 	int 		i = 0, err = 0;
    653 
    654 	/* Which queue to use */
    655 	if ((m->m_flags & M_FLOWID) != 0)
    656 		i = m->m_pkthdr.flowid % adapter->num_queues;
    657 
    658 	txr = &adapter->tx_rings[i];
    659 	que = &adapter->queues[i];
    660 
    661 	if (IXV_TX_TRYLOCK(txr)) {
    662 		err = ixv_mq_start_locked(ifp, txr, m);
    663 		IXV_TX_UNLOCK(txr);
    664 	} else {
    665 		err = drbr_enqueue(ifp, txr->br, m);
    666 		softint_schedule(que->que_si);
    667 	}
    668 
    669 	return (err);
    670 }
    671 
    672 static int
    673 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
    674 {
    675 	struct adapter  *adapter = txr->adapter;
    676         struct mbuf     *next;
    677         int             enqueued, err = 0;
    678 
    679 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
    680 	    IFF_RUNNING || adapter->link_active == 0) {
    681 		if (m != NULL)
    682 			err = drbr_enqueue(ifp, txr->br, m);
    683 		return (err);
    684 	}
    685 
    686 	/* Do a clean if descriptors are low */
    687 	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
    688 		ixv_txeof(txr);
    689 
    690 	enqueued = 0;
    691 	if (m == NULL) {
    692 		next = drbr_dequeue(ifp, txr->br);
    693 	} else if (drbr_needs_enqueue(ifp, txr->br)) {
    694 		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
    695 			return (err);
    696 		next = drbr_dequeue(ifp, txr->br);
    697 	} else
    698 		next = m;
    699 
    700 	/* Process the queue */
    701 	while (next != NULL) {
    702 		if ((err = ixv_xmit(txr, next)) != 0) {
    703 			if (next != NULL)
    704 				err = drbr_enqueue(ifp, txr->br, next);
    705 			break;
    706 		}
    707 		enqueued++;
    708 		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
    709 		/* Send a copy of the frame to the BPF listener */
    710 		ETHER_BPF_MTAP(ifp, next);
    711 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    712 			break;
    713 		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
    714 			ifp->if_flags |= IFF_OACTIVE;
    715 			break;
    716 		}
    717 		next = drbr_dequeue(ifp, txr->br);
    718 	}
    719 
    720 	if (enqueued > 0) {
    721 		/* Set watchdog on */
    722 		txr->watchdog_check = TRUE;
    723 		getmicrotime(&txr->watchdog_time);
    724 	}
    725 
    726 	return (err);
    727 }
    728 
    729 /*
    730 ** Flush all ring buffers
    731 */
    732 static void
    733 ixv_qflush(struct ifnet *ifp)
    734 {
    735 	struct adapter  *adapter = ifp->if_softc;
    736 	struct tx_ring  *txr = adapter->tx_rings;
    737 	struct mbuf     *m;
    738 
    739 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    740 		IXV_TX_LOCK(txr);
    741 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
    742 			m_freem(m);
    743 		IXV_TX_UNLOCK(txr);
    744 	}
    745 	if_qflush(ifp);
    746 }
    747 
    748 #endif
    749 
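         /*
          * Interface flags (SIOCSIFFLAGS) change callback: record the new
          * flags and return ENETRESET to request a reinit when anything
          * other than IFF_CANTCHANGE/IFF_DEBUG has changed.
          */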
    750 static int
    751 ixv_ifflags_cb(struct ethercom *ec)
    752 {
    753 	struct ifnet *ifp = &ec->ec_if;
    754 	struct adapter *adapter = ifp->if_softc;
    755 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    756 
    757 	IXV_CORE_LOCK(adapter);
    758 
    759 	if (change != 0)
    760 		adapter->if_flags = ifp->if_flags;
    761 
    762 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    763 		rc = ENETRESET;
    764 
    765 	IXV_CORE_UNLOCK(adapter);
    766 
    767 	return rc;
    768 }
    769 
    770 /*********************************************************************
    771  *  Ioctl entry point
    772  *
    773  *  ixv_ioctl is called when the user wants to configure the
    774  *  interface.
    775  *
    776  *  return 0 on success, positive on failure
    777  **********************************************************************/
    778 
    779 static int
    780 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
    781 {
    782 	struct adapter	*adapter = ifp->if_softc;
    783 	struct ifcapreq *ifcr = data;
    784 	struct ifreq	*ifr = (struct ifreq *) data;
    785 	int             error = 0;
    786 	int l4csum_en;
    787 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    788 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    789 
    790 	switch (command) {
    791 	case SIOCSIFFLAGS:
    792 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    793 		break;
    794 	case SIOCADDMULTI:
    795 	case SIOCDELMULTI:
    796 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    797 		break;
    798 	case SIOCSIFMEDIA:
    799 	case SIOCGIFMEDIA:
    800 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    801 		break;
    802 	case SIOCSIFCAP:
    803 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    804 		break;
    805 	case SIOCSIFMTU:
    806 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    807 		break;
    808 	default:
    809 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
    810 		break;
    811 	}
    812 
    813 	switch (command) {
    814 	case SIOCSIFMEDIA:
    815 	case SIOCGIFMEDIA:
    816 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    817 	case SIOCSIFCAP:
    818 		/* Layer-4 Rx checksum offload has to be turned on and
    819 		 * off as a unit.
    820 		 */
    821 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    822 		if (l4csum_en != l4csum && l4csum_en != 0)
    823 			return EINVAL;
    824 		/*FALLTHROUGH*/
    825 	case SIOCADDMULTI:
    826 	case SIOCDELMULTI:
    827 	case SIOCSIFFLAGS:
    828 	case SIOCSIFMTU:
    829 	default:
    830 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    831 			return error;
    832 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    833 			;
    834 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    835 			IXV_CORE_LOCK(adapter);
    836 			ixv_init_locked(adapter);
    837 			IXV_CORE_UNLOCK(adapter);
    838 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    839 			/*
    840 			 * Multicast list has changed; set the hardware filter
    841 			 * accordingly.
    842 			 */
    843 			IXV_CORE_LOCK(adapter);
    844 			ixv_disable_intr(adapter);
    845 			ixv_set_multi(adapter);
    846 			ixv_enable_intr(adapter);
    847 			IXV_CORE_UNLOCK(adapter);
    848 		}
    849 		return 0;
    850 	}
    851 }
    852 
    853 /*********************************************************************
    854  *  Init entry point
    855  *
    856  *  This routine is used in two ways. It is used by the stack as
     857  *  the init entry point in the network interface structure. It is also used
    858  *  by the driver as a hw/sw initialization routine to get to a
    859  *  consistent state.
    860  *
    861  *  return 0 on success, positive on failure
    862  **********************************************************************/
    863 #define IXGBE_MHADD_MFS_SHIFT 16
    864 
    865 static void
    866 ixv_init_locked(struct adapter *adapter)
    867 {
    868 	struct ifnet	*ifp = adapter->ifp;
    869 	device_t 	dev = adapter->dev;
    870 	struct ixgbe_hw *hw = &adapter->hw;
    871 	u32		mhadd, gpie;
    872 
    873 	INIT_DEBUGOUT("ixv_init: begin");
    874 	KASSERT(mutex_owned(&adapter->core_mtx));
    875 	hw->adapter_stopped = FALSE;
    876 	ixgbe_stop_adapter(hw);
    877         callout_stop(&adapter->timer);
    878 
    879         /* reprogram the RAR[0] in case user changed it. */
    880         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    881 
     882 	/* Get the latest MAC address; the user may have set a LAA */
    883 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    884 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    885         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    886 	hw->addr_ctrl.rar_used_count = 1;
    887 
    888 	/* Prepare transmit descriptors and buffers */
    889 	if (ixv_setup_transmit_structures(adapter)) {
    890 		aprint_error_dev(dev,"Could not setup transmit structures\n");
    891 		ixv_stop(adapter);
    892 		return;
    893 	}
    894 
    895 	ixgbe_reset_hw(hw);
    896 	ixv_initialize_transmit_units(adapter);
    897 
    898 	/* Setup Multicast table */
    899 	ixv_set_multi(adapter);
    900 
    901 	/*
    902 	** Determine the correct mbuf pool
    903 	** for doing jumbo/headersplit
    904 	*/
    905 	if (ifp->if_mtu > ETHERMTU)
    906 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    907 	else
    908 		adapter->rx_mbuf_sz = MCLBYTES;
    909 
    910 	/* Prepare receive descriptors and buffers */
    911 	if (ixv_setup_receive_structures(adapter)) {
    912 		device_printf(dev,"Could not setup receive structures\n");
    913 		ixv_stop(adapter);
    914 		return;
    915 	}
    916 
    917 	/* Configure RX settings */
    918 	ixv_initialize_receive_units(adapter);
    919 
    920 	/* Enable Enhanced MSIX mode */
    921 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    922 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    923 	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    924         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
    925 
    926 #if 0 /* XXX isn't it required? -- msaitoh  */
    927 	/* Set the various hardware offload abilities */
    928 	ifp->if_hwassist = 0;
    929 	if (ifp->if_capenable & IFCAP_TSO4)
    930 		ifp->if_hwassist |= CSUM_TSO;
    931 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    932 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    933 #if __FreeBSD_version >= 800000
    934 		ifp->if_hwassist |= CSUM_SCTP;
    935 #endif
    936 	}
    937 #endif
    938 
    939 	/* Set MTU size */
    940 	if (ifp->if_mtu > ETHERMTU) {
    941 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
    942 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
    943 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
    944 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    945 	}
    946 
    947 	/* Set up VLAN offload and filter */
    948 	ixv_setup_vlan_support(adapter);
    949 
    950 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    951 
    952 	/* Set up MSI/X routing */
    953 	ixv_configure_ivars(adapter);
    954 
    955 	/* Set up auto-mask */
    956 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    957 
    958         /* Set moderation on the Link interrupt */
    959         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
    960 
    961 	/* Stats init */
    962 	ixv_init_stats(adapter);
    963 
    964 	/* Config/Enable Link */
    965 	ixv_config_link(adapter);
    966 
    967 	/* And now turn on interrupts */
    968 	ixv_enable_intr(adapter);
    969 
    970 	/* Now inform the stack we're ready */
    971 	ifp->if_flags |= IFF_RUNNING;
    972 	ifp->if_flags &= ~IFF_OACTIVE;
    973 
    974 	return;
    975 }
    976 
    977 static int
    978 ixv_init(struct ifnet *ifp)
    979 {
    980 	struct adapter *adapter = ifp->if_softc;
    981 
    982 	IXV_CORE_LOCK(adapter);
    983 	ixv_init_locked(adapter);
    984 	IXV_CORE_UNLOCK(adapter);
    985 	return 0;
    986 }
    987 
    988 
    989 /*
    990 **
    991 ** MSIX Interrupt Handlers and Tasklets
    992 **
    993 */
    994 
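         /*
          * Per-queue interrupt control: set or clear the queue's bit in
          * the VF interrupt mask registers (VTEIMS to enable, VTEIMC to
          * disable, VTEICS to rearm).
          */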
    995 static inline void
    996 ixv_enable_queue(struct adapter *adapter, u32 vector)
    997 {
    998 	struct ixgbe_hw *hw = &adapter->hw;
    999 	u32	queue = 1 << vector;
   1000 	u32	mask;
   1001 
   1002 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1003 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1004 }
   1005 
   1006 static inline void
   1007 ixv_disable_queue(struct adapter *adapter, u32 vector)
   1008 {
   1009 	struct ixgbe_hw *hw = &adapter->hw;
    1010 	u64	queue = (u64)1 << vector;
   1011 	u32	mask;
   1012 
   1013 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1014 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
   1015 }
   1016 
   1017 static inline void
   1018 ixv_rearm_queues(struct adapter *adapter, u64 queues)
   1019 {
   1020 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   1021 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
   1022 }
   1023 
   1024 
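         /*
          * Deferred (softint) per-queue service routine: clean the RX and
          * TX rings, restart transmission if packets are queued, and
          * either reschedule itself (more RX work pending) or re-enable
          * the queue interrupt.
          */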
   1025 static void
   1026 ixv_handle_que(void *context)
   1027 {
   1028 	struct ix_queue *que = context;
   1029 	struct adapter  *adapter = que->adapter;
   1030 	struct tx_ring  *txr = que->txr;
   1031 	struct ifnet    *ifp = adapter->ifp;
   1032 	bool		more;
   1033 
   1034 	if (ifp->if_flags & IFF_RUNNING) {
   1035 		more = ixv_rxeof(que, adapter->rx_process_limit);
   1036 		IXV_TX_LOCK(txr);
   1037 		ixv_txeof(txr);
   1038 #if __FreeBSD_version >= 800000
   1039 		if (!drbr_empty(ifp, txr->br))
   1040 			ixv_mq_start_locked(ifp, txr, NULL);
   1041 #else
   1042 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1043 			ixv_start_locked(txr, ifp);
   1044 #endif
   1045 		IXV_TX_UNLOCK(txr);
   1046 		if (more) {
   1047 			adapter->req.ev_count++;
   1048 			softint_schedule(que->que_si);
   1049 			return;
   1050 		}
   1051 	}
   1052 
   1053 	/* Reenable this interrupt */
   1054 	ixv_enable_queue(adapter, que->msix);
   1055 	return;
   1056 }
   1057 
   1058 /*********************************************************************
   1059  *
    1060  *  MSI-X Queue Interrupt Service routine
   1061  *
   1062  **********************************************************************/
   1063 void
   1064 ixv_msix_que(void *arg)
   1065 {
   1066 	struct ix_queue	*que = arg;
   1067 	struct adapter  *adapter = que->adapter;
   1068 	struct tx_ring	*txr = que->txr;
   1069 	struct rx_ring	*rxr = que->rxr;
   1070 	bool		more_tx, more_rx;
   1071 	u32		newitr = 0;
   1072 
   1073 	ixv_disable_queue(adapter, que->msix);
   1074 	++que->irqs;
   1075 
   1076 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1077 
   1078 	IXV_TX_LOCK(txr);
   1079 	more_tx = ixv_txeof(txr);
   1080 	IXV_TX_UNLOCK(txr);
   1081 
   1082 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
   1083 
   1084 	/* Do AIM now? */
   1085 
   1086 	if (ixv_enable_aim == FALSE)
   1087 		goto no_calc;
   1088 	/*
   1089 	** Do Adaptive Interrupt Moderation:
   1090         **  - Write out last calculated setting
   1091 	**  - Calculate based on average size over
   1092 	**    the last interval.
   1093 	*/
   1094         if (que->eitr_setting)
   1095                 IXGBE_WRITE_REG(&adapter->hw,
   1096                     IXGBE_VTEITR(que->msix),
   1097 		    que->eitr_setting);
   1098 
   1099         que->eitr_setting = 0;
   1100 
   1101         /* Idle, do nothing */
   1102         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1103                 goto no_calc;
   1104 
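         	/*
         	 * The new interval is based on the larger of the average
         	 * TX and RX packet size (bytes/packets) seen since the
         	 * last write, then clamped and scaled below.
         	 */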
   1105 	if ((txr->bytes) && (txr->packets))
   1106                	newitr = txr->bytes/txr->packets;
   1107 	if ((rxr->bytes) && (rxr->packets))
   1108 		newitr = max(newitr,
   1109 		    (rxr->bytes / rxr->packets));
   1110 	newitr += 24; /* account for hardware frame, crc */
   1111 
   1112 	/* set an upper boundary */
   1113 	newitr = min(newitr, 3000);
   1114 
   1115 	/* Be nice to the mid range */
   1116 	if ((newitr > 300) && (newitr < 1200))
   1117 		newitr = (newitr / 3);
   1118 	else
   1119 		newitr = (newitr / 2);
   1120 
   1121 	newitr |= newitr << 16;
   1122 
   1123         /* save for next interrupt */
   1124         que->eitr_setting = newitr;
   1125 
   1126         /* Reset state */
   1127         txr->bytes = 0;
   1128         txr->packets = 0;
   1129         rxr->bytes = 0;
   1130         rxr->packets = 0;
   1131 
   1132 no_calc:
   1133 	if (more_tx || more_rx)
   1134 		softint_schedule(que->que_si);
   1135 	else /* Reenable this interrupt */
   1136 		ixv_enable_queue(adapter, que->msix);
   1137 	return;
   1138 }
   1139 
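         /*
          * Mailbox/link MSI-X handler: read and clear the interrupt cause,
          * schedule the mailbox softint on a link status change, and
          * re-enable the "other" interrupt.
          */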
   1140 static void
   1141 ixv_msix_mbx(void *arg)
   1142 {
   1143 	struct adapter	*adapter = arg;
   1144 	struct ixgbe_hw *hw = &adapter->hw;
   1145 	u32		reg;
   1146 
   1147 	++adapter->mbx_irq.ev_count;
   1148 
   1149 	/* First get the cause */
   1150 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
   1151 	/* Clear interrupt with write */
   1152 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
   1153 
   1154 	/* Link status change */
   1155 	if (reg & IXGBE_EICR_LSC)
   1156 		softint_schedule(adapter->mbx_si);
   1157 
   1158 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1159 	return;
   1160 }
   1161 
   1162 /*********************************************************************
   1163  *
   1164  *  Media Ioctl callback
   1165  *
   1166  *  This routine is called whenever the user queries the status of
   1167  *  the interface using ifconfig.
   1168  *
   1169  **********************************************************************/
   1170 static void
   1171 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1172 {
   1173 	struct adapter *adapter = ifp->if_softc;
   1174 
   1175 	INIT_DEBUGOUT("ixv_media_status: begin");
   1176 	IXV_CORE_LOCK(adapter);
   1177 	ixv_update_link_status(adapter);
   1178 
   1179 	ifmr->ifm_status = IFM_AVALID;
   1180 	ifmr->ifm_active = IFM_ETHER;
   1181 
   1182 	if (!adapter->link_active) {
   1183 		IXV_CORE_UNLOCK(adapter);
   1184 		return;
   1185 	}
   1186 
   1187 	ifmr->ifm_status |= IFM_ACTIVE;
   1188 
   1189 	switch (adapter->link_speed) {
   1190 		case IXGBE_LINK_SPEED_1GB_FULL:
   1191 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1192 			break;
   1193 		case IXGBE_LINK_SPEED_10GB_FULL:
   1194 			ifmr->ifm_active |= IFM_FDX;
   1195 			break;
   1196 	}
   1197 
   1198 	IXV_CORE_UNLOCK(adapter);
   1199 
   1200 	return;
   1201 }
   1202 
   1203 /*********************************************************************
   1204  *
   1205  *  Media Ioctl callback
   1206  *
   1207  *  This routine is called when the user changes speed/duplex using
    1208  *  the media/mediaopt options with ifconfig.
   1209  *
   1210  **********************************************************************/
   1211 static int
   1212 ixv_media_change(struct ifnet * ifp)
   1213 {
   1214 	struct adapter *adapter = ifp->if_softc;
   1215 	struct ifmedia *ifm = &adapter->media;
   1216 
   1217 	INIT_DEBUGOUT("ixv_media_change: begin");
   1218 
   1219 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1220 		return (EINVAL);
   1221 
   1222         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1223         case IFM_AUTO:
   1224                 break;
   1225         default:
   1226                 device_printf(adapter->dev, "Only auto media type\n");
   1227 		return (EINVAL);
   1228         }
   1229 
   1230 	return (0);
   1231 }
   1232 
   1233 /*********************************************************************
   1234  *
   1235  *  This routine maps the mbufs to tx descriptors, allowing the
   1236  *  TX engine to transmit the packets.
   1237  *  	- return 0 on success, positive on failure
   1238  *
   1239  **********************************************************************/
   1240 
   1241 static int
   1242 ixv_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1243 {
   1244 	struct m_tag *mtag;
   1245 	struct adapter  *adapter = txr->adapter;
   1246 	struct ethercom *ec = &adapter->osdep.ec;
   1247 	u32		olinfo_status = 0, cmd_type_len;
   1248 	u32		paylen = 0;
   1249 	int             i, j, error, nsegs;
   1250 	int		first, last = 0;
   1251 	bus_dmamap_t	map;
   1252 	struct ixv_tx_buf *txbuf;
   1253 	union ixgbe_adv_tx_desc *txd = NULL;
   1254 
   1255 	/* Basic descriptor defines */
   1256         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1257 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1258 
   1259 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1260         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1261 
   1262         /*
   1263          * Important to capture the first descriptor
   1264          * used because it will contain the index of
   1265          * the one we tell the hardware to report back
   1266          */
   1267         first = txr->next_avail_desc;
   1268 	txbuf = &txr->tx_buffers[first];
   1269 	map = txbuf->map;
   1270 
   1271 	/*
   1272 	 * Map the packet for DMA.
   1273 	 */
   1274 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1275 	    m_head, BUS_DMA_NOWAIT);
   1276 
   1277 	switch (error) {
   1278 	case EAGAIN:
   1279 		adapter->eagain_tx_dma_setup.ev_count++;
   1280 		return EAGAIN;
   1281 	case ENOMEM:
   1282 		adapter->enomem_tx_dma_setup.ev_count++;
   1283 		return EAGAIN;
   1284 	case EFBIG:
   1285 		adapter->efbig_tx_dma_setup.ev_count++;
   1286 		return error;
   1287 	case EINVAL:
   1288 		adapter->einval_tx_dma_setup.ev_count++;
   1289 		return error;
   1290 	default:
   1291 		adapter->other_tx_dma_setup.ev_count++;
   1292 		return error;
   1293 	case 0:
   1294 		break;
   1295 	}
   1296 
   1297 	/* Make certain there are enough descriptors */
    1298 	if ((nsegs = map->dm_nsegs) > txr->tx_avail - 2) {
   1299 		txr->no_desc_avail.ev_count++;
   1300 		/* XXX s/ixgbe/ixv/ */
   1301 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1302 		return EAGAIN;
   1303 	}
   1304 
   1305 	/*
   1306 	** Set up the appropriate offload context
   1307 	** this becomes the first descriptor of
   1308 	** a packet.
   1309 	*/
   1310 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1311 		if (ixv_tso_setup(txr, m_head, &paylen)) {
   1312 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1313 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1314 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1315 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1316 			++adapter->tso_tx.ev_count;
   1317 		} else {
   1318 			++adapter->tso_err.ev_count;
   1319 			/* XXX unload DMA map! --dyoung -> easy? --msaitoh */
   1320 			return (ENXIO);
   1321 		}
   1322 	} else
   1323 		olinfo_status |= ixv_tx_ctx_setup(txr, m_head);
   1324 
   1325         /* Record payload length */
   1326 	if (paylen == 0)
   1327         	olinfo_status |= m_head->m_pkthdr.len <<
   1328 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1329 
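         	/*
         	 * Fill one advanced TX descriptor per DMA segment; the last
         	 * descriptor written gets EOP/RS set after the loop.
         	 */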
   1330 	i = txr->next_avail_desc;
   1331 	for (j = 0; j < map->dm_nsegs; j++) {
   1332 		bus_size_t seglen;
   1333 		bus_addr_t segaddr;
   1334 
   1335 		txbuf = &txr->tx_buffers[i];
   1336 		txd = &txr->tx_base[i];
   1337 		seglen = map->dm_segs[j].ds_len;
   1338 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1339 
   1340 		txd->read.buffer_addr = segaddr;
   1341 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1342 		    cmd_type_len |seglen);
   1343 		txd->read.olinfo_status = htole32(olinfo_status);
   1344 		last = i; /* descriptor that will get completion IRQ */
   1345 
   1346 		if (++i == adapter->num_tx_desc)
   1347 			i = 0;
   1348 
   1349 		txbuf->m_head = NULL;
   1350 		txbuf->eop_index = -1;
   1351 	}
   1352 
   1353 	txd->read.cmd_type_len |=
   1354 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1355 	txr->tx_avail -= map->dm_nsegs;
   1356 	txr->next_avail_desc = i;
   1357 
   1358 	txbuf->m_head = m_head;
   1359 	/* Swap the dma map between the first and last descriptor */
   1360 	txr->tx_buffers[first].map = txbuf->map;
   1361 	txbuf->map = map;
   1362 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1363 	    BUS_DMASYNC_PREWRITE);
   1364 
   1365         /* Set the index of the descriptor that will be marked done */
   1366         txbuf = &txr->tx_buffers[first];
   1367 	txbuf->eop_index = last;
   1368 
   1369 	/* XXX s/ixgbe/ixg/ */
   1370         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1371             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1372 	/*
   1373 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
   1374 	 * hardware that this frame is available to transmit.
   1375 	 */
   1376 	++txr->total_packets.ev_count;
   1377 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
   1378 
   1379 	return 0;
   1380 }
   1381 
   1382 
   1383 /*********************************************************************
   1384  *  Multicast Update
   1385  *
    1386  *  This routine is called whenever the multicast address list is updated.
   1387  *
   1388  **********************************************************************/
   1389 #define IXGBE_RAR_ENTRIES 16
   1390 
   1391 static void
   1392 ixv_set_multi(struct adapter *adapter)
   1393 {
   1394 	struct ether_multi *enm;
   1395 	struct ether_multistep step;
   1396 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1397 	u8	*update_ptr;
   1398 	int	mcnt = 0;
   1399 	struct ethercom *ec = &adapter->osdep.ec;
   1400 
   1401 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1402 
   1403 	ETHER_FIRST_MULTI(step, ec, enm);
   1404 	while (enm != NULL) {
   1405 		bcopy(enm->enm_addrlo,
   1406 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1407 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1408 		mcnt++;
   1409 		/* XXX This might be required --msaitoh */
   1410 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1411 			break;
   1412 		ETHER_NEXT_MULTI(step, enm);
   1413 	}
   1414 
   1415 	update_ptr = mta;
   1416 
   1417 	ixgbe_update_mc_addr_list(&adapter->hw,
   1418 	    update_ptr, mcnt, ixv_mc_array_itr);
   1419 
   1420 	return;
   1421 }
   1422 
   1423 /*
    1424  * This is an iterator function needed by the shared multicast
    1425  * code. It simply feeds the shared code routine the addresses
    1426  * built up in the array by ixv_set_multi(), one by one.
   1427  */
   1428 static u8 *
   1429 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1430 {
   1431 	u8 *addr = *update_ptr;
   1432 	u8 *newptr;
   1433 	*vmdq = 0;
   1434 
   1435 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1436 	*update_ptr = newptr;
   1437 	return addr;
   1438 }
   1439 
   1440 /*********************************************************************
   1441  *  Timer routine
   1442  *
    1443  *  This routine checks the link status, updates statistics,
   1444  *  and runs the watchdog check.
   1445  *
   1446  **********************************************************************/
   1447 
   1448 static void
   1449 ixv_local_timer1(void *arg)
   1450 {
   1451 	struct adapter	*adapter = arg;
   1452 	device_t	dev = adapter->dev;
   1453 	struct tx_ring	*txr = adapter->tx_rings;
   1454 	int		i;
   1455 	struct timeval now, elapsed;
   1456 
   1457 	KASSERT(mutex_owned(&adapter->core_mtx));
   1458 
   1459 	ixv_update_link_status(adapter);
   1460 
   1461 	/* Stats Update */
   1462 	ixv_update_stats(adapter);
   1463 
   1464 	/*
   1465 	 * If the interface has been paused
   1466 	 * then don't do the watchdog check
   1467 	 */
   1468 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   1469 		goto out;
   1470 	/*
   1471 	** Check for time since any descriptor was cleaned
   1472 	*/
   1473         for (i = 0; i < adapter->num_queues; i++, txr++) {
   1474 		IXV_TX_LOCK(txr);
   1475 		if (txr->watchdog_check == FALSE) {
   1476 			IXV_TX_UNLOCK(txr);
   1477 			continue;
   1478 		}
   1479 		getmicrotime(&now);
   1480 		timersub(&now, &txr->watchdog_time, &elapsed);
   1481 		if (tvtohz(&elapsed) > IXV_WATCHDOG)
   1482 			goto hung;
   1483 		IXV_TX_UNLOCK(txr);
   1484 	}
   1485 out:
   1486        	ixv_rearm_queues(adapter, adapter->que_mask);
   1487 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1488 	return;
   1489 
   1490 hung:
   1491 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1492 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
   1493 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
   1494 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
   1495 	device_printf(dev,"TX(%d) desc avail = %d,"
   1496 	    "Next TX to Clean = %d\n",
   1497 	    txr->me, txr->tx_avail, txr->next_to_clean);
   1498 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1499 	adapter->watchdog_events.ev_count++;
   1500 	IXV_TX_UNLOCK(txr);
   1501 	ixv_init_locked(adapter);
   1502 }
   1503 
   1504 static void
   1505 ixv_local_timer(void *arg)
   1506 {
   1507 	struct adapter *adapter = arg;
   1508 
   1509 	IXV_CORE_LOCK(adapter);
   1510 	ixv_local_timer1(adapter);
   1511 	IXV_CORE_UNLOCK(adapter);
   1512 }
   1513 
   1514 /*
    1515 ** Note: this routine updates the OS on the link state;
   1516 **	the real check of the hardware only happens with
   1517 **	a link interrupt.
   1518 */
   1519 static void
   1520 ixv_update_link_status(struct adapter *adapter)
   1521 {
   1522 	struct ifnet	*ifp = adapter->ifp;
   1523 	struct tx_ring *txr = adapter->tx_rings;
   1524 	device_t dev = adapter->dev;
   1525 
   1526 
   1527 	if (adapter->link_up){
   1528 		if (adapter->link_active == FALSE) {
   1529 			if (bootverbose)
   1530 				device_printf(dev,"Link is up %d Gbps %s \n",
    1531 				    ((adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 10 : 1),
   1532 				    "Full Duplex");
   1533 			adapter->link_active = TRUE;
   1534 			if_link_state_change(ifp, LINK_STATE_UP);
   1535 		}
   1536 	} else { /* Link down */
   1537 		if (adapter->link_active == TRUE) {
   1538 			if (bootverbose)
   1539 				device_printf(dev,"Link is Down\n");
   1540 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1541 			adapter->link_active = FALSE;
   1542 			for (int i = 0; i < adapter->num_queues;
   1543 			    i++, txr++)
   1544 				txr->watchdog_check = FALSE;
   1545 		}
   1546 	}
   1547 
   1548 	return;
   1549 }
   1550 
   1551 
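         /* if_stop handler: bring the interface down under the core lock. */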
   1552 static void
   1553 ixv_ifstop(struct ifnet *ifp, int disable)
   1554 {
   1555 	struct adapter *adapter = ifp->if_softc;
   1556 
   1557 	IXV_CORE_LOCK(adapter);
   1558 	ixv_stop(adapter);
   1559 	IXV_CORE_UNLOCK(adapter);
   1560 }
   1561 
   1562 /*********************************************************************
   1563  *
    1564  *  This routine disables all traffic on the adapter by disabling
    1565  *  interrupts and stopping/resetting the MAC.
   1566  *
   1567  **********************************************************************/
   1568 
   1569 static void
   1570 ixv_stop(void *arg)
   1571 {
   1572 	struct ifnet   *ifp;
   1573 	struct adapter *adapter = arg;
   1574 	struct ixgbe_hw *hw = &adapter->hw;
   1575 	ifp = adapter->ifp;
   1576 
   1577 	KASSERT(mutex_owned(&adapter->core_mtx));
   1578 
   1579 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1580 	ixv_disable_intr(adapter);
   1581 
   1582 	/* Tell the stack that the interface is no longer active */
   1583 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1584 
   1585 	ixgbe_reset_hw(hw);
   1586 	adapter->hw.adapter_stopped = FALSE;
   1587 	ixgbe_stop_adapter(hw);
   1588 	callout_stop(&adapter->timer);
   1589 
   1590 	/* reprogram the RAR[0] in case user changed it. */
   1591 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1592 
   1593 	return;
   1594 }
   1595 
   1596 
   1597 /*********************************************************************
   1598  *
   1599  *  Determine hardware revision.
   1600  *
   1601  **********************************************************************/
   1602 static void
   1603 ixv_identify_hardware(struct adapter *adapter)
   1604 {
   1605 	u16		pci_cmd_word;
   1606 	pcitag_t tag;
   1607 	pci_chipset_tag_t pc;
   1608 	pcireg_t subid, id;
   1609 	struct ixgbe_hw *hw = &adapter->hw;
   1610 
   1611 	pc = adapter->osdep.pc;
   1612 	tag = adapter->osdep.tag;
   1613 
   1614 	/*
    1615 	** Make sure BUSMASTER is set; on a VM under
    1616 	** KVM it may not be, and that will break things.
   1617 	*/
   1618 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1619 	if (!((pci_cmd_word & PCI_COMMAND_MASTER_ENABLE) &&
   1620 	    (pci_cmd_word & PCI_COMMAND_MEM_ENABLE))) {
   1621 		INIT_DEBUGOUT("Memory Access and/or Bus Master "
   1622 		    "bits were not set!\n");
   1623 		pci_cmd_word |=
   1624 		    (PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE);
   1625 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1626 	}
   1627 
   1628 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1629 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1630 
   1631 	/* Save off the information about this board */
   1632 	hw->vendor_id = PCI_VENDOR(id);
   1633 	hw->device_id = PCI_PRODUCT(id);
   1634 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1635 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1636 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1637 
   1638 	return;
   1639 }
   1640 
   1641 /*********************************************************************
   1642  *
   1643  *  Setup MSIX Interrupt resources and handlers
   1644  *
   1645  **********************************************************************/
   1646 static int
   1647 ixv_allocate_msix(struct adapter *adapter)
   1648 {
   1649 #if !defined(NETBSD_MSI_OR_MSIX)
   1650 	return 0;
   1651 #else
   1652 	device_t        dev = adapter->dev;
   1653 	struct 		ix_queue *que = adapter->queues;
   1654 	int 		error, rid, vector = 0;
   1655 	pcitag_t tag;
   1656 	pci_chipset_tag_t pc;
   1657 
   1658 	pc = adapter->osdep.pc;
   1659 	tag = adapter->osdep.tag;
   1660 
   1661 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   1662 		rid = vector + 1;
   1663 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
   1664 		    RF_SHAREABLE | RF_ACTIVE);
   1665 		if (que->res == NULL) {
   1666 			aprint_error_dev(dev,"Unable to allocate"
   1667 		    	    " bus resource: que interrupt [%d]\n", vector);
   1668 			return (ENXIO);
   1669 		}
   1670 		/* Set the handler function */
   1671 		error = bus_setup_intr(dev, que->res,
   1672 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   1673 		    ixv_msix_que, que, &que->tag);
   1674 		if (error) {
   1675 			que->res = NULL;
   1676 			aprint_error_dev(dev,
   1677 			    "Failed to register QUE handler");
   1678 			return (error);
   1679 		}
   1680 #if __FreeBSD_version >= 800504
   1681 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
   1682 #endif
   1683 		que->msix = vector;
   1684         	adapter->que_mask |= (u64)(1 << que->msix);
   1685 		/*
   1686 		** Bind the msix vector, and thus the
   1687 		** ring to the corresponding cpu.
   1688 		*/
   1689 		if (adapter->num_queues > 1)
   1690 			bus_bind_intr(dev, que->res, i);
   1691 
   1692 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
   1693 		    que);
   1694 	}
   1695 
   1696 	/* and Mailbox */
   1697 	rid = vector + 1;
   1698 	adapter->res = bus_alloc_resource_any(dev,
   1699     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
   1700 	if (!adapter->res) {
   1701 		aprint_error_dev(dev,"Unable to allocate"
   1702     	    " bus resource: MBX interrupt [%d]\n", rid);
   1703 		return (ENXIO);
   1704 	}
   1705 	/* Set the mbx handler function */
   1706 	error = bus_setup_intr(dev, adapter->res,
   1707 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   1708 	    ixv_msix_mbx, adapter, &adapter->tag);
   1709 	if (error) {
   1710 		adapter->res = NULL;
   1711 		aprint_error_dev(dev, "Failed to register LINK handler");
   1712 		return (error);
   1713 	}
   1714 #if __FreeBSD_version >= 800504
   1715 	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
   1716 #endif
   1717 	adapter->mbxvec = vector;
   1718 	/* Tasklets for Mailbox */
   1719 	adapter->mbx_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
   1720 	    adapter);
   1721 	/*
   1722 	** Due to a broken design QEMU will fail to properly
   1723 	** enable the guest for MSIX unless the vectors in
   1724 	** the table are all set up, so we must rewrite the
   1725 	** ENABLE in the MSIX control register again at this
   1726 	** point to cause it to successfully initialize us.
   1727 	*/
   1728 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   1729 		int msix_ctrl;
    1730 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
    1731 		rid += PCI_MSIX_CTL;
    1732 		msix_ctrl = pci_conf_read(pc, tag, rid);
    1733 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
    1734 		pci_conf_write(pc, tag, rid, msix_ctrl);
   1735 	}
   1736 
   1737 	return (0);
   1738 #endif
   1739 }
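/*
** The resulting MSIX layout is, in outline:
**   vectors 0 .. num_queues-1  ->  per-queue interrupts (ixv_msix_que),
**                                  each backed by a softint (ixv_handle_que)
**   vector  num_queues         ->  mailbox interrupt (ixv_msix_mbx), used
**                                  for PF<->VF messages such as link changes
*/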
   1740 
    1741 /*
    1742  * Set up MSIX resources; note that the VF
    1743  * device MUST use MSIX, as there is no fallback.
    1744  */
   1745 static int
   1746 ixv_setup_msix(struct adapter *adapter)
   1747 {
   1748 #if !defined(NETBSD_MSI_OR_MSIX)
   1749 	return 0;
   1750 #else
   1751 	device_t dev = adapter->dev;
   1752 	int rid, vectors, want = 2;
   1753 
   1754 
   1755 	/* First try MSI/X */
   1756 	rid = PCIR_BAR(3);
   1757 	adapter->msix_mem = bus_alloc_resource_any(dev,
   1758 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   1759        	if (!adapter->msix_mem) {
   1760 		device_printf(adapter->dev,
   1761 		    "Unable to map MSIX table \n");
   1762 		goto out;
   1763 	}
   1764 
   1765 	vectors = pci_msix_count(dev);
   1766 	if (vectors < 2) {
   1767 		bus_release_resource(dev, SYS_RES_MEMORY,
   1768 		    rid, adapter->msix_mem);
   1769 		adapter->msix_mem = NULL;
   1770 		goto out;
   1771 	}
   1772 
    1773 	/*
    1774 	** We want two vectors: one for a queue,
    1775 	** plus an additional one for the mailbox.
    1776 	*/
   1777 	if (pci_alloc_msix(dev, &want) == 0) {
   1778                	device_printf(adapter->dev,
   1779 		    "Using MSIX interrupts with %d vectors\n", want);
   1780 		return (want);
   1781 	}
   1782 out:
   1783 	device_printf(adapter->dev,"MSIX config error\n");
   1784 	return (ENXIO);
   1785 #endif
   1786 }
   1787 
   1788 
   1789 static int
   1790 ixv_allocate_pci_resources(struct adapter *adapter,
   1791     const struct pci_attach_args *pa)
   1792 {
   1793 	pcireg_t	memtype;
   1794 	device_t        dev = adapter->dev;
   1795 	bus_addr_t addr;
   1796 	int flags;
   1797 
   1798 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1799 
   1800 	switch (memtype) {
   1801 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1802 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1803 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1804 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1805 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1806 			goto map_err;
   1807 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1808 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1809 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1810 		}
   1811 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1812 		     adapter->osdep.mem_size, flags,
   1813 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1814 map_err:
   1815 			adapter->osdep.mem_size = 0;
   1816 			aprint_error_dev(dev, "unable to map BAR0\n");
   1817 			return ENXIO;
   1818 		}
   1819 		break;
   1820 	default:
   1821 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1822 		return ENXIO;
   1823 	}
   1824 
   1825 	adapter->num_queues = 1;
   1826 	adapter->hw.back = &adapter->osdep;
   1827 
    1828 	/*
    1829 	** Now set up MSI/X, which should
    1830 	** return the number of
    1831 	** configured vectors.
    1832 	*/
   1833 	adapter->msix = ixv_setup_msix(adapter);
   1834 	if (adapter->msix == ENXIO)
   1835 		return (ENXIO);
   1836 	else
   1837 		return (0);
   1838 }
   1839 
   1840 static void
   1841 ixv_free_pci_resources(struct adapter * adapter)
   1842 {
   1843 #if defined(NETBSD_MSI_OR_MSIX)
   1844 	struct 		ix_queue *que = adapter->queues;
   1845 	device_t	dev = adapter->dev;
   1846 	int		rid, memrid;
   1847 
   1848 	memrid = PCI_BAR(MSIX_BAR);
   1849 
    1850 	/*
    1851 	** There is a slight possibility of a failure mode
    1852 	** in attach that will result in entering this function
    1853 	** before interrupt resources have been initialized, and
    1854 	** in that case we do not want to execute the loops below.
    1855 	** We can detect this reliably by the state of the adapter
    1856 	** res pointer.
    1857 	*/
   1858 	if (adapter->res == NULL)
   1859 		goto mem;
   1860 
   1861 	/*
   1862 	**  Release all msix queue resources:
   1863 	*/
   1864 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1865 		rid = que->msix + 1;
   1866 		if (que->tag != NULL) {
   1867 			bus_teardown_intr(dev, que->res, que->tag);
   1868 			que->tag = NULL;
   1869 		}
   1870 		if (que->res != NULL)
   1871 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
   1872 	}
   1873 
   1874 
   1875 	/* Clean the Legacy or Link interrupt last */
   1876 	if (adapter->mbxvec) /* we are doing MSIX */
   1877 		rid = adapter->mbxvec + 1;
   1878 	else
   1879 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
   1880 
   1881 	if (adapter->tag != NULL) {
   1882 		bus_teardown_intr(dev, adapter->res, adapter->tag);
   1883 		adapter->tag = NULL;
   1884 	}
   1885 	if (adapter->res != NULL)
   1886 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
   1887 
   1888 mem:
   1889 	if (adapter->msix)
   1890 		pci_release_msi(dev);
   1891 
   1892 	if (adapter->msix_mem != NULL)
   1893 		bus_release_resource(dev, SYS_RES_MEMORY,
   1894 		    memrid, adapter->msix_mem);
   1895 
   1896 	if (adapter->pci_mem != NULL)
   1897 		bus_release_resource(dev, SYS_RES_MEMORY,
   1898 		    PCIR_BAR(0), adapter->pci_mem);
   1899 
   1900 #endif
   1901 	return;
   1902 }
   1903 
   1904 /*********************************************************************
   1905  *
   1906  *  Setup networking device structure and register an interface.
   1907  *
   1908  **********************************************************************/
   1909 static void
   1910 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1911 {
   1912 	struct ethercom *ec = &adapter->osdep.ec;
   1913 	struct ifnet   *ifp;
   1914 
   1915 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1916 
   1917 	ifp = adapter->ifp = &ec->ec_if;
   1918 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1919 	ifp->if_baudrate = 1000000000;
   1920 	ifp->if_init = ixv_init;
   1921 	ifp->if_stop = ixv_ifstop;
   1922 	ifp->if_softc = adapter;
   1923 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1924 	ifp->if_ioctl = ixv_ioctl;
   1925 #if __FreeBSD_version >= 800000
   1926 	ifp->if_transmit = ixv_mq_start;
   1927 	ifp->if_qflush = ixv_qflush;
   1928 #else
   1929 	ifp->if_start = ixv_start;
   1930 #endif
   1931 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1932 
   1933 	if_attach(ifp);
   1934 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1935 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1936 
   1937 	adapter->max_frame_size =
   1938 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1939 
   1940 	/*
   1941 	 * Tell the upper layer(s) we support long frames.
   1942 	 */
   1943 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1944 
   1945 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1946 	ifp->if_capenable = 0;
   1947 
   1948 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1949 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1950 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1951 	    		| ETHERCAP_VLAN_MTU;
   1952 	ec->ec_capenable = ec->ec_capabilities;
   1953 
    1954 	/* Advertise LRO capability, but don't enable it by default */
   1955 	ifp->if_capabilities |= IFCAP_LRO;
   1956 
    1957 	/*
    1958 	** Don't turn this on by default: if vlans are
    1959 	** created on another pseudo device (e.g. lagg),
    1960 	** then vlan events are not passed through, breaking
    1961 	** operation, whereas with HW FILTER off it works. If
    1962 	** you use vlans directly on this driver you can
    1963 	** enable this and get full hardware tag filtering.
    1964 	*/
   1965 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1966 
   1967 	/*
   1968 	 * Specify the media types supported by this adapter and register
   1969 	 * callbacks to update media and link information
   1970 	 */
   1971 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1972 		     ixv_media_status);
   1973 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
   1974 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1975 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1976 
   1977 	return;
   1978 }
   1979 
   1980 static void
   1981 ixv_config_link(struct adapter *adapter)
   1982 {
   1983 	struct ixgbe_hw *hw = &adapter->hw;
   1984 	u32	autoneg, err = 0;
   1985 	bool	negotiate = TRUE;
   1986 
   1987 	if (hw->mac.ops.check_link)
   1988 		err = hw->mac.ops.check_link(hw, &autoneg,
   1989 		    &adapter->link_up, FALSE);
   1990 	if (err)
   1991 		goto out;
   1992 
   1993 	if (hw->mac.ops.setup_link)
   1994                	err = hw->mac.ops.setup_link(hw, autoneg,
   1995 		    negotiate, adapter->link_up);
   1996 out:
   1997 	return;
   1998 }
   1999 
   2000 /********************************************************************
   2001  * Manage DMA'able memory.
   2002  *******************************************************************/
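/*
** In outline, ixv_dma_malloc() below runs the usual NetBSD bus_dma(9)
** sequence for one contiguous DMA area, via the ixgbe_dma_tag wrappers:
**   1. ixgbe_dma_tag_create  - record alignment/size constraints
**   2. bus_dmamem_alloc      - allocate the physical segment(s)
**   3. bus_dmamem_map        - map them into kernel virtual space
**   4. ixgbe_dmamap_create   - create the map used by the load below
**   5. bus_dmamap_load       - obtain the bus (DMA) address
** A failure at any step unwinds the earlier ones through the fail_* labels.
**
** A caller typically does something like this (sketch only; error handling
** and the actual descriptor setup omitted):
**
**	struct ixv_dma_alloc dma;
**
**	if (ixv_dma_malloc(adapter, size, &dma, BUS_DMA_NOWAIT) == 0) {
**		... use dma.dma_vaddr (KVA) and dma.dma_paddr (bus addr) ...
**		ixv_dma_free(adapter, &dma);
**	}
*/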
   2003 
   2004 static int
   2005 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
   2006 		struct ixv_dma_alloc *dma, int mapflags)
   2007 {
   2008 	device_t dev = adapter->dev;
   2009 	int             r, rsegs;
   2010 
   2011 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2012 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2013 			       size,	/* maxsize */
   2014 			       1,	/* nsegments */
   2015 			       size,	/* maxsegsize */
   2016 			       BUS_DMA_ALLOCNOW,	/* flags */
   2017 			       &dma->dma_tag);
   2018 	if (r != 0) {
   2019 		aprint_error_dev(dev,
   2020 		    "ixv_dma_malloc: bus_dma_tag_create failed; error %u\n", r);
   2021 		goto fail_0;
   2022 	}
   2023 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2024 		size,
   2025 		dma->dma_tag->dt_alignment,
   2026 		dma->dma_tag->dt_boundary,
   2027 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2028 	if (r != 0) {
   2029 		aprint_error_dev(dev,
   2030 		    "%s: bus_dmamem_alloc failed; error %u\n", __func__, r);
   2031 		goto fail_1;
   2032 	}
   2033 
   2034 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2035 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2036 	if (r != 0) {
   2037 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2038 		    __func__, r);
   2039 		goto fail_2;
   2040 	}
   2041 
   2042 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2043 	if (r != 0) {
    2044 		aprint_error_dev(dev, "%s: bus_dmamap_create failed; error %d\n",
   2045 		    __func__, r);
   2046 		goto fail_3;
   2047 	}
   2048 
   2049 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2050 			    size,
   2051 			    NULL,
   2052 			    mapflags | BUS_DMA_NOWAIT);
   2053 	if (r != 0) {
   2054 		aprint_error_dev(dev,"%s: bus_dmamap_load failed; error %u\n",
   2055 		    __func__, r);
   2056 		goto fail_4;
   2057 	}
   2058 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2059 	dma->dma_size = size;
   2060 	return 0;
   2061 fail_4:
   2062 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2063 fail_3:
   2064 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2065 fail_2:
   2066 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2067 fail_1:
   2068 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2069 fail_0:
   2070 	dma->dma_map = NULL;
   2071 	dma->dma_tag = NULL;
   2072 	return (r);
   2073 }
   2074 
   2075 static void
   2076 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
   2077 {
   2078 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2079 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2080 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2081 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2082 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2083 }
   2084 
   2085 
   2086 /*********************************************************************
   2087  *
   2088  *  Allocate memory for the transmit and receive rings, and then
   2089  *  the descriptors associated with each, called only once at attach.
   2090  *
   2091  **********************************************************************/
   2092 static int
   2093 ixv_allocate_queues(struct adapter *adapter)
   2094 {
   2095 	device_t	dev = adapter->dev;
   2096 	struct ix_queue	*que;
   2097 	struct tx_ring	*txr;
   2098 	struct rx_ring	*rxr;
   2099 	int rsize, tsize, error = 0;
   2100 	int txconf = 0, rxconf = 0;
   2101 
   2102         /* First allocate the top level queue structs */
   2103         if (!(adapter->queues =
   2104             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2105             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2106                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2107                 error = ENOMEM;
   2108                 goto fail;
   2109         }
   2110 
    2111 	/* Next allocate the TX ring struct memory */
   2112 	if (!(adapter->tx_rings =
   2113 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2114 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2115 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2116 		error = ENOMEM;
   2117 		goto tx_fail;
   2118 	}
   2119 
   2120 	/* Next allocate the RX */
   2121 	if (!(adapter->rx_rings =
   2122 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2123 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2124 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2125 		error = ENOMEM;
   2126 		goto rx_fail;
   2127 	}
   2128 
   2129 	/* For the ring itself */
   2130 	tsize = roundup2(adapter->num_tx_desc *
   2131 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2132 
   2133 	/*
   2134 	 * Now set up the TX queues, txconf is needed to handle the
   2135 	 * possibility that things fail midcourse and we need to
   2136 	 * undo memory gracefully
   2137 	 */
   2138 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2139 		/* Set up some basics */
   2140 		txr = &adapter->tx_rings[i];
   2141 		txr->adapter = adapter;
   2142 		txr->me = i;
   2143 
   2144 		/* Initialize the TX side lock */
   2145 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2146 		    device_xname(dev), txr->me);
   2147 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2148 
   2149 		if (ixv_dma_malloc(adapter, tsize,
   2150 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2151 			aprint_error_dev(dev,
   2152 			    "Unable to allocate TX Descriptor memory\n");
   2153 			error = ENOMEM;
   2154 			goto err_tx_desc;
   2155 		}
   2156 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2157 		bzero((void *)txr->tx_base, tsize);
   2158 
   2159         	/* Now allocate transmit buffers for the ring */
   2160         	if (ixv_allocate_transmit_buffers(txr)) {
   2161 			aprint_error_dev(dev,
   2162 			    "Critical Failure setting up transmit buffers\n");
   2163 			error = ENOMEM;
   2164 			goto err_tx_desc;
   2165         	}
   2166 #if __FreeBSD_version >= 800000
   2167 		/* Allocate a buf ring */
   2168 		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
   2169 		    M_WAITOK, &txr->tx_mtx);
   2170 		if (txr->br == NULL) {
   2171 			aprint_error_dev(dev,
   2172 			    "Critical Failure setting up buf ring\n");
   2173 			error = ENOMEM;
   2174 			goto err_tx_desc;
   2175 		}
   2176 #endif
   2177 	}
   2178 
   2179 	/*
   2180 	 * Next the RX queues...
   2181 	 */
   2182 	rsize = roundup2(adapter->num_rx_desc *
   2183 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2184 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2185 		rxr = &adapter->rx_rings[i];
   2186 		/* Set up some basics */
   2187 		rxr->adapter = adapter;
   2188 		rxr->me = i;
   2189 
   2190 		/* Initialize the RX side lock */
   2191 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2192 		    device_xname(dev), rxr->me);
   2193 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2194 
   2195 		if (ixv_dma_malloc(adapter, rsize,
   2196 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2197 			aprint_error_dev(dev,
    2198 			    "Unable to allocate RX Descriptor memory\n");
   2199 			error = ENOMEM;
   2200 			goto err_rx_desc;
   2201 		}
   2202 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2203 		bzero((void *)rxr->rx_base, rsize);
   2204 
   2205         	/* Allocate receive buffers for the ring*/
   2206 		if (ixv_allocate_receive_buffers(rxr)) {
   2207 			aprint_error_dev(dev,
   2208 			    "Critical Failure setting up receive buffers\n");
   2209 			error = ENOMEM;
   2210 			goto err_rx_desc;
   2211 		}
   2212 	}
   2213 
   2214 	/*
   2215 	** Finally set up the queue holding structs
   2216 	*/
   2217 	for (int i = 0; i < adapter->num_queues; i++) {
   2218 		que = &adapter->queues[i];
   2219 		que->adapter = adapter;
   2220 		que->txr = &adapter->tx_rings[i];
   2221 		que->rxr = &adapter->rx_rings[i];
   2222 	}
   2223 
   2224 	return (0);
   2225 
   2226 err_rx_desc:
   2227 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2228 		ixv_dma_free(adapter, &rxr->rxdma);
   2229 err_tx_desc:
   2230 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2231 		ixv_dma_free(adapter, &txr->txdma);
   2232 	free(adapter->rx_rings, M_DEVBUF);
   2233 rx_fail:
   2234 	free(adapter->tx_rings, M_DEVBUF);
   2235 tx_fail:
   2236 	free(adapter->queues, M_DEVBUF);
   2237 fail:
   2238 	return (error);
   2239 }
   2240 
   2241 
   2242 /*********************************************************************
   2243  *
   2244  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2245  *  the information needed to transmit a packet on the wire. This is
   2246  *  called only once at attach, setup is done every reset.
   2247  *
   2248  **********************************************************************/
   2249 static int
   2250 ixv_allocate_transmit_buffers(struct tx_ring *txr)
   2251 {
   2252 	struct adapter *adapter = txr->adapter;
   2253 	device_t dev = adapter->dev;
   2254 	struct ixv_tx_buf *txbuf;
   2255 	int error, i;
   2256 
   2257 	/*
   2258 	 * Setup DMA descriptor areas.
   2259 	 */
   2260 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2261 			       1, 0,		/* alignment, bounds */
   2262 			       IXV_TSO_SIZE,		/* maxsize */
   2263 			       32,			/* nsegments */
   2264 			       PAGE_SIZE,		/* maxsegsize */
   2265 			       0,			/* flags */
   2266 			       &txr->txtag))) {
   2267 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   2268 		goto fail;
   2269 	}
   2270 
   2271 	if (!(txr->tx_buffers =
   2272 	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
   2273 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2274 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   2275 		error = ENOMEM;
   2276 		goto fail;
   2277 	}
   2278 
   2279         /* Create the descriptor buffer dma maps */
   2280 	txbuf = txr->tx_buffers;
   2281 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2282 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   2283 		if (error != 0) {
   2284 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   2285 			goto fail;
   2286 		}
   2287 	}
   2288 
   2289 	return 0;
   2290 fail:
    2291 	/* We free everything; this handles the case where we failed partway through */
   2292 	ixv_free_transmit_structures(adapter);
   2293 	return (error);
   2294 }
   2295 
   2296 /*********************************************************************
   2297  *
   2298  *  Initialize a transmit ring.
   2299  *
   2300  **********************************************************************/
   2301 static void
   2302 ixv_setup_transmit_ring(struct tx_ring *txr)
   2303 {
   2304 	struct adapter *adapter = txr->adapter;
   2305 	struct ixv_tx_buf *txbuf;
   2306 	int i;
   2307 
   2308 	/* Clear the old ring contents */
   2309 	IXV_TX_LOCK(txr);
   2310 	bzero((void *)txr->tx_base,
   2311 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   2312 	/* Reset indices */
   2313 	txr->next_avail_desc = 0;
   2314 	txr->next_to_clean = 0;
   2315 
   2316 	/* Free any existing tx buffers. */
   2317         txbuf = txr->tx_buffers;
   2318 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2319 		if (txbuf->m_head != NULL) {
   2320 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   2321 			    0, txbuf->m_head->m_pkthdr.len,
   2322 			    BUS_DMASYNC_POSTWRITE);
   2323 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   2324 			m_freem(txbuf->m_head);
   2325 			txbuf->m_head = NULL;
   2326 		}
   2327 		/* Clear the EOP index */
   2328 		txbuf->eop_index = -1;
   2329         }
   2330 
   2331 	/* Set number of descriptors available */
   2332 	txr->tx_avail = adapter->num_tx_desc;
   2333 
   2334 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2335 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2336 	IXV_TX_UNLOCK(txr);
   2337 }
   2338 
   2339 /*********************************************************************
   2340  *
   2341  *  Initialize all transmit rings.
   2342  *
   2343  **********************************************************************/
   2344 static int
   2345 ixv_setup_transmit_structures(struct adapter *adapter)
   2346 {
   2347 	struct tx_ring *txr = adapter->tx_rings;
   2348 
   2349 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   2350 		ixv_setup_transmit_ring(txr);
   2351 
   2352 	return (0);
   2353 }
   2354 
   2355 /*********************************************************************
   2356  *
   2357  *  Enable transmit unit.
   2358  *
   2359  **********************************************************************/
   2360 static void
   2361 ixv_initialize_transmit_units(struct adapter *adapter)
   2362 {
   2363 	struct tx_ring	*txr = adapter->tx_rings;
   2364 	struct ixgbe_hw	*hw = &adapter->hw;
   2365 
   2366 
   2367 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2368 		u64	tdba = txr->txdma.dma_paddr;
   2369 		u32	txctrl, txdctl;
   2370 
   2371 		/* Set WTHRESH to 8, burst writeback */
   2372 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2373 		txdctl |= (8 << 16);
   2374 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2375 		/* Now enable */
   2376 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2377 		txdctl |= IXGBE_TXDCTL_ENABLE;
   2378 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2379 
   2380 		/* Set the HW Tx Head and Tail indices */
   2381 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   2382 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   2383 
   2384 		/* Setup Transmit Descriptor Cmd Settings */
   2385 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   2386 		txr->watchdog_check = FALSE;
   2387 
   2388 		/* Set Ring parameters */
   2389 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   2390 		       (tdba & 0x00000000ffffffffULL));
   2391 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   2392 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   2393 		    adapter->num_tx_desc *
   2394 		    sizeof(struct ixgbe_legacy_tx_desc));
   2395 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   2396 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
   2397 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   2398 		break;
   2399 	}
   2400 
   2401 	return;
   2402 }
   2403 
   2404 /*********************************************************************
   2405  *
   2406  *  Free all transmit rings.
   2407  *
   2408  **********************************************************************/
   2409 static void
   2410 ixv_free_transmit_structures(struct adapter *adapter)
   2411 {
   2412 	struct tx_ring *txr = adapter->tx_rings;
   2413 
   2414 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2415 		ixv_free_transmit_buffers(txr);
   2416 		ixv_dma_free(adapter, &txr->txdma);
   2417 		IXV_TX_LOCK_DESTROY(txr);
   2418 	}
   2419 	free(adapter->tx_rings, M_DEVBUF);
   2420 }
   2421 
   2422 /*********************************************************************
   2423  *
   2424  *  Free transmit ring related data structures.
   2425  *
   2426  **********************************************************************/
   2427 static void
   2428 ixv_free_transmit_buffers(struct tx_ring *txr)
   2429 {
   2430 	struct adapter *adapter = txr->adapter;
   2431 	struct ixv_tx_buf *tx_buffer;
   2432 	int             i;
   2433 
   2434 	INIT_DEBUGOUT("free_transmit_ring: begin");
   2435 
   2436 	if (txr->tx_buffers == NULL)
   2437 		return;
   2438 
   2439 	tx_buffer = txr->tx_buffers;
   2440 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   2441 		if (tx_buffer->m_head != NULL) {
   2442 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   2443 			    0, tx_buffer->m_head->m_pkthdr.len,
   2444 			    BUS_DMASYNC_POSTWRITE);
   2445 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2446 			m_freem(tx_buffer->m_head);
   2447 			tx_buffer->m_head = NULL;
   2448 			if (tx_buffer->map != NULL) {
   2449 				ixgbe_dmamap_destroy(txr->txtag,
   2450 				    tx_buffer->map);
   2451 				tx_buffer->map = NULL;
   2452 			}
   2453 		} else if (tx_buffer->map != NULL) {
   2454 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2455 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   2456 			tx_buffer->map = NULL;
   2457 		}
   2458 	}
   2459 #if __FreeBSD_version >= 800000
   2460 	if (txr->br != NULL)
   2461 		buf_ring_free(txr->br, M_DEVBUF);
   2462 #endif
   2463 	if (txr->tx_buffers != NULL) {
   2464 		free(txr->tx_buffers, M_DEVBUF);
   2465 		txr->tx_buffers = NULL;
   2466 	}
   2467 	if (txr->txtag != NULL) {
   2468 		ixgbe_dma_tag_destroy(txr->txtag);
   2469 		txr->txtag = NULL;
   2470 	}
   2471 	return;
   2472 }
   2473 
   2474 /*********************************************************************
   2475  *
   2476  *  Advanced Context Descriptor setup for VLAN or L4 CSUM
   2477  *
   2478  **********************************************************************/
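/*
** Roughly, the advanced context descriptor written below packs its fields as:
**   vlan_macip_lens:  VLAN tag (31:16) | MACLEN, L2 header bytes (15:9) |
**                     IPLEN, L3 header bytes (8:0)
**   type_tucmd_mlhl:  DEXT/CTXT descriptor type plus the IPV4/IPV6 and
**                     L4T_TCP/L4T_UDP hints for the checksum engine
** The olinfo bits returned (IXSM/TXSM) are OR'd by the caller into the
** data descriptors of the same packet.
*/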
   2479 
   2480 static u32
   2481 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   2482 {
   2483 	struct m_tag *mtag;
   2484 	struct adapter *adapter = txr->adapter;
   2485 	struct ethercom *ec = &adapter->osdep.ec;
   2486 	struct ixgbe_adv_tx_context_desc *TXD;
   2487 	struct ixv_tx_buf        *tx_buffer;
   2488 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2489 	struct ether_vlan_header *eh;
   2490 	struct ip ip;
   2491 	struct ip6_hdr ip6;
   2492 	int  ehdrlen, ip_hlen = 0;
   2493 	u16	etype;
   2494 	u8	ipproto = 0;
   2495 	bool	offload;
   2496 	int ctxd = txr->next_avail_desc;
   2497 	u16 vtag = 0;
   2498 
   2499 
   2500 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   2501 
   2502 	tx_buffer = &txr->tx_buffers[ctxd];
   2503 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2504 
   2505 	/*
   2506 	** In advanced descriptors the vlan tag must
   2507 	** be placed into the descriptor itself.
   2508 	*/
   2509 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2510 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2511 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2512 	} else if (!offload)
   2513 		return 0;
   2514 
   2515 	/*
   2516 	 * Determine where frame payload starts.
   2517 	 * Jump over vlan headers if already present,
   2518 	 * helpful for QinQ too.
   2519 	 */
   2520 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   2521 	eh = mtod(mp, struct ether_vlan_header *);
   2522 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   2523 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   2524 		etype = ntohs(eh->evl_proto);
   2525 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2526 	} else {
   2527 		etype = ntohs(eh->evl_encap_proto);
   2528 		ehdrlen = ETHER_HDR_LEN;
   2529 	}
   2530 
   2531 	/* Set the ether header length */
   2532 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2533 
   2534 	switch (etype) {
   2535 	case ETHERTYPE_IP:
   2536 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   2537 		ip_hlen = ip.ip_hl << 2;
   2538 		ipproto = ip.ip_p;
   2539 #if 0
   2540 		ip.ip_sum = 0;
   2541 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   2542 #else
   2543 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   2544 		    ip.ip_sum == 0);
   2545 #endif
   2546 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2547 		break;
   2548 	case ETHERTYPE_IPV6:
   2549 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   2550 		ip_hlen = sizeof(ip6);
   2551 		ipproto = ip6.ip6_nxt;
   2552 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   2553 		break;
   2554 	default:
   2555 		break;
   2556 	}
   2557 
   2558 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   2559 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   2560 
   2561 	vlan_macip_lens |= ip_hlen;
   2562 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2563 
   2564 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   2565 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2566 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2567 		KASSERT(ipproto == IPPROTO_TCP);
   2568 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   2569 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   2570 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2571 		KASSERT(ipproto == IPPROTO_UDP);
   2572 	}
   2573 
   2574 	/* Now copy bits into descriptor */
   2575 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2576 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2577 	TXD->seqnum_seed = htole32(0);
   2578 	TXD->mss_l4len_idx = htole32(0);
   2579 
   2580 	tx_buffer->m_head = NULL;
   2581 	tx_buffer->eop_index = -1;
   2582 
   2583 	/* We've consumed the first desc, adjust counters */
   2584 	if (++ctxd == adapter->num_tx_desc)
   2585 		ctxd = 0;
   2586 	txr->next_avail_desc = ctxd;
   2587 	--txr->tx_avail;
   2588 
   2589         return olinfo;
   2590 }
   2591 
   2592 /**********************************************************************
   2593  *
   2594  *  Setup work for hardware segmentation offload (TSO) on
   2595  *  adapters using advanced tx descriptors
   2596  *
   2597  **********************************************************************/
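/*
** In rough terms, the TSO context set up here gives the hardware:
**   - the header lengths (MACLEN/IPLEN) in vlan_macip_lens,
**   - the segment size (MSS, bits 31:16) and TCP header length
**     (bits 15:8) in mss_l4len_idx,
**   - a pseudo-header checksum seeded into th_sum so the hardware can
**     finish the per-segment TCP checksums,
** while *paylen is handed back to the caller as the payload length
** (packet length minus all headers) for the transmit descriptor.
*/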
   2598 static bool
   2599 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   2600 {
   2601 	struct m_tag *mtag;
   2602 	struct adapter *adapter = txr->adapter;
   2603 	struct ethercom *ec = &adapter->osdep.ec;
   2604 	struct ixgbe_adv_tx_context_desc *TXD;
   2605 	struct ixv_tx_buf        *tx_buffer;
   2606 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2607 	u32 mss_l4len_idx = 0;
   2608 	u16 vtag = 0;
   2609 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
   2610 	struct ether_vlan_header *eh;
   2611 	struct ip *ip;
   2612 	struct tcphdr *th;
   2613 
   2614 
   2615 	/*
   2616 	 * Determine where frame payload starts.
   2617 	 * Jump over vlan headers if already present
   2618 	 */
   2619 	eh = mtod(mp, struct ether_vlan_header *);
   2620 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   2621 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2622 	else
   2623 		ehdrlen = ETHER_HDR_LEN;
   2624 
   2625         /* Ensure we have at least the IP+TCP header in the first mbuf. */
   2626         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   2627 		return FALSE;
   2628 
   2629 	ctxd = txr->next_avail_desc;
   2630 	tx_buffer = &txr->tx_buffers[ctxd];
   2631 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2632 
   2633 	ip = (struct ip *)(mp->m_data + ehdrlen);
   2634 	if (ip->ip_p != IPPROTO_TCP)
   2635 		return FALSE;   /* 0 */
   2636 	ip->ip_sum = 0;
   2637 	ip_hlen = ip->ip_hl << 2;
   2638 	th = (struct tcphdr *)((char *)ip + ip_hlen);
   2639 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   2640 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   2641 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   2642 	tcp_hlen = th->th_off << 2;
   2643 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   2644 
   2645 	/* This is used in the transmit desc in encap */
   2646 	*paylen = mp->m_pkthdr.len - hdrlen;
   2647 
   2648 	/* VLAN MACLEN IPLEN */
   2649 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2650 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2651                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2652 	}
   2653 
   2654 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2655 	vlan_macip_lens |= ip_hlen;
   2656 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2657 
   2658 	/* ADV DTYPE TUCMD */
   2659 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2660 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2661 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2662 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2663 
   2664 
   2665 	/* MSS L4LEN IDX */
   2666 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   2667 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   2668 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   2669 
   2670 	TXD->seqnum_seed = htole32(0);
   2671 	tx_buffer->m_head = NULL;
   2672 	tx_buffer->eop_index = -1;
   2673 
   2674 	if (++ctxd == adapter->num_tx_desc)
   2675 		ctxd = 0;
   2676 
   2677 	txr->tx_avail--;
   2678 	txr->next_avail_desc = ctxd;
   2679 	return TRUE;
   2680 }
   2681 
   2682 
   2683 /**********************************************************************
   2684  *
   2685  *  Examine each tx_buffer in the used queue. If the hardware is done
   2686  *  processing the packet then free associated resources. The
   2687  *  tx_buffer is put back on the free queue.
   2688  *
   2689  **********************************************************************/
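/*
** A sketch of the bookkeeping: next_to_clean marks the oldest descriptor
** still owned by the hardware, and each tx_buffer remembers the index of
** its packet's EOP descriptor.  Once the EOP descriptor's DD (descriptor
** done) bit is set, every descriptor from next_to_clean up to and
** including that EOP can be reclaimed, and tx_avail grows accordingly.
*/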
   2690 static bool
   2691 ixv_txeof(struct tx_ring *txr)
   2692 {
   2693 	struct adapter	*adapter = txr->adapter;
   2694 	struct ifnet	*ifp = adapter->ifp;
   2695 	u32	first, last, done;
   2696 	struct ixv_tx_buf *tx_buffer;
   2697 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   2698 
   2699 	KASSERT(mutex_owned(&txr->tx_mtx));
   2700 
   2701 	if (txr->tx_avail == adapter->num_tx_desc)
   2702 		return false;
   2703 
   2704 	first = txr->next_to_clean;
   2705 	tx_buffer = &txr->tx_buffers[first];
   2706 	/* For cleanup we just use legacy struct */
   2707 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2708 	last = tx_buffer->eop_index;
   2709 	if (last == -1)
   2710 		return false;
   2711 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2712 
   2713 	/*
   2714 	** Get the index of the first descriptor
   2715 	** BEYOND the EOP and call that 'done'.
   2716 	** I do this so the comparison in the
   2717 	** inner while loop below can be simple
   2718 	*/
   2719 	if (++last == adapter->num_tx_desc) last = 0;
   2720 	done = last;
   2721 
   2722         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2723             BUS_DMASYNC_POSTREAD);
   2724 	/*
   2725 	** Only the EOP descriptor of a packet now has the DD
    2726 	** bit set; this is what we look for...
   2727 	*/
   2728 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   2729 		/* We clean the range of the packet */
   2730 		while (first != done) {
   2731 			tx_desc->upper.data = 0;
   2732 			tx_desc->lower.data = 0;
   2733 			tx_desc->buffer_addr = 0;
   2734 			++txr->tx_avail;
   2735 
   2736 			if (tx_buffer->m_head) {
   2737 				bus_dmamap_sync(txr->txtag->dt_dmat,
   2738 				    tx_buffer->map,
   2739 				    0, tx_buffer->m_head->m_pkthdr.len,
   2740 				    BUS_DMASYNC_POSTWRITE);
   2741 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2742 				m_freem(tx_buffer->m_head);
   2743 				tx_buffer->m_head = NULL;
   2744 				tx_buffer->map = NULL;
   2745 			}
   2746 			tx_buffer->eop_index = -1;
   2747 			getmicrotime(&txr->watchdog_time);
   2748 
   2749 			if (++first == adapter->num_tx_desc)
   2750 				first = 0;
   2751 
   2752 			tx_buffer = &txr->tx_buffers[first];
   2753 			tx_desc =
   2754 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   2755 		}
   2756 		++ifp->if_opackets;
   2757 		/* See if there is more work now */
   2758 		last = tx_buffer->eop_index;
   2759 		if (last != -1) {
   2760 			eop_desc =
   2761 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   2762 			/* Get next done point */
   2763 			if (++last == adapter->num_tx_desc) last = 0;
   2764 			done = last;
   2765 		} else
   2766 			break;
   2767 	}
   2768 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2769 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2770 
   2771 	txr->next_to_clean = first;
   2772 
   2773 	/*
   2774 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   2775 	 * it is OK to send packets. If there are no pending descriptors,
   2776 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   2777 	 * restart the timeout.
   2778 	 */
   2779 	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
   2780 		ifp->if_flags &= ~IFF_OACTIVE;
   2781 		if (txr->tx_avail == adapter->num_tx_desc) {
   2782 			txr->watchdog_check = FALSE;
   2783 			return false;
   2784 		}
   2785 	}
   2786 
   2787 	return true;
   2788 }
   2789 
   2790 /*********************************************************************
   2791  *
    2792  *  Refresh mbuf buffers for RX descriptor rings.
    2793  *   - This now keeps its own state, so discards due to resource
    2794  *     exhaustion are unnecessary: if an mbuf cannot be obtained
    2795  *     it just returns, keeping its placeholder, and it can simply
    2796  *     be called again later to try again.
   2797  *
   2798  **********************************************************************/
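/*
** Roughly: next_to_refresh trails the slots that have been handed back to
** the hardware, and the tail register (VFRDT) is only bumped when at least
** one slot was actually refreshed, so a failed allocation simply leaves
** the ring where it was until the next call.
*/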
   2799 static void
   2800 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
   2801 {
   2802 	struct adapter		*adapter = rxr->adapter;
   2803 	struct ixv_rx_buf	*rxbuf;
   2804 	struct mbuf		*mh, *mp;
   2805 	int			i, j, error;
   2806 	bool			refreshed = false;
   2807 
   2808 	i = j = rxr->next_to_refresh;
   2809 	/* Control the loop with one beyond */
   2810 	if (++j == adapter->num_rx_desc)
   2811 		j = 0;
   2812 	while (j != limit) {
   2813 		rxbuf = &rxr->rx_buffers[i];
   2814 		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
   2815 			mh = m_gethdr(M_DONTWAIT, MT_DATA);
   2816 			if (mh == NULL)
   2817 				goto update;
    2818 			mh->m_pkthdr.len = mh->m_len = MHLEN;
   2820 			mh->m_flags |= M_PKTHDR;
   2821 			m_adj(mh, ETHER_ALIGN);
   2822 			/* Get the memory mapping */
   2823 			error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   2824 			    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   2825 			if (error != 0) {
   2826 				printf("GET BUF: dmamap load"
   2827 				    " failure - %d\n", error);
   2828 				m_free(mh);
   2829 				goto update;
   2830 			}
   2831 			rxbuf->m_head = mh;
   2832 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2833 			    BUS_DMASYNC_PREREAD);
   2834 			rxr->rx_base[i].read.hdr_addr =
   2835 			    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   2836 		}
   2837 
   2838 		if (rxbuf->m_pack == NULL) {
   2839 			mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   2840 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   2841 			if (mp == NULL) {
   2842 				rxr->no_jmbuf.ev_count++;
   2843 				goto update;
   2844 			}
   2845 			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   2846 			/* Get the memory mapping */
   2847 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   2848 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   2849 			if (error != 0) {
   2850 				printf("GET BUF: dmamap load"
   2851 				    " failure - %d\n", error);
   2852 				m_free(mp);
   2853 				goto update;
   2854 			}
   2855 			rxbuf->m_pack = mp;
   2856 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2857 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   2858 			rxr->rx_base[i].read.pkt_addr =
   2859 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   2860 		}
   2861 
   2862 		refreshed = true;
   2863 		rxr->next_to_refresh = i = j;
   2864 		/* Calculate next index */
   2865 		if (++j == adapter->num_rx_desc)
   2866 			j = 0;
   2867 	}
   2868 update:
   2869 	if (refreshed) /* If we refreshed some, bump tail */
   2870 		IXGBE_WRITE_REG(&adapter->hw,
   2871 		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
   2872 	return;
   2873 }
   2874 
   2875 /*********************************************************************
   2876  *
   2877  *  Allocate memory for rx_buffer structures. Since we use one
   2878  *  rx_buffer per received packet, the maximum number of rx_buffer's
   2879  *  that we'll need is equal to the number of receive descriptors
   2880  *  that we've allocated.
   2881  *
   2882  **********************************************************************/
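/*
** Note that two DMA tags are created per ring: htag maps the small header
** mbufs (MSIZE) used when header split is enabled, and ptag maps the jumbo
** payload clusters (MJUMPAGESIZE); each rx_buffer gets one map from each.
*/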
   2883 static int
   2884 ixv_allocate_receive_buffers(struct rx_ring *rxr)
   2885 {
   2886 	struct	adapter 	*adapter = rxr->adapter;
   2887 	device_t 		dev = adapter->dev;
   2888 	struct ixv_rx_buf 	*rxbuf;
   2889 	int             	i, bsize, error;
   2890 
   2891 	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
   2892 	if (!(rxr->rx_buffers =
   2893 	    (struct ixv_rx_buf *) malloc(bsize,
   2894 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2895 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   2896 		error = ENOMEM;
   2897 		goto fail;
   2898 	}
   2899 
   2900 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2901 				   1, 0,	/* alignment, bounds */
   2902 				   MSIZE,		/* maxsize */
   2903 				   1,			/* nsegments */
   2904 				   MSIZE,		/* maxsegsize */
   2905 				   0,			/* flags */
   2906 				   &rxr->htag))) {
   2907 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2908 		goto fail;
   2909 	}
   2910 
   2911 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2912 				   1, 0,	/* alignment, bounds */
   2913 				   MJUMPAGESIZE,	/* maxsize */
   2914 				   1,			/* nsegments */
   2915 				   MJUMPAGESIZE,	/* maxsegsize */
   2916 				   0,			/* flags */
   2917 				   &rxr->ptag))) {
   2918 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2919 		goto fail;
   2920 	}
   2921 
   2922 	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
   2923 		rxbuf = &rxr->rx_buffers[i];
   2924 		error = ixgbe_dmamap_create(rxr->htag,
   2925 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   2926 		if (error) {
   2927 			aprint_error_dev(dev, "Unable to create RX head map\n");
   2928 			goto fail;
   2929 		}
   2930 		error = ixgbe_dmamap_create(rxr->ptag,
   2931 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   2932 		if (error) {
   2933 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   2934 			goto fail;
   2935 		}
   2936 	}
   2937 
   2938 	return (0);
   2939 
   2940 fail:
   2941 	/* Frees all, but can handle partial completion */
   2942 	ixv_free_receive_structures(adapter);
   2943 	return (error);
   2944 }
   2945 
   2946 static void
   2947 ixv_free_receive_ring(struct rx_ring *rxr)
   2948 {
   2949 	struct  adapter         *adapter;
   2950 	struct ixv_rx_buf       *rxbuf;
   2951 	int i;
   2952 
   2953 	adapter = rxr->adapter;
   2954 	for (i = 0; i < adapter->num_rx_desc; i++) {
   2955 		rxbuf = &rxr->rx_buffers[i];
   2956 		if (rxbuf->m_head != NULL) {
   2957 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2958 			    BUS_DMASYNC_POSTREAD);
   2959 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   2960 			rxbuf->m_head->m_flags |= M_PKTHDR;
   2961 			m_freem(rxbuf->m_head);
   2962 		}
   2963 		if (rxbuf->m_pack != NULL) {
   2964 			/* XXX not ixgbe_ ? */
   2965 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2966 			    0, rxbuf->m_pack->m_pkthdr.len,
   2967 			    BUS_DMASYNC_POSTREAD);
   2968 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   2969 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   2970 			m_freem(rxbuf->m_pack);
   2971 		}
   2972 		rxbuf->m_head = NULL;
   2973 		rxbuf->m_pack = NULL;
   2974 	}
   2975 }
   2976 
   2977 
   2978 /*********************************************************************
   2979  *
   2980  *  Initialize a receive ring and its buffers.
   2981  *
   2982  **********************************************************************/
   2983 static int
   2984 ixv_setup_receive_ring(struct rx_ring *rxr)
   2985 {
   2986 	struct	adapter 	*adapter;
   2987 	struct ixv_rx_buf	*rxbuf;
   2988 #ifdef LRO
   2989 	struct ifnet		*ifp;
   2990 	struct lro_ctrl		*lro = &rxr->lro;
   2991 #endif /* LRO */
   2992 	int			rsize, error = 0;
   2993 
   2994 	adapter = rxr->adapter;
   2995 #ifdef LRO
   2996 	ifp = adapter->ifp;
   2997 #endif /* LRO */
   2998 
   2999 	/* Clear the ring contents */
   3000 	IXV_RX_LOCK(rxr);
   3001 	rsize = roundup2(adapter->num_rx_desc *
   3002 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3003 	bzero((void *)rxr->rx_base, rsize);
   3004 
   3005 	/* Free current RX buffer structs and their mbufs */
   3006 	ixv_free_receive_ring(rxr);
   3007 
   3008 	IXV_RX_UNLOCK(rxr);
   3009 
   3010 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3011 	 * or size of jumbo mbufs may have changed.
   3012 	 */
   3013 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3014 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   3015 
   3016 	IXV_RX_LOCK(rxr);
   3017 
   3018 	/* Configure header split? */
   3019 	if (ixv_header_split)
   3020 		rxr->hdr_split = TRUE;
   3021 
   3022 	/* Now replenish the mbufs */
   3023 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3024 		struct mbuf	*mh, *mp;
   3025 
   3026 		rxbuf = &rxr->rx_buffers[j];
   3027 		/*
    3028 		** Don't allocate header mbufs if we're not
    3029 		** doing header split; it's wasteful
   3030 		*/
   3031 		if (rxr->hdr_split == FALSE)
   3032 			goto skip_head;
   3033 
   3034 		/* First the header */
   3035 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3036 		if (rxbuf->m_head == NULL) {
   3037 			error = ENOBUFS;
   3038 			goto fail;
   3039 		}
   3040 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3041 		mh = rxbuf->m_head;
   3042 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3043 		mh->m_flags |= M_PKTHDR;
   3044 		/* Get the memory mapping */
   3045 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3046 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3047 		if (error != 0) /* Nothing elegant to do here */
   3048 			goto fail;
   3049 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3050 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3051 		/* Update descriptor */
   3052 		rxr->rx_base[j].read.hdr_addr =
   3053 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3054 
   3055 skip_head:
   3056 		/* Now the payload cluster */
   3057 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3058 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3059 		if (rxbuf->m_pack == NULL) {
   3060 			error = ENOBUFS;
   3061                         goto fail;
   3062 		}
   3063 		mp = rxbuf->m_pack;
   3064 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3065 		/* Get the memory mapping */
   3066 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3067 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3068 		if (error != 0)
   3069                         goto fail;
   3070 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3071 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3072 		/* Update descriptor */
   3073 		rxr->rx_base[j].read.pkt_addr =
   3074 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3075 	}
   3076 
   3077 
   3078 	/* Setup our descriptor indices */
   3079 	rxr->next_to_check = 0;
   3080 	rxr->next_to_refresh = 0;
   3081 	rxr->lro_enabled = FALSE;
   3082 	rxr->rx_split_packets.ev_count = 0;
   3083 	rxr->rx_bytes.ev_count = 0;
   3084 
   3085 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3086 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3087 
   3088 #ifdef LRO
   3089 	/*
   3090 	** Now set up the LRO interface:
   3091 	*/
   3092 	if (ifp->if_capenable & IFCAP_LRO) {
   3093 		device_t dev = adapter->dev;
   3094 		int err = tcp_lro_init(lro);
   3095 		if (err) {
   3096 			device_printf(dev, "LRO Initialization failed!\n");
   3097 			goto fail;
   3098 		}
   3099 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   3100 		rxr->lro_enabled = TRUE;
   3101 		lro->ifp = adapter->ifp;
   3102 	}
   3103 #endif /* LRO */
   3104 
   3105 	IXV_RX_UNLOCK(rxr);
   3106 	return (0);
   3107 
   3108 fail:
   3109 	ixv_free_receive_ring(rxr);
   3110 	IXV_RX_UNLOCK(rxr);
   3111 	return (error);
   3112 }
   3113 
   3114 /*********************************************************************
   3115  *
   3116  *  Initialize all receive rings.
   3117  *
   3118  **********************************************************************/
   3119 static int
   3120 ixv_setup_receive_structures(struct adapter *adapter)
   3121 {
   3122 	struct rx_ring *rxr = adapter->rx_rings;
   3123 	int j;
   3124 
   3125 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   3126 		if (ixv_setup_receive_ring(rxr))
   3127 			goto fail;
   3128 
   3129 	return (0);
   3130 fail:
   3131 	/*
    3132 	 * Free the RX buffers allocated so far; we only handle
    3133 	 * the rings that completed, since the failing case will have
    3134 	 * cleaned up for itself. 'j' failed, so it is the terminus.
   3135 	 */
   3136 	for (int i = 0; i < j; ++i) {
   3137 		rxr = &adapter->rx_rings[i];
   3138 		ixv_free_receive_ring(rxr);
   3139 	}
   3140 
   3141 	return (ENOBUFS);
   3142 }
   3143 
   3144 /*********************************************************************
   3145  *
   3146  *  Setup receive registers and features.
   3147  *
   3148  **********************************************************************/
   3149 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
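/*
** A note on the buffer-size fields programmed below: the packet buffer
** size ends up in SRRCTL.BSIZEPKT, which the hardware interprets in 1KB
** units (hence the >> IXGBE_SRRCTL_BSIZEPKT_SHIFT), while the header
** buffer size field uses the BSIZEHDRSIZE shift above when header split
** is enabled.
*/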
   3150 
   3151 static void
   3152 ixv_initialize_receive_units(struct adapter *adapter)
   3153 {
   3154 	int i;
   3155 	struct	rx_ring	*rxr = adapter->rx_rings;
   3156 	struct ixgbe_hw	*hw = &adapter->hw;
   3157 	struct ifnet   *ifp = adapter->ifp;
   3158 	u32		bufsz, fctrl, rxcsum, hlreg;
   3159 
   3160 
   3161 	/* Enable broadcasts */
   3162 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   3163 	fctrl |= IXGBE_FCTRL_BAM;
   3164 	fctrl |= IXGBE_FCTRL_DPF;
   3165 	fctrl |= IXGBE_FCTRL_PMCF;
   3166 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   3167 
   3168 	/* Set for Jumbo Frames? */
   3169 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   3170 	if (ifp->if_mtu > ETHERMTU) {
   3171 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   3172 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3173 	} else {
   3174 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   3175 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3176 	}
   3177 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   3178 
   3179 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   3180 		u64 rdba = rxr->rxdma.dma_paddr;
   3181 		u32 reg, rxdctl;
   3182 
   3183 		/* Do the queue enabling first */
   3184 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3185 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3186 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   3187 		for (int k = 0; k < 10; k++) {
   3188 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   3189 			    IXGBE_RXDCTL_ENABLE)
   3190 				break;
   3191 			else
   3192 				msec_delay(1);
   3193 		}
   3194 		wmb();
   3195 
   3196 		/* Setup the Base and Length of the Rx Descriptor Ring */
   3197 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   3198 		    (rdba & 0x00000000ffffffffULL));
   3199 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
   3200 		    (rdba >> 32));
   3201 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   3202 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   3203 
   3204 		/* Set up the SRRCTL register */
   3205 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   3206 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   3207 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   3208 		reg |= bufsz;
   3209 		if (rxr->hdr_split) {
   3210 			/* Use a standard mbuf for the header */
   3211 			reg |= ((IXV_RX_HDR <<
   3212 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   3213 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   3214 			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   3215 		} else
   3216 			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   3217 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   3218 
   3219 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   3220 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   3221 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   3222 		    adapter->num_rx_desc - 1);
   3223 	}
   3224 
   3225 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   3226 
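	/*
	** Descriptive note (see the 82599 datasheet for the exact field
	** semantics): PCSD selects RSS-hash reporting instead of the
	** fragment checksum in the descriptor, and IPPCSE enables IP
	** payload checksumming when the fragment checksum is still used.
	*/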
   3227 	if (ifp->if_capenable & IFCAP_RXCSUM)
   3228 		rxcsum |= IXGBE_RXCSUM_PCSD;
   3229 
   3230 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   3231 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   3232 
   3233 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   3234 
   3235 	return;
   3236 }
   3237 
   3238 /*********************************************************************
   3239  *
   3240  *  Free all receive rings.
   3241  *
   3242  **********************************************************************/
   3243 static void
   3244 ixv_free_receive_structures(struct adapter *adapter)
   3245 {
   3246 	struct rx_ring *rxr = adapter->rx_rings;
   3247 
   3248 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   3249 #ifdef LRO
   3250 		struct lro_ctrl		*lro = &rxr->lro;
   3251 #endif /* LRO */
   3252 		ixv_free_receive_buffers(rxr);
   3253 #ifdef LRO
   3254 		/* Free LRO memory */
   3255 		tcp_lro_free(lro);
   3256 #endif /* LRO */
   3257 		/* Free the ring memory as well */
   3258 		ixv_dma_free(adapter, &rxr->rxdma);
   3259 		IXV_RX_LOCK_DESTROY(rxr);
   3260 	}
   3261 
   3262 	free(adapter->rx_rings, M_DEVBUF);
   3263 }
   3264 
   3265 
   3266 /*********************************************************************
   3267  *
   3268  *  Free receive ring data structures
   3269  *
   3270  **********************************************************************/
   3271 static void
   3272 ixv_free_receive_buffers(struct rx_ring *rxr)
   3273 {
   3274 	struct adapter		*adapter = rxr->adapter;
   3275 	struct ixv_rx_buf	*rxbuf;
   3276 
   3277 	INIT_DEBUGOUT("free_receive_structures: begin");
   3278 
   3279 	/* Cleanup any existing buffers */
   3280 	if (rxr->rx_buffers != NULL) {
   3281 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   3282 			rxbuf = &rxr->rx_buffers[i];
   3283 			if (rxbuf->m_head != NULL) {
   3284 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3285 				    BUS_DMASYNC_POSTREAD);
   3286 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3287 				rxbuf->m_head->m_flags |= M_PKTHDR;
   3288 				m_freem(rxbuf->m_head);
   3289 			}
   3290 			if (rxbuf->m_pack != NULL) {
   3291 				/* XXX not ixgbe_* ? */
   3292 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3293 				    0, rxbuf->m_pack->m_pkthdr.len,
   3294 				    BUS_DMASYNC_POSTREAD);
   3295 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3296 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   3297 				m_freem(rxbuf->m_pack);
   3298 			}
   3299 			rxbuf->m_head = NULL;
   3300 			rxbuf->m_pack = NULL;
   3301 			if (rxbuf->hmap != NULL) {
   3302 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   3303 				rxbuf->hmap = NULL;
   3304 			}
   3305 			if (rxbuf->pmap != NULL) {
   3306 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   3307 				rxbuf->pmap = NULL;
   3308 			}
   3309 		}
   3310 		if (rxr->rx_buffers != NULL) {
   3311 			free(rxr->rx_buffers, M_DEVBUF);
   3312 			rxr->rx_buffers = NULL;
   3313 		}
   3314 	}
   3315 
   3316 	if (rxr->htag != NULL) {
   3317 		ixgbe_dma_tag_destroy(rxr->htag);
   3318 		rxr->htag = NULL;
   3319 	}
   3320 	if (rxr->ptag != NULL) {
   3321 		ixgbe_dma_tag_destroy(rxr->ptag);
   3322 		rxr->ptag = NULL;
   3323 	}
   3324 
   3325 	return;
   3326 }
   3327 
   3328 static __inline void
   3329 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   3330 {
   3331 	int s;
   3332 
   3333 #ifdef LRO
   3334 	struct adapter	*adapter = ifp->if_softc;
   3335 	struct ethercom *ec = &adapter->osdep.ec;
   3336 
   3337         /*
    3338          * At the moment LRO is only for IPv4/TCP packets whose TCP checksum
    3339          * has been computed by the hardware, and which carry no VLAN tag in
    3340          * the ethernet header.
   3341          */
   3342         if (rxr->lro_enabled &&
   3343             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   3344             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3345             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   3346             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
   3347             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   3348             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   3349                 /*
   3350                  * Send to the stack if:
   3351                  **  - LRO not enabled, or
   3352                  **  - no LRO resources, or
   3353                  **  - lro enqueue fails
   3354                  */
   3355                 if (rxr->lro.lro_cnt != 0)
   3356                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   3357                                 return;
   3358         }
   3359 #endif /* LRO */
   3360 
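	/*
	** Drop the RX lock while the packet is handed to BPF and the
	** stack; if_input may take other locks or re-enter the driver.
	*/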
   3361 	IXV_RX_UNLOCK(rxr);
   3362 
   3363 	s = splnet();
   3364 	/* Pass this up to any BPF listeners. */
   3365 	bpf_mtap(ifp, m);
   3366         (*ifp->if_input)(ifp, m);
   3367 	splx(s);
   3368 
   3369 	IXV_RX_LOCK(rxr);
   3370 }
   3371 
   3372 static __inline void
   3373 ixv_rx_discard(struct rx_ring *rxr, int i)
   3374 {
   3375 	struct adapter		*adapter = rxr->adapter;
   3376 	struct ixv_rx_buf	*rbuf;
   3377 	struct mbuf		*mh, *mp;
   3378 
   3379 	rbuf = &rxr->rx_buffers[i];
   3380         if (rbuf->fmp != NULL) /* Partial chain ? */
   3381                 m_freem(rbuf->fmp);
   3382 
   3383 	mh = rbuf->m_head;
   3384 	mp = rbuf->m_pack;
   3385 
   3386 	/* Reuse loaded DMA map and just update mbuf chain */
   3387 	mh->m_len = MHLEN;
   3388 	mh->m_flags |= M_PKTHDR;
   3389 	mh->m_next = NULL;
   3390 
   3391 	mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
   3392 	mp->m_data = mp->m_ext.ext_buf;
   3393 	mp->m_next = NULL;
   3394 	return;
   3395 }
   3396 
   3397 
   3398 /*********************************************************************
   3399  *
   3400  *  This routine executes in interrupt context. It replenishes
    3401  *  the mbufs in the descriptor ring and sends data which has been
    3402  *  DMA'd into host memory to the upper layer.
   3403  *
   3404  *  We loop at most count times if count is > 0, or until done if
   3405  *  count < 0.
   3406  *
   3407  *  Return TRUE for more work, FALSE for all clean.
   3408  *********************************************************************/
   3409 static bool
   3410 ixv_rxeof(struct ix_queue *que, int count)
   3411 {
   3412 	struct adapter		*adapter = que->adapter;
   3413 	struct rx_ring		*rxr = que->rxr;
   3414 	struct ifnet		*ifp = adapter->ifp;
   3415 #ifdef LRO
   3416 	struct lro_ctrl		*lro = &rxr->lro;
   3417 	struct lro_entry	*queued;
   3418 #endif /* LRO */
   3419 	int			i, nextp, processed = 0;
   3420 	u32			staterr = 0;
   3421 	union ixgbe_adv_rx_desc	*cur;
   3422 	struct ixv_rx_buf	*rbuf, *nbuf;
   3423 
   3424 	IXV_RX_LOCK(rxr);
   3425 
   3426 	for (i = rxr->next_to_check; count != 0;) {
   3427 		struct mbuf	*sendmp, *mh, *mp;
   3428 		u32		ptype;
   3429 		u16		hlen, plen, hdr, vtag;
   3430 		bool		eop;
   3431 
   3432 		/* Sync the ring. */
   3433 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3434 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   3435 
   3436 		cur = &rxr->rx_base[i];
   3437 		staterr = le32toh(cur->wb.upper.status_error);
   3438 
   3439 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   3440 			break;
   3441 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   3442 			break;
   3443 
   3444 		count--;
   3445 		sendmp = NULL;
   3446 		nbuf = NULL;
   3447 		cur->wb.upper.status_error = 0;
   3448 		rbuf = &rxr->rx_buffers[i];
   3449 		mh = rbuf->m_head;
   3450 		mp = rbuf->m_pack;
   3451 
   3452 		plen = le16toh(cur->wb.upper.length);
   3453 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   3454 		    IXGBE_RXDADV_PKTTYPE_MASK;
   3455 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   3456 		vtag = le16toh(cur->wb.upper.vlan);
   3457 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   3458 
   3459 		/* Make sure all parts of a bad packet are discarded */
   3460 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   3461 		    (rxr->discard)) {
   3462 			ifp->if_ierrors++;
   3463 			rxr->rx_discarded.ev_count++;
   3464 			if (!eop)
   3465 				rxr->discard = TRUE;
   3466 			else
   3467 				rxr->discard = FALSE;
   3468 			ixv_rx_discard(rxr, i);
   3469 			goto next_desc;
   3470 		}
   3471 
   3472 		if (!eop) {
   3473 			nextp = i + 1;
   3474 			if (nextp == adapter->num_rx_desc)
   3475 				nextp = 0;
   3476 			nbuf = &rxr->rx_buffers[nextp];
   3477 			prefetch(nbuf);
   3478 		}
   3479 		/*
   3480 		** The header mbuf is ONLY used when header
   3481 		** split is enabled, otherwise we get normal
    3482 		** behavior, i.e., both header and payload
   3483 		** are DMA'd into the payload buffer.
   3484 		**
   3485 		** Rather than using the fmp/lmp global pointers
   3486 		** we now keep the head of a packet chain in the
   3487 		** buffer struct and pass this along from one
   3488 		** descriptor to the next, until we get EOP.
   3489 		*/
   3490 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   3491 			/* This must be an initial descriptor */
   3492 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   3493 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   3494 			if (hlen > IXV_RX_HDR)
   3495 				hlen = IXV_RX_HDR;
   3496 			mh->m_len = hlen;
   3497 			mh->m_flags |= M_PKTHDR;
   3498 			mh->m_next = NULL;
   3499 			mh->m_pkthdr.len = mh->m_len;
   3500 			/* Null buf pointer so it is refreshed */
   3501 			rbuf->m_head = NULL;
   3502 			/*
    3503 			** Check the payload length; this
    3504 			** could be zero if it is a small
   3505 			** packet.
   3506 			*/
   3507 			if (plen > 0) {
   3508 				mp->m_len = plen;
   3509 				mp->m_next = NULL;
   3510 				mp->m_flags &= ~M_PKTHDR;
   3511 				mh->m_next = mp;
   3512 				mh->m_pkthdr.len += mp->m_len;
   3513 				/* Null buf pointer so it is refreshed */
   3514 				rbuf->m_pack = NULL;
   3515 				rxr->rx_split_packets.ev_count++;
   3516 			}
   3517 			/*
   3518 			** Now create the forward
    3519 			** chain so that when complete
    3520 			** we won't have to.
   3521 			*/
   3522                         if (eop == 0) {
   3523 				/* stash the chain head */
   3524                                 nbuf->fmp = mh;
   3525 				/* Make forward chain */
   3526                                 if (plen)
   3527                                         mp->m_next = nbuf->m_pack;
   3528                                 else
   3529                                         mh->m_next = nbuf->m_pack;
   3530                         } else {
   3531 				/* Singlet, prepare to send */
   3532                                 sendmp = mh;
   3533                                 if (VLAN_ATTACHED(&adapter->osdep.ec) &&
   3534 				  (staterr & IXGBE_RXD_STAT_VP)) {
   3535 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3536 					    printf("%s: could not apply VLAN "
   3537 					        "tag", __func__));
   3538                                 }
   3539                         }
   3540 		} else {
   3541 			/*
   3542 			** Either no header split, or a
   3543 			** secondary piece of a fragmented
   3544 			** split packet.
   3545 			*/
   3546 			mp->m_len = plen;
   3547 			/*
    3548 			** See if there is a stored chain head;
    3549 			** if so, this is a continuation fragment.
   3550 			*/
   3551 			sendmp = rbuf->fmp;
   3552 			rbuf->m_pack = rbuf->fmp = NULL;
   3553 
   3554 			if (sendmp != NULL) /* secondary frag */
   3555 				sendmp->m_pkthdr.len += mp->m_len;
   3556 			else {
   3557 				/* first desc of a non-ps chain */
   3558 				sendmp = mp;
   3559 				sendmp->m_flags |= M_PKTHDR;
   3560 				sendmp->m_pkthdr.len = mp->m_len;
   3561 				if (staterr & IXGBE_RXD_STAT_VP) {
   3562 					/* XXX Do something reasonable on
   3563 					 * error.
   3564 					 */
   3565 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   3566 					    printf("%s: could not apply VLAN "
   3567 					        "tag", __func__));
   3568 				}
   3569                         }
   3570 			/* Pass the head pointer on */
   3571 			if (eop == 0) {
   3572 				nbuf->fmp = sendmp;
   3573 				sendmp = NULL;
   3574 				mp->m_next = nbuf->m_pack;
   3575 			}
   3576 		}
   3577 		++processed;
   3578 		/* Sending this frame? */
   3579 		if (eop) {
   3580 			sendmp->m_pkthdr.rcvif = ifp;
   3581 			ifp->if_ipackets++;
   3582 			rxr->rx_packets.ev_count++;
   3583 			/* capture data for AIM */
   3584 			rxr->bytes += sendmp->m_pkthdr.len;
   3585 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   3586 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   3587 				ixv_rx_checksum(staterr, sendmp, ptype,
   3588 				   &adapter->stats);
   3589 			}
   3590 #if __FreeBSD_version >= 800000
   3591 			sendmp->m_pkthdr.flowid = que->msix;
   3592 			sendmp->m_flags |= M_FLOWID;
   3593 #endif
   3594 		}
   3595 next_desc:
   3596 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   3597 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3598 
   3599 		/* Advance our pointers to the next descriptor. */
   3600 		if (++i == adapter->num_rx_desc)
   3601 			i = 0;
   3602 
   3603 		/* Now send to the stack or do LRO */
   3604 		if (sendmp != NULL)
   3605 			ixv_rx_input(rxr, ifp, sendmp, ptype);
   3606 
    3607 		/* Refresh the mbufs every eight descriptors */
   3608 		if (processed == 8) {
   3609 			ixv_refresh_mbufs(rxr, i);
   3610 			processed = 0;
   3611 		}
   3612 	}
   3613 
   3614 	/* Refresh any remaining buf structs */
   3615 	if (processed != 0) {
   3616 		ixv_refresh_mbufs(rxr, i);
   3617 		processed = 0;
   3618 	}
   3619 
   3620 	rxr->next_to_check = i;
   3621 
   3622 #ifdef LRO
   3623 	/*
   3624 	 * Flush any outstanding LRO work
   3625 	 */
   3626 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   3627 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   3628 		tcp_lro_flush(lro, queued);
   3629 	}
   3630 #endif /* LRO */
   3631 
   3632 	IXV_RX_UNLOCK(rxr);
   3633 
   3634 	/*
   3635 	** We still have cleaning to do?
   3636 	** Schedule another interrupt if so.
   3637 	*/
   3638 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   3639 		ixv_rearm_queues(adapter, (u64)(1ULL << que->msix));
   3640 		return true;
   3641 	}
   3642 
   3643 	return false;
   3644 }
   3645 
   3646 
   3647 /*********************************************************************
   3648  *
   3649  *  Verify that the hardware indicated that the checksum is valid.
    3650  *  Inform the stack about the status of the checksum so that the
    3651  *  stack doesn't spend time verifying it.
   3652  *
   3653  *********************************************************************/
   3654 static void
   3655 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   3656     struct ixgbevf_hw_stats *stats)
   3657 {
   3658 	u16	status = (u16) staterr;
   3659 	u8	errors = (u8) (staterr >> 24);
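	/*
	** The write-back status_error word carries the status flags in its
	** low bits and the error flags in its top byte; they are split here
	** so IPCS/L4CS can be tested against IPE/TCPE below.
	*/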
   3660 #if 0
   3661 	bool	sctp = FALSE;
   3662 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3663 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   3664 		sctp = TRUE;
   3665 #endif
   3666 	if (status & IXGBE_RXD_STAT_IPCS) {
   3667 		stats->ipcs.ev_count++;
   3668 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   3669 			/* IP Checksum Good */
   3670 			mp->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   3671 
   3672 		} else {
   3673 			stats->ipcs_bad.ev_count++;
   3674 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   3675 		}
   3676 	}
   3677 	if (status & IXGBE_RXD_STAT_L4CS) {
   3678 		stats->l4cs.ev_count++;
   3679 		u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   3680 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   3681 			mp->m_pkthdr.csum_flags |= type;
   3682 		} else {
   3683 			stats->l4cs_bad.ev_count++;
   3684 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   3685 		}
   3686 	}
   3687 	return;
   3688 }
   3689 
   3690 static void
   3691 ixv_setup_vlan_support(struct adapter *adapter)
   3692 {
   3693 	struct ixgbe_hw *hw = &adapter->hw;
   3694 	u32		ctrl, vid, vfta, retry;
   3695 
   3696 
   3697 	/*
    3698 	** We get here through init_locked, meaning
    3699 	** a soft reset; this has already cleared
    3700 	** the VFTA and other state, so if no
    3701 	** VLANs have been registered do nothing.
   3702 	*/
   3703 	if (adapter->num_vlans == 0)
   3704 		return;
   3705 
   3706 	/* Enable the queues */
   3707 	for (int i = 0; i < adapter->num_queues; i++) {
   3708 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3709 		ctrl |= IXGBE_RXDCTL_VME;
   3710 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   3711 	}
   3712 
   3713 	/*
    3714 	** A soft reset zeroes out the VFTA, so
   3715 	** we need to repopulate it now.
   3716 	*/
   3717 	for (int i = 0; i < VFTA_SIZE; i++) {
   3718 		if (ixv_shadow_vfta[i] == 0)
   3719 			continue;
   3720 		vfta = ixv_shadow_vfta[i];
   3721 		/*
    3722 		** Reconstruct the VLAN IDs from the bits set
    3723 		** in each word of the shadow VFTA; e.g. bit 5
    3724 		** set in word 2 means VLAN ID 69 (2 * 32 + 5).
    3725 		*/
    3726 		for (int j = 0; j < 32; j++) {
   3727 			retry = 0;
   3728 			if ((vfta & (1 << j)) == 0)
   3729 				continue;
   3730 			vid = (i * 32) + j;
    3731 			/* Set the entry via the shared code mailbox routine; it can fail transiently, so retry */
   3732 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   3733 				if (++retry > 5)
   3734 					break;
   3735 			}
   3736 		}
   3737 	}
   3738 }
   3739 
   3740 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   3741 /*
    3742 ** This routine is run via a vlan config EVENT;
    3743 ** it enables us to use the HW Filter table since
    3744 ** we can get the vlan id. This just creates the
    3745 ** entry in the soft version of the VFTA; init will
    3746 ** repopulate the real table.
   3747 */
   3748 static void
   3749 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3750 {
   3751 	struct adapter	*adapter = ifp->if_softc;
   3752 	u16		index, bit;
   3753 
   3754 	if (ifp->if_softc !=  arg)   /* Not our event */
   3755 		return;
   3756 
   3757 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3758 		return;
   3759 
   3760 	index = (vtag >> 5) & 0x7F;
   3761 	bit = vtag & 0x1F;
   3762 	ixv_shadow_vfta[index] |= (1 << bit);
   3763 	/* Re-init to load the changes */
   3764 	ixv_init(adapter);
   3765 }
   3766 
   3767 /*
    3768 ** This routine is run via a vlan
    3769 ** unconfig EVENT; it removes our entry
    3770 ** from the soft VFTA.
   3771 */
   3772 static void
   3773 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3774 {
   3775 	struct adapter	*adapter = ifp->if_softc;
   3776 	u16		index, bit;
   3777 
   3778 	if (ifp->if_softc !=  arg)
   3779 		return;
   3780 
   3781 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3782 		return;
   3783 
   3784 	index = (vtag >> 5) & 0x7F;
   3785 	bit = vtag & 0x1F;
   3786 	ixv_shadow_vfta[index] &= ~(1 << bit);
   3787 	/* Re-init to load the changes */
   3788 	ixv_init(adapter);
   3789 }
   3790 #endif
   3791 
   3792 static void
   3793 ixv_enable_intr(struct adapter *adapter)
   3794 {
   3795 	struct ixgbe_hw *hw = &adapter->hw;
   3796 	struct ix_queue *que = adapter->queues;
   3797 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   3798 
   3799 
   3800 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   3801 
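	/*
	** VTEIMS above unmasks the interrupt causes; VTEIAC below selects
	** which of them auto-clear when an interrupt fires (the queue
	** causes only, not the mailbox/link OTHER/LSC causes).
	*/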
   3802 	mask = IXGBE_EIMS_ENABLE_MASK;
   3803 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   3804 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   3805 
   3806         for (int i = 0; i < adapter->num_queues; i++, que++)
   3807 		ixv_enable_queue(adapter, que->msix);
   3808 
   3809 	IXGBE_WRITE_FLUSH(hw);
   3810 
   3811 	return;
   3812 }
   3813 
   3814 static void
   3815 ixv_disable_intr(struct adapter *adapter)
   3816 {
   3817 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   3818 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   3819 	IXGBE_WRITE_FLUSH(&adapter->hw);
   3820 	return;
   3821 }
   3822 
   3823 /*
   3824 ** Setup the correct IVAR register for a particular MSIX interrupt
   3825 **  - entry is the register array entry
   3826 **  - vector is the MSIX vector for this queue
   3827 **  - type is RX/TX/MISC
   3828 */
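/*
** Illustrative note: each VTIVAR register holds four 8-bit vector
** entries, covering the RX and TX causes of two queues, which is what
** the (16 * (entry & 1)) + (8 * type) byte index below selects.  For
** example, entry 3 with type 1 (TX) lands in the top byte of VTIVAR(1).
*/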
   3829 static void
   3830 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3831 {
   3832 	struct ixgbe_hw *hw = &adapter->hw;
   3833 	u32 ivar, index;
   3834 
   3835 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3836 
   3837 	if (type == -1) { /* MISC IVAR */
   3838 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   3839 		ivar &= ~0xFF;
   3840 		ivar |= vector;
   3841 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   3842 	} else {	/* RX/TX IVARS */
   3843 		index = (16 * (entry & 1)) + (8 * type);
   3844 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   3845 		ivar &= ~(0xFF << index);
   3846 		ivar |= (vector << index);
   3847 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   3848 	}
   3849 }
   3850 
   3851 static void
   3852 ixv_configure_ivars(struct adapter *adapter)
   3853 {
   3854 	struct  ix_queue *que = adapter->queues;
   3855 
   3856         for (int i = 0; i < adapter->num_queues; i++, que++) {
   3857 		/* First the RX queue entry */
   3858                 ixv_set_ivar(adapter, i, que->msix, 0);
   3859 		/* ... and the TX */
   3860 		ixv_set_ivar(adapter, i, que->msix, 1);
   3861 		/* Set an initial value in EITR */
   3862                 IXGBE_WRITE_REG(&adapter->hw,
   3863                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   3864 	}
   3865 
   3866 	/* For the Link interrupt */
   3867         ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
   3868 }
   3869 
   3870 
   3871 /*
   3872 ** Tasklet handler for MSIX MBX interrupts
   3873 **  - do outside interrupt since it might sleep
   3874 */
   3875 static void
   3876 ixv_handle_mbx(void *context)
   3877 {
   3878 	struct adapter  *adapter = context;
   3879 
   3880 	ixgbe_check_link(&adapter->hw,
   3881 	    &adapter->link_speed, &adapter->link_up, 0);
   3882 	ixv_update_link_status(adapter);
   3883 }
   3884 
   3885 /*
    3886 ** The VF stats registers never have a truly virgin
   3887 ** starting point, so this routine tries to make an
   3888 ** artificial one, marking ground zero on attach as
   3889 ** it were.
   3890 */
   3891 static void
   3892 ixv_save_stats(struct adapter *adapter)
   3893 {
   3894 	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
   3895 		adapter->stats.saved_reset_vfgprc +=
   3896 		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
   3897 		adapter->stats.saved_reset_vfgptc +=
   3898 		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
   3899 		adapter->stats.saved_reset_vfgorc +=
   3900 		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
   3901 		adapter->stats.saved_reset_vfgotc +=
   3902 		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
   3903 		adapter->stats.saved_reset_vfmprc +=
   3904 		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
   3905 	}
   3906 }
   3907 
   3908 static void
   3909 ixv_init_stats(struct adapter *adapter)
   3910 {
   3911 	struct ixgbe_hw *hw = &adapter->hw;
   3912 
   3913 	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   3914 	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   3915 	adapter->stats.last_vfgorc |=
   3916 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   3917 
   3918 	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   3919 	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   3920 	adapter->stats.last_vfgotc |=
   3921 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   3922 
   3923 	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   3924 
   3925 	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
   3926 	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
   3927 	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
   3928 	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
   3929 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
   3930 }
   3931 
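/*
** The VF counters are read-only hardware registers of limited width
** (32 bits, or 36 bits split across LSB/MSB pairs).  These macros fold
** each raw reading into a wider software counter: if the register value
** went backwards it is assumed to have wrapped, so one full register
** period is added to the accumulated high-order bits before the low
** bits are refreshed from the current reading.  For example, last ==
** 0xFFFFFFF0 and current == 0x10 adds 0x100000000 to the 32-bit count.
*/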
   3932 #define UPDATE_STAT_32(reg, last, count)		\
   3933 {							\
   3934 	u32 current = IXGBE_READ_REG(hw, reg);		\
   3935 	if (current < last)				\
   3936 		count += 0x100000000LL;			\
   3937 	last = current;					\
   3938 	count &= 0xFFFFFFFF00000000LL;			\
   3939 	count |= current;				\
   3940 }
   3941 
   3942 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
   3943 {							\
   3944 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
   3945 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
   3946 	u64 current = ((cur_msb << 32) | cur_lsb);	\
   3947 	if (current < last)				\
   3948 		count += 0x1000000000LL;		\
   3949 	last = current;					\
   3950 	count &= 0xFFFFFFF000000000LL;			\
   3951 	count |= current;				\
   3952 }
   3953 
   3954 /*
   3955 ** ixv_update_stats - Update the board statistics counters.
   3956 */
   3957 void
   3958 ixv_update_stats(struct adapter *adapter)
   3959 {
   3960         struct ixgbe_hw *hw = &adapter->hw;
   3961 
   3962         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
   3963 	    adapter->stats.vfgprc);
   3964         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
   3965 	    adapter->stats.vfgptc);
   3966         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
   3967 	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
   3968         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
   3969 	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
   3970         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
   3971 	    adapter->stats.vfmprc);
   3972 }
   3973 
   3974 /**********************************************************************
   3975  *
    3976  *  This routine is invoked from the ixv_sysctl_stats sysctl handler.
   3977  *  This routine provides a way to take a look at important statistics
   3978  *  maintained by the driver and hardware.
   3979  *
   3980  **********************************************************************/
   3981 static void
   3982 ixv_print_hw_stats(struct adapter * adapter)
   3983 {
   3984         device_t dev = adapter->dev;
   3985 
    3986         device_printf(dev, "Std Mbuf Failed = %llu\n",
    3987                (unsigned long long)adapter->mbuf_defrag_failed.ev_count);
    3988         device_printf(dev, "Driver dropped packets = %llu\n",
    3989                (unsigned long long)adapter->dropped_pkts.ev_count);
    3990         device_printf(dev, "watchdog timeouts = %llu\n",
    3991                (unsigned long long)adapter->watchdog_events.ev_count);
   3992 
   3993         device_printf(dev,"Good Packets Rcvd = %llu\n",
   3994                (long long)adapter->stats.vfgprc);
   3995         device_printf(dev,"Good Packets Xmtd = %llu\n",
   3996                (long long)adapter->stats.vfgptc);
    3997         device_printf(dev, "TSO Transmissions = %llu\n",
    3998                (unsigned long long)adapter->tso_tx.ev_count);
   3999 
   4000 }
   4001 
   4002 /**********************************************************************
   4003  *
    4004  *  This routine is invoked from the ixv_sysctl_debug sysctl handler.
   4005  *  This routine provides a way to take a look at important statistics
   4006  *  maintained by the driver and hardware.
   4007  *
   4008  **********************************************************************/
   4009 static void
   4010 ixv_print_debug_info(struct adapter *adapter)
   4011 {
   4012         device_t dev = adapter->dev;
   4013         struct ixgbe_hw         *hw = &adapter->hw;
   4014         struct ix_queue         *que = adapter->queues;
   4015         struct rx_ring          *rxr;
   4016         struct tx_ring          *txr;
   4017 #ifdef LRO
   4018         struct lro_ctrl         *lro;
   4019 #endif /* LRO */
   4020 
   4021         device_printf(dev,"Error Byte Count = %u \n",
   4022             IXGBE_READ_REG(hw, IXGBE_ERRBC));
   4023 
   4024         for (int i = 0; i < adapter->num_queues; i++, que++) {
   4025                 txr = que->txr;
   4026                 rxr = que->rxr;
   4027 #ifdef LRO
   4028                 lro = &rxr->lro;
   4029 #endif /* LRO */
   4030                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
   4031                     que->msix, (long)que->irqs);
   4032                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
   4033                     rxr->me, (long long)rxr->rx_packets.ev_count);
   4034                 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
   4035                     rxr->me, (long long)rxr->rx_split_packets.ev_count);
   4036                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
   4037                     rxr->me, (long)rxr->rx_bytes.ev_count);
   4038 #ifdef LRO
   4039                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
   4040                     rxr->me, lro->lro_queued);
   4041                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
   4042                     rxr->me, lro->lro_flushed);
   4043 #endif /* LRO */
   4044                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
   4045                     txr->me, (long)txr->total_packets.ev_count);
   4046                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
   4047                     txr->me, (long)txr->no_desc_avail.ev_count);
   4048         }
   4049 
   4050         device_printf(dev,"MBX IRQ Handled: %lu\n",
   4051             (long)adapter->mbx_irq.ev_count);
   4052         return;
   4053 }
   4054 
   4055 static int
   4056 ixv_sysctl_stats(SYSCTLFN_ARGS)
   4057 {
   4058 	struct sysctlnode node;
   4059 	int             error;
   4060 	int		result;
   4061 	struct adapter *adapter;
   4062 
   4063 	node = *rnode;
   4064 	adapter = (struct adapter *)node.sysctl_data;
   4065 	node.sysctl_data = &result;
   4066 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4067 	if (error != 0)
   4068 		return error;
   4069 
   4070 	if (result == 1)
   4071 		ixv_print_hw_stats(adapter);
   4072 
   4073 	return 0;
   4074 }
   4075 
   4076 static int
   4077 ixv_sysctl_debug(SYSCTLFN_ARGS)
   4078 {
   4079 	struct sysctlnode node;
   4080 	int error, result;
   4081 	struct adapter *adapter;
   4082 
   4083 	node = *rnode;
   4084 	adapter = (struct adapter *)node.sysctl_data;
   4085 	node.sysctl_data = &result;
   4086 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4087 
   4088 	if (error)
   4089 		return error;
   4090 
   4091 	if (result == 1)
   4092 		ixv_print_debug_info(adapter);
   4093 
   4094 	return 0;
   4095 }
   4096 
   4097 /*
   4098 ** Set flow control using sysctl:
   4099 ** Flow control values:
   4100 ** 	0 - off
   4101 **	1 - rx pause
   4102 **	2 - tx pause
   4103 **	3 - full
   4104 */
   4105 static int
   4106 ixv_set_flowcntl(SYSCTLFN_ARGS)
   4107 {
   4108 	struct sysctlnode node;
   4109 	int error;
   4110 	struct adapter *adapter;
   4111 
   4112 	node = *rnode;
   4113 	adapter = (struct adapter *)node.sysctl_data;
   4114 	node.sysctl_data = &ixv_flow_control;
   4115 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4116 
   4117 	if (error)
   4118 		return (error);
   4119 
   4120 	switch (ixv_flow_control) {
   4121 		case ixgbe_fc_rx_pause:
   4122 		case ixgbe_fc_tx_pause:
   4123 		case ixgbe_fc_full:
   4124 			adapter->hw.fc.requested_mode = ixv_flow_control;
   4125 			break;
   4126 		case ixgbe_fc_none:
   4127 		default:
   4128 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4129 	}
   4130 
   4131 	ixgbe_fc_enable(&adapter->hw, 0);
   4132 	return error;
   4133 }
   4134 
   4135 const struct sysctlnode *
   4136 ixv_sysctl_instance(struct adapter *adapter)
   4137 {
   4138 	const char *dvname;
   4139 	struct sysctllog **log;
   4140 	int rc;
   4141 	const struct sysctlnode *rnode;
   4142 
   4143 	log = &adapter->sysctllog;
   4144 	dvname = device_xname(adapter->dev);
   4145 
   4146 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   4147 	    0, CTLTYPE_NODE, dvname,
   4148 	    SYSCTL_DESCR("ixv information and settings"),
   4149 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   4150 		goto err;
   4151 
   4152 	return rnode;
   4153 err:
   4154 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   4155 	return NULL;
   4156 }
   4157 
   4158 static void
   4159 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
   4160         const char *description, int *limit, int value)
   4161 {
   4162 	const struct sysctlnode *rnode, *cnode;
   4163 	struct sysctllog **log = &adapter->sysctllog;
   4164 
   4165         *limit = value;
   4166 
   4167 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL)
   4168 		aprint_error_dev(adapter->dev,
   4169 		    "could not create sysctl root\n");
   4170 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   4171 	    CTLFLAG_READWRITE,
   4172 	    CTLTYPE_INT,
   4173 	    name, SYSCTL_DESCR(description),
   4174 	    NULL, 0, limit, 0,
   4175 	    CTL_CREATE, CTL_EOL) != 0) {
    4176 		aprint_error_dev(adapter->dev, "%s: could not create sysctl\n",
   4177 		    __func__);
   4178 	}
   4179 }
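/*
** Typical use (illustrative only; the actual call site is earlier in
** this file and may differ): something like
**   ixv_add_rx_process_limit(adapter, "rx_processing_limit",
**       "max number of rx packets to process", &adapter->rx_process_limit,
**       ixv_rx_process_limit);
** which then appears under the device's node (CTL_HW) in the sysctl tree.
*/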
   4180 
   4181