Home | History | Annotate | Line # | Download | only in ixgbe
ixv.c revision 1.7
      1  1.1   dyoung /******************************************************************************
      2  1.1   dyoung 
      3  1.5  msaitoh   Copyright (c) 2001-2012, Intel Corporation
      4  1.1   dyoung   All rights reserved.
      5  1.1   dyoung 
      6  1.1   dyoung   Redistribution and use in source and binary forms, with or without
      7  1.1   dyoung   modification, are permitted provided that the following conditions are met:
      8  1.1   dyoung 
      9  1.1   dyoung    1. Redistributions of source code must retain the above copyright notice,
     10  1.1   dyoung       this list of conditions and the following disclaimer.
     11  1.1   dyoung 
     12  1.1   dyoung    2. Redistributions in binary form must reproduce the above copyright
     13  1.1   dyoung       notice, this list of conditions and the following disclaimer in the
     14  1.1   dyoung       documentation and/or other materials provided with the distribution.
     15  1.1   dyoung 
     16  1.1   dyoung    3. Neither the name of the Intel Corporation nor the names of its
     17  1.1   dyoung       contributors may be used to endorse or promote products derived from
     18  1.1   dyoung       this software without specific prior written permission.
     19  1.1   dyoung 
     20  1.1   dyoung   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21  1.1   dyoung   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22  1.1   dyoung   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23  1.1   dyoung   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24  1.1   dyoung   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  1.1   dyoung   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  1.1   dyoung   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  1.1   dyoung   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  1.1   dyoung   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  1.1   dyoung   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  1.1   dyoung   POSSIBILITY OF SUCH DAMAGE.
     31  1.1   dyoung 
     32  1.1   dyoung ******************************************************************************/
     33  1.7  msaitoh /*$FreeBSD: head/sys/dev/ixgbe/ixv.c 241917 2012-10-22 22:29:48Z eadler $*/
     34  1.7  msaitoh /*$NetBSD: ixv.c,v 1.7 2015/04/14 07:17:06 msaitoh Exp $*/
     35  1.1   dyoung 
     36  1.1   dyoung #include "opt_inet.h"
     37  1.4  msaitoh #include "opt_inet6.h"
     38  1.1   dyoung 
     39  1.1   dyoung #include "ixv.h"
     40  1.1   dyoung 
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.1.4";	/* printed in the attach banner */
     45  1.1   dyoung 
     46  1.1   dyoung /*********************************************************************
     47  1.1   dyoung  *  PCI Device ID Table
     48  1.1   dyoung  *
     49  1.1   dyoung  *  Used by probe to select devices to load on
     50  1.1   dyoung  *  Last field stores an index into ixv_strings
     51  1.1   dyoung  *  Last entry must be all 0s
     52  1.1   dyoung  *
     53  1.1   dyoung  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     54  1.1   dyoung  *********************************************************************/
     55  1.1   dyoung 
/* PCI IDs accepted by ixv_lookup()/ixv_probe(); the final index field
 * selects the branding string in ixv_strings[] below.  A subvendor or
 * subdevice of 0 acts as a wildcard in the match. */
static ixv_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static const char    *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
     71  1.1   dyoung 
     72  1.1   dyoung /*********************************************************************
     73  1.1   dyoung  *  Function prototypes
     74  1.1   dyoung  *********************************************************************/
     75  1.3  msaitoh static int      ixv_probe(device_t, cfdata_t, void *);
     76  1.3  msaitoh static void      ixv_attach(device_t, device_t, void *);
     77  1.3  msaitoh static int      ixv_detach(device_t, int);
     78  1.3  msaitoh #if 0
     79  1.1   dyoung static int      ixv_shutdown(device_t);
     80  1.3  msaitoh #endif
     81  1.1   dyoung #if __FreeBSD_version < 800000
     82  1.1   dyoung static void     ixv_start(struct ifnet *);
     83  1.1   dyoung static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
     84  1.1   dyoung #else
     85  1.1   dyoung static int	ixv_mq_start(struct ifnet *, struct mbuf *);
     86  1.1   dyoung static int	ixv_mq_start_locked(struct ifnet *,
     87  1.1   dyoung 		    struct tx_ring *, struct mbuf *);
     88  1.1   dyoung static void	ixv_qflush(struct ifnet *);
     89  1.1   dyoung #endif
     90  1.3  msaitoh static int      ixv_ioctl(struct ifnet *, u_long, void *);
     91  1.3  msaitoh static int	ixv_init(struct ifnet *);
     92  1.1   dyoung static void	ixv_init_locked(struct adapter *);
     93  1.1   dyoung static void     ixv_stop(void *);
     94  1.1   dyoung static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     95  1.1   dyoung static int      ixv_media_change(struct ifnet *);
     96  1.1   dyoung static void     ixv_identify_hardware(struct adapter *);
     97  1.3  msaitoh static int      ixv_allocate_pci_resources(struct adapter *,
     98  1.3  msaitoh 		    const struct pci_attach_args *);
     99  1.1   dyoung static int      ixv_allocate_msix(struct adapter *);
    100  1.1   dyoung static int	ixv_allocate_queues(struct adapter *);
    101  1.1   dyoung static int	ixv_setup_msix(struct adapter *);
    102  1.1   dyoung static void	ixv_free_pci_resources(struct adapter *);
    103  1.1   dyoung static void     ixv_local_timer(void *);
    104  1.1   dyoung static void     ixv_setup_interface(device_t, struct adapter *);
    105  1.1   dyoung static void     ixv_config_link(struct adapter *);
    106  1.1   dyoung 
    107  1.1   dyoung static int      ixv_allocate_transmit_buffers(struct tx_ring *);
    108  1.1   dyoung static int	ixv_setup_transmit_structures(struct adapter *);
    109  1.1   dyoung static void	ixv_setup_transmit_ring(struct tx_ring *);
    110  1.1   dyoung static void     ixv_initialize_transmit_units(struct adapter *);
    111  1.1   dyoung static void     ixv_free_transmit_structures(struct adapter *);
    112  1.1   dyoung static void     ixv_free_transmit_buffers(struct tx_ring *);
    113  1.1   dyoung 
    114  1.1   dyoung static int      ixv_allocate_receive_buffers(struct rx_ring *);
    115  1.1   dyoung static int      ixv_setup_receive_structures(struct adapter *);
    116  1.1   dyoung static int	ixv_setup_receive_ring(struct rx_ring *);
    117  1.1   dyoung static void     ixv_initialize_receive_units(struct adapter *);
    118  1.1   dyoung static void     ixv_free_receive_structures(struct adapter *);
    119  1.1   dyoung static void     ixv_free_receive_buffers(struct rx_ring *);
    120  1.1   dyoung 
    121  1.1   dyoung static void     ixv_enable_intr(struct adapter *);
    122  1.1   dyoung static void     ixv_disable_intr(struct adapter *);
    123  1.1   dyoung static bool	ixv_txeof(struct tx_ring *);
    124  1.1   dyoung static bool	ixv_rxeof(struct ix_queue *, int);
    125  1.3  msaitoh static void	ixv_rx_checksum(u32, struct mbuf *, u32,
    126  1.3  msaitoh 		    struct ixgbevf_hw_stats *);
    127  1.1   dyoung static void     ixv_set_multi(struct adapter *);
    128  1.1   dyoung static void     ixv_update_link_status(struct adapter *);
    129  1.1   dyoung static void	ixv_refresh_mbufs(struct rx_ring *, int);
    130  1.3  msaitoh static int      ixv_xmit(struct tx_ring *, struct mbuf *);
    131  1.3  msaitoh static int	ixv_sysctl_stats(SYSCTLFN_PROTO);
    132  1.3  msaitoh static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    133  1.3  msaitoh static int	ixv_set_flowcntl(SYSCTLFN_PROTO);
    134  1.1   dyoung static int	ixv_dma_malloc(struct adapter *, bus_size_t,
    135  1.1   dyoung 		    struct ixv_dma_alloc *, int);
    136  1.1   dyoung static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
    137  1.1   dyoung static void	ixv_add_rx_process_limit(struct adapter *, const char *,
    138  1.1   dyoung 		    const char *, int *, int);
    139  1.3  msaitoh static u32	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
    140  1.1   dyoung static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
    141  1.1   dyoung static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    142  1.1   dyoung static void	ixv_configure_ivars(struct adapter *);
    143  1.1   dyoung static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    144  1.1   dyoung 
    145  1.1   dyoung static void	ixv_setup_vlan_support(struct adapter *);
    146  1.3  msaitoh #if 0
    147  1.1   dyoung static void	ixv_register_vlan(void *, struct ifnet *, u16);
    148  1.1   dyoung static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    149  1.3  msaitoh #endif
    150  1.1   dyoung 
    151  1.1   dyoung static void	ixv_save_stats(struct adapter *);
    152  1.1   dyoung static void	ixv_init_stats(struct adapter *);
    153  1.1   dyoung static void	ixv_update_stats(struct adapter *);
    154  1.1   dyoung 
    155  1.1   dyoung static __inline void ixv_rx_discard(struct rx_ring *, int);
    156  1.1   dyoung static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
    157  1.1   dyoung 		    struct mbuf *, u32);
    158  1.1   dyoung 
    159  1.1   dyoung /* The MSI/X Interrupt handlers */
    160  1.1   dyoung static void	ixv_msix_que(void *);
    161  1.1   dyoung static void	ixv_msix_mbx(void *);
    162  1.1   dyoung 
    163  1.1   dyoung /* Deferred interrupt tasklets */
    164  1.3  msaitoh static void	ixv_handle_que(void *);
    165  1.3  msaitoh static void	ixv_handle_mbx(void *);
    166  1.3  msaitoh 
    167  1.3  msaitoh const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    168  1.3  msaitoh static ixv_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    169  1.1   dyoung 
    170  1.1   dyoung /*********************************************************************
    171  1.1   dyoung  *  FreeBSD Device Interface Entry Points
    172  1.1   dyoung  *********************************************************************/
    173  1.1   dyoung 
/* NetBSD autoconf attachment: wires ixv_probe/ixv_attach/ixv_detach into
 * the device tree.  DVF_DETACH_SHUTDOWN lets the kernel detach this
 * device at system shutdown instead of a dedicated shutdown method. */
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/* FreeBSD newbus glue, kept for reference only (compiled out). */
# if 0
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	{0, 0}
};
#endif

#if 0
static driver_t ixv_driver = {
	"ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif
    199  1.1   dyoung 
/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
/* NOTE(review): TUNABLE_INT is defined away to nothing here, so the
 * FreeBSD loader-tunable registrations below are no-ops on NetBSD;
 * these defaults can only be changed at runtime via the sysctl nodes
 * created in ixv_sysctl_attach(). */
#define	TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];

/* Keep running tab on them for sanity check */
static int ixv_total_ports;
    252  1.3  msaitoh 
    253  1.1   dyoung /*********************************************************************
    254  1.1   dyoung  *  Device identification routine
    255  1.1   dyoung  *
    256  1.1   dyoung  *  ixv_probe determines if the driver should be loaded on
    257  1.1   dyoung  *  adapter based on PCI vendor/device id of the adapter.
    258  1.1   dyoung  *
    259  1.4  msaitoh  *  return 1 on success, 0 on failure
    260  1.1   dyoung  *********************************************************************/
    261  1.1   dyoung 
    262  1.1   dyoung static int
    263  1.3  msaitoh ixv_probe(device_t dev, cfdata_t cf, void *aux)
    264  1.3  msaitoh {
    265  1.3  msaitoh 	const struct pci_attach_args *pa = aux;
    266  1.3  msaitoh 
    267  1.3  msaitoh 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    268  1.3  msaitoh }
    269  1.3  msaitoh 
    270  1.3  msaitoh static ixv_vendor_info_t *
    271  1.3  msaitoh ixv_lookup(const struct pci_attach_args *pa)
    272  1.1   dyoung {
    273  1.3  msaitoh 	pcireg_t subid;
    274  1.1   dyoung 	ixv_vendor_info_t *ent;
    275  1.1   dyoung 
    276  1.3  msaitoh 	INIT_DEBUGOUT("ixv_probe: begin");
    277  1.1   dyoung 
    278  1.3  msaitoh 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    279  1.3  msaitoh 		return NULL;
    280  1.1   dyoung 
    281  1.3  msaitoh 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    282  1.1   dyoung 
    283  1.3  msaitoh 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    284  1.3  msaitoh 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    285  1.3  msaitoh 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    286  1.1   dyoung 
    287  1.3  msaitoh 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    288  1.1   dyoung 		     (ent->subvendor_id == 0)) &&
    289  1.1   dyoung 
    290  1.3  msaitoh 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    291  1.1   dyoung 		     (ent->subdevice_id == 0))) {
    292  1.3  msaitoh 			++ixv_total_ports;
    293  1.3  msaitoh 			return ent;
    294  1.1   dyoung 		}
    295  1.1   dyoung 	}
    296  1.3  msaitoh 	return NULL;
    297  1.3  msaitoh }
    298  1.3  msaitoh 
    299  1.3  msaitoh 
    300  1.3  msaitoh static void
    301  1.3  msaitoh ixv_sysctl_attach(struct adapter *adapter)
    302  1.3  msaitoh {
    303  1.3  msaitoh 	struct sysctllog **log;
    304  1.3  msaitoh 	const struct sysctlnode *rnode, *cnode;
    305  1.3  msaitoh 	device_t dev;
    306  1.3  msaitoh 
    307  1.3  msaitoh 	dev = adapter->dev;
    308  1.3  msaitoh 	log = &adapter->sysctllog;
    309  1.3  msaitoh 
    310  1.3  msaitoh 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    311  1.3  msaitoh 		aprint_error_dev(dev, "could not create sysctl root\n");
    312  1.3  msaitoh 		return;
    313  1.3  msaitoh 	}
    314  1.3  msaitoh 
    315  1.3  msaitoh 	if (sysctl_createv(log, 0, &rnode, &cnode,
    316  1.3  msaitoh 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    317  1.3  msaitoh 	    "stats", SYSCTL_DESCR("Statistics"),
    318  1.3  msaitoh 	    ixv_sysctl_stats, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    319  1.3  msaitoh 		aprint_error_dev(dev, "could not create sysctl\n");
    320  1.3  msaitoh 
    321  1.3  msaitoh 	if (sysctl_createv(log, 0, &rnode, &cnode,
    322  1.3  msaitoh 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    323  1.3  msaitoh 	    "debug", SYSCTL_DESCR("Debug Info"),
    324  1.3  msaitoh 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    325  1.3  msaitoh 		aprint_error_dev(dev, "could not create sysctl\n");
    326  1.3  msaitoh 
    327  1.3  msaitoh 	if (sysctl_createv(log, 0, &rnode, &cnode,
    328  1.3  msaitoh 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    329  1.3  msaitoh 	    "flow_control", SYSCTL_DESCR("Flow Control"),
    330  1.3  msaitoh 	    ixv_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    331  1.3  msaitoh 		aprint_error_dev(dev, "could not create sysctl\n");
    332  1.3  msaitoh 
    333  1.3  msaitoh 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    334  1.3  msaitoh 	 * XXX It's that way in the FreeBSD driver that this derives from.
    335  1.3  msaitoh 	 */
    336  1.3  msaitoh 	if (sysctl_createv(log, 0, &rnode, &cnode,
    337  1.3  msaitoh 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    338  1.3  msaitoh 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    339  1.3  msaitoh 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    340  1.3  msaitoh 		aprint_error_dev(dev, "could not create sysctl\n");
    341  1.1   dyoung }
    342  1.1   dyoung 
    343  1.1   dyoung /*********************************************************************
    344  1.1   dyoung  *  Device initialization routine
    345  1.1   dyoung  *
    346  1.1   dyoung  *  The attach entry point is called when the driver is being loaded.
    347  1.1   dyoung  *  This routine identifies the type of hardware, allocates all resources
    348  1.1   dyoung  *  and initializes the hardware.
    349  1.1   dyoung  *
    350  1.1   dyoung  *  return 0 on success, positive on failure
    351  1.1   dyoung  *********************************************************************/
    352  1.1   dyoung 
    353  1.3  msaitoh static void
    354  1.3  msaitoh ixv_attach(device_t parent, device_t dev, void *aux)
    355  1.1   dyoung {
    356  1.1   dyoung 	struct adapter *adapter;
    357  1.1   dyoung 	struct ixgbe_hw *hw;
    358  1.1   dyoung 	int             error = 0;
    359  1.3  msaitoh 	ixv_vendor_info_t *ent;
    360  1.3  msaitoh 	const struct pci_attach_args *pa = aux;
    361  1.1   dyoung 
    362  1.1   dyoung 	INIT_DEBUGOUT("ixv_attach: begin");
    363  1.1   dyoung 
    364  1.1   dyoung 	/* Allocate, clear, and link in our adapter structure */
    365  1.3  msaitoh 	adapter = device_private(dev);
    366  1.1   dyoung 	adapter->dev = adapter->osdep.dev = dev;
    367  1.1   dyoung 	hw = &adapter->hw;
    368  1.1   dyoung 
    369  1.3  msaitoh 	ent = ixv_lookup(pa);
    370  1.3  msaitoh 
    371  1.3  msaitoh 	KASSERT(ent != NULL);
    372  1.3  msaitoh 
    373  1.3  msaitoh 	aprint_normal(": %s, Version - %s\n",
    374  1.3  msaitoh 	    ixv_strings[ent->index], ixv_driver_version);
    375  1.3  msaitoh 
    376  1.1   dyoung 	/* Core Lock Init*/
    377  1.3  msaitoh 	IXV_CORE_LOCK_INIT(adapter, device_xname(dev));
    378  1.1   dyoung 
    379  1.1   dyoung 	/* SYSCTL APIs */
    380  1.3  msaitoh 	ixv_sysctl_attach(adapter);
    381  1.1   dyoung 
    382  1.1   dyoung 	/* Set up the timer callout */
    383  1.3  msaitoh 	callout_init(&adapter->timer, 0);
    384  1.1   dyoung 
    385  1.1   dyoung 	/* Determine hardware revision */
    386  1.1   dyoung 	ixv_identify_hardware(adapter);
    387  1.1   dyoung 
    388  1.1   dyoung 	/* Do base PCI setup - map BAR0 */
    389  1.3  msaitoh 	if (ixv_allocate_pci_resources(adapter, pa)) {
    390  1.3  msaitoh 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    391  1.1   dyoung 		error = ENXIO;
    392  1.1   dyoung 		goto err_out;
    393  1.1   dyoung 	}
    394  1.1   dyoung 
    395  1.1   dyoung 	/* Do descriptor calc and sanity checks */
    396  1.1   dyoung 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    397  1.1   dyoung 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    398  1.3  msaitoh 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    399  1.1   dyoung 		adapter->num_tx_desc = DEFAULT_TXD;
    400  1.1   dyoung 	} else
    401  1.1   dyoung 		adapter->num_tx_desc = ixv_txd;
    402  1.1   dyoung 
    403  1.1   dyoung 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    404  1.1   dyoung 	    ixv_rxd < MIN_TXD || ixv_rxd > MAX_TXD) {
    405  1.3  msaitoh 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    406  1.1   dyoung 		adapter->num_rx_desc = DEFAULT_RXD;
    407  1.1   dyoung 	} else
    408  1.1   dyoung 		adapter->num_rx_desc = ixv_rxd;
    409  1.1   dyoung 
    410  1.1   dyoung 	/* Allocate our TX/RX Queues */
    411  1.1   dyoung 	if (ixv_allocate_queues(adapter)) {
    412  1.1   dyoung 		error = ENOMEM;
    413  1.1   dyoung 		goto err_out;
    414  1.1   dyoung 	}
    415  1.1   dyoung 
    416  1.1   dyoung 	/*
    417  1.1   dyoung 	** Initialize the shared code: its
    418  1.1   dyoung 	** at this point the mac type is set.
    419  1.1   dyoung 	*/
    420  1.1   dyoung 	error = ixgbe_init_shared_code(hw);
    421  1.1   dyoung 	if (error) {
    422  1.3  msaitoh 		aprint_error_dev(dev,"Shared Code Initialization Failure\n");
    423  1.1   dyoung 		error = EIO;
    424  1.1   dyoung 		goto err_late;
    425  1.1   dyoung 	}
    426  1.1   dyoung 
    427  1.1   dyoung 	/* Setup the mailbox */
    428  1.1   dyoung 	ixgbe_init_mbx_params_vf(hw);
    429  1.1   dyoung 
    430  1.1   dyoung 	ixgbe_reset_hw(hw);
    431  1.1   dyoung 
    432  1.1   dyoung 	/* Get Hardware Flow Control setting */
    433  1.1   dyoung 	hw->fc.requested_mode = ixgbe_fc_full;
    434  1.1   dyoung 	hw->fc.pause_time = IXV_FC_PAUSE;
    435  1.6  msaitoh 	hw->fc.low_water[0] = IXV_FC_LO;
    436  1.5  msaitoh 	hw->fc.high_water[0] = IXV_FC_HI;
    437  1.1   dyoung 	hw->fc.send_xon = TRUE;
    438  1.1   dyoung 
    439  1.1   dyoung 	error = ixgbe_init_hw(hw);
    440  1.1   dyoung 	if (error) {
    441  1.3  msaitoh 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    442  1.1   dyoung 		error = EIO;
    443  1.1   dyoung 		goto err_late;
    444  1.1   dyoung 	}
    445  1.1   dyoung 
    446  1.1   dyoung 	error = ixv_allocate_msix(adapter);
    447  1.1   dyoung 	if (error)
    448  1.1   dyoung 		goto err_late;
    449  1.1   dyoung 
    450  1.1   dyoung 	/* Setup OS specific network interface */
    451  1.1   dyoung 	ixv_setup_interface(dev, adapter);
    452  1.1   dyoung 
    453  1.1   dyoung 	/* Sysctl for limiting the amount of work done in the taskqueue */
    454  1.1   dyoung 	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
    455  1.1   dyoung 	    "max number of rx packets to process", &adapter->rx_process_limit,
    456  1.1   dyoung 	    ixv_rx_process_limit);
    457  1.1   dyoung 
    458  1.1   dyoung 	/* Do the stats setup */
    459  1.1   dyoung 	ixv_save_stats(adapter);
    460  1.1   dyoung 	ixv_init_stats(adapter);
    461  1.1   dyoung 
    462  1.1   dyoung 	/* Register for VLAN events */
    463  1.3  msaitoh #if 0 /* XXX msaitoh delete after write? */
    464  1.1   dyoung 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    465  1.1   dyoung 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    466  1.1   dyoung 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    467  1.1   dyoung 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    468  1.3  msaitoh #endif
    469  1.1   dyoung 
    470  1.1   dyoung 	INIT_DEBUGOUT("ixv_attach: end");
    471  1.3  msaitoh 	return;
    472  1.1   dyoung 
    473  1.1   dyoung err_late:
    474  1.1   dyoung 	ixv_free_transmit_structures(adapter);
    475  1.1   dyoung 	ixv_free_receive_structures(adapter);
    476  1.1   dyoung err_out:
    477  1.1   dyoung 	ixv_free_pci_resources(adapter);
    478  1.3  msaitoh 	return;
    479  1.1   dyoung 
    480  1.1   dyoung }
    481  1.1   dyoung 
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		/* Forced or shutdown detach: tear the VLANs down ourselves. */
		vlan_ifdetach(adapter->ifp);
	else {
		/* Ordinary detach with VLANs still configured: refuse. */
		aprint_error_dev(dev, "VLANs in use\n");
		return EBUSY;
	}

	/* Quiesce the hardware before freeing anything it might touch. */
	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);

	/* Tear down the per-queue deferred-interrupt handlers. */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		softint_disestablish(que->que_si);
	}

	/* Drain the Link queue */
	softint_disestablish(adapter->mbx_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	/* Detach from the network stack, stop the watchdog callout, then
	 * release PCI resources before freeing the descriptor rings. */
	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);
	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);

	IXV_CORE_LOCK_DESTROY(adapter);
	return (0);
}
    543  1.1   dyoung 
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/* FreeBSD-only (compiled out): quiesce the adapter at system shutdown. */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);
	return (0);
}
#endif
    560  1.1   dyoung 
    561  1.1   dyoung #if __FreeBSD_version < 800000
    562  1.1   dyoung /*********************************************************************
    563  1.1   dyoung  *  Transmit entry point
    564  1.1   dyoung  *
    565  1.1   dyoung  *  ixv_start is called by the stack to initiate a transmit.
    566  1.1   dyoung  *  The driver will remain in this routine as long as there are
    567  1.1   dyoung  *  packets to transmit and transmit resources are available.
    568  1.1   dyoung  *  In case resources are not available stack is notified and
    569  1.1   dyoung  *  the packet is requeued.
    570  1.1   dyoung  **********************************************************************/
    571  1.1   dyoung static void
    572  1.1   dyoung ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
    573  1.1   dyoung {
    574  1.3  msaitoh 	int rc;
    575  1.1   dyoung 	struct mbuf    *m_head;
    576  1.1   dyoung 	struct adapter *adapter = txr->adapter;
    577  1.1   dyoung 
    578  1.1   dyoung 	IXV_TX_LOCK_ASSERT(txr);
    579  1.1   dyoung 
    580  1.3  msaitoh 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
    581  1.3  msaitoh 	    IFF_RUNNING)
    582  1.1   dyoung 		return;
    583  1.1   dyoung 	if (!adapter->link_active)
    584  1.1   dyoung 		return;
    585  1.1   dyoung 
    586  1.3  msaitoh 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    587  1.1   dyoung 
    588  1.3  msaitoh 		IFQ_POLL(&ifp->if_snd, m_head);
    589  1.1   dyoung 		if (m_head == NULL)
    590  1.1   dyoung 			break;
    591  1.1   dyoung 
    592  1.3  msaitoh 		if (ixv_xmit(txr, m_head) == EAGAIN) {
    593  1.3  msaitoh 			ifp->if_flags |= IFF_OACTIVE;
    594  1.1   dyoung 			break;
    595  1.1   dyoung 		}
    596  1.3  msaitoh 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
    597  1.3  msaitoh 		if (rc == EFBIG) {
    598  1.3  msaitoh 			struct mbuf *mtmp;
    599  1.3  msaitoh 
    600  1.3  msaitoh 			if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) {
    601  1.3  msaitoh 				m_head = mtmp;
    602  1.3  msaitoh 				rc = ixv_xmit(txr, m_head);
    603  1.3  msaitoh 				if (rc != 0)
    604  1.3  msaitoh 					adapter->efbig2_tx_dma_setup.ev_count++;
    605  1.3  msaitoh 			} else
    606  1.3  msaitoh 				adapter->m_defrag_failed.ev_count++;
    607  1.3  msaitoh 		}
    608  1.3  msaitoh 		if (rc != 0) {
    609  1.3  msaitoh 			m_freem(m_head);
    610  1.3  msaitoh 			continue;
    611  1.3  msaitoh 		}
    612  1.1   dyoung 		/* Send a copy of the frame to the BPF listener */
    613  1.3  msaitoh 		bpf_mtap(ifp, m_head);
    614  1.1   dyoung 
    615  1.1   dyoung 		/* Set watchdog on */
    616  1.1   dyoung 		txr->watchdog_check = TRUE;
    617  1.3  msaitoh 		getmicrotime(&txr->watchdog_time);
    618  1.1   dyoung 	}
    619  1.1   dyoung 	return;
    620  1.1   dyoung }
    621  1.1   dyoung 
    622  1.1   dyoung /*
    623  1.1   dyoung  * Legacy TX start - called by the stack, this
    624  1.1   dyoung  * always uses the first tx ring, and should
    625  1.1   dyoung  * not be used with multiqueue tx enabled.
    626  1.1   dyoung  */
    627  1.1   dyoung static void
    628  1.1   dyoung ixv_start(struct ifnet *ifp)
    629  1.1   dyoung {
    630  1.1   dyoung 	struct adapter *adapter = ifp->if_softc;
    631  1.1   dyoung 	struct tx_ring	*txr = adapter->tx_rings;
    632  1.1   dyoung 
    633  1.3  msaitoh 	if (ifp->if_flags & IFF_RUNNING) {
    634  1.1   dyoung 		IXV_TX_LOCK(txr);
    635  1.1   dyoung 		ixv_start_locked(txr, ifp);
    636  1.1   dyoung 		IXV_TX_UNLOCK(txr);
    637  1.1   dyoung 	}
    638  1.1   dyoung 	return;
    639  1.1   dyoung }
    640  1.1   dyoung 
    641  1.1   dyoung #else
    642  1.1   dyoung 
    643  1.1   dyoung /*
    644  1.1   dyoung ** Multiqueue Transmit driver
    645  1.1   dyoung **
    646  1.1   dyoung */
    647  1.1   dyoung static int
    648  1.1   dyoung ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
    649  1.1   dyoung {
    650  1.1   dyoung 	struct adapter	*adapter = ifp->if_softc;
    651  1.1   dyoung 	struct ix_queue	*que;
    652  1.1   dyoung 	struct tx_ring	*txr;
    653  1.1   dyoung 	int 		i = 0, err = 0;
    654  1.1   dyoung 
    655  1.1   dyoung 	/* Which queue to use */
    656  1.1   dyoung 	if ((m->m_flags & M_FLOWID) != 0)
    657  1.1   dyoung 		i = m->m_pkthdr.flowid % adapter->num_queues;
    658  1.1   dyoung 
    659  1.1   dyoung 	txr = &adapter->tx_rings[i];
    660  1.1   dyoung 	que = &adapter->queues[i];
    661  1.1   dyoung 
    662  1.1   dyoung 	if (IXV_TX_TRYLOCK(txr)) {
    663  1.1   dyoung 		err = ixv_mq_start_locked(ifp, txr, m);
    664  1.1   dyoung 		IXV_TX_UNLOCK(txr);
    665  1.1   dyoung 	} else {
    666  1.1   dyoung 		err = drbr_enqueue(ifp, txr->br, m);
    667  1.3  msaitoh 		softint_schedule(que->que_si);
    668  1.1   dyoung 	}
    669  1.1   dyoung 
    670  1.1   dyoung 	return (err);
    671  1.1   dyoung }
    672  1.1   dyoung 
/*
 * Multiqueue transmit (locked): drain the per-ring buf_ring, plus the
 * optional new mbuf "m", onto the descriptor ring.  Returns 0 or the
 * last drbr/xmit error.  Caller holds the TX lock.
 */
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter  *adapter = txr->adapter;
        struct mbuf     *next;
        int             enqueued, err = 0;

	/* Not running, blocked, or no link: just queue the mbuf, if any. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Do a clean if descriptors are low */
	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
		ixv_txeof(txr);

	enqueued = 0;
	if (m == NULL) {
		/* Called from the softint: only drain what is queued. */
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		/* Preserve packet ordering: enqueue first, then drain. */
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixv_xmit(txr, next)) != 0) {
			/* Transmit failed: requeue the mbuf and stop. */
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
			/* Ring nearly full: flow-control the stack. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		getmicrotime(&txr->watchdog_time);
	}

	return (err);
}
    731  1.1   dyoung 
    732  1.1   dyoung /*
    733  1.1   dyoung ** Flush all ring buffers
    734  1.1   dyoung */
    735  1.1   dyoung static void
    736  1.1   dyoung ixv_qflush(struct ifnet *ifp)
    737  1.1   dyoung {
    738  1.1   dyoung 	struct adapter  *adapter = ifp->if_softc;
    739  1.1   dyoung 	struct tx_ring  *txr = adapter->tx_rings;
    740  1.1   dyoung 	struct mbuf     *m;
    741  1.1   dyoung 
    742  1.1   dyoung 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    743  1.1   dyoung 		IXV_TX_LOCK(txr);
    744  1.1   dyoung 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
    745  1.1   dyoung 			m_freem(m);
    746  1.1   dyoung 		IXV_TX_UNLOCK(txr);
    747  1.1   dyoung 	}
    748  1.1   dyoung 	if_qflush(ifp);
    749  1.1   dyoung }
    750  1.1   dyoung 
    751  1.1   dyoung #endif
    752  1.1   dyoung 
    753  1.3  msaitoh static int
    754  1.3  msaitoh ixv_ifflags_cb(struct ethercom *ec)
    755  1.3  msaitoh {
    756  1.3  msaitoh 	struct ifnet *ifp = &ec->ec_if;
    757  1.3  msaitoh 	struct adapter *adapter = ifp->if_softc;
    758  1.3  msaitoh 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    759  1.3  msaitoh 
    760  1.3  msaitoh 	IXV_CORE_LOCK(adapter);
    761  1.3  msaitoh 
    762  1.3  msaitoh 	if (change != 0)
    763  1.3  msaitoh 		adapter->if_flags = ifp->if_flags;
    764  1.3  msaitoh 
    765  1.3  msaitoh 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    766  1.3  msaitoh 		rc = ENETRESET;
    767  1.3  msaitoh 
    768  1.3  msaitoh 	IXV_CORE_UNLOCK(adapter);
    769  1.3  msaitoh 
    770  1.3  msaitoh 	return rc;
    771  1.3  msaitoh }
    772  1.3  msaitoh 
    773  1.1   dyoung /*********************************************************************
    774  1.1   dyoung  *  Ioctl entry point
    775  1.1   dyoung  *
    776  1.1   dyoung  *  ixv_ioctl is called when the user wants to configure the
    777  1.1   dyoung  *  interface.
    778  1.1   dyoung  *
    779  1.1   dyoung  *  return 0 on success, positive on failure
    780  1.1   dyoung  **********************************************************************/
    781  1.1   dyoung 
static int
ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = (struct ifreq *) data;
	int             error = 0;
	int l4csum_en;
	/* Layer-4 Rx checksum capabilities that must be toggled as a set. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First pass: debug tracing only, no side effects. */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		break;
	}

	/* Second pass: actually service the request. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let ether_ioctl() do the generic work first; ENETRESET
		 * means the hardware state must be resynchronized below. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* interface down: nothing to reprogram */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXV_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXV_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		return 0;
	}
}
    855  1.1   dyoung 
    856  1.1   dyoung /*********************************************************************
    857  1.1   dyoung  *  Init entry point
    858  1.1   dyoung  *
    859  1.1   dyoung  *  This routine is used in two ways. It is used by the stack as
    860  1.1   dyoung  *  init entry point in network interface structure. It is also used
    861  1.1   dyoung  *  by the driver as a hw/sw initialization routine to get to a
    862  1.1   dyoung  *  consistent state.
    863  1.1   dyoung  *
    864  1.1   dyoung  *  return 0 on success, positive on failure
    865  1.1   dyoung  **********************************************************************/
    866  1.1   dyoung #define IXGBE_MHADD_MFS_SHIFT 16
    867  1.1   dyoung 
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	/* Caller must hold the core lock across the entire (re)init. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
        callout_stop(&adapter->timer);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixv_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev,"Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset the VF hardware, then program the TX side. */
	ixgbe_reset_hw(hw);
	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixv_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

#if 0 /* XXX isn't it required? -- msaitoh  */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Restart the periodic local timer. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

        /* Set moderation on the Link interrupt */
        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
    979  1.1   dyoung 
    980  1.3  msaitoh static int
    981  1.3  msaitoh ixv_init(struct ifnet *ifp)
    982  1.1   dyoung {
    983  1.3  msaitoh 	struct adapter *adapter = ifp->if_softc;
    984  1.1   dyoung 
    985  1.1   dyoung 	IXV_CORE_LOCK(adapter);
    986  1.1   dyoung 	ixv_init_locked(adapter);
    987  1.1   dyoung 	IXV_CORE_UNLOCK(adapter);
    988  1.3  msaitoh 	return 0;
    989  1.1   dyoung }
    990  1.1   dyoung 
    991  1.1   dyoung 
    992  1.1   dyoung /*
    993  1.1   dyoung **
    994  1.1   dyoung ** MSIX Interrupt Handlers and Tasklets
    995  1.1   dyoung **
    996  1.1   dyoung */
    997  1.1   dyoung 
    998  1.1   dyoung static inline void
    999  1.1   dyoung ixv_enable_queue(struct adapter *adapter, u32 vector)
   1000  1.1   dyoung {
   1001  1.1   dyoung 	struct ixgbe_hw *hw = &adapter->hw;
   1002  1.1   dyoung 	u32	queue = 1 << vector;
   1003  1.1   dyoung 	u32	mask;
   1004  1.1   dyoung 
   1005  1.1   dyoung 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1006  1.1   dyoung 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1007  1.1   dyoung }
   1008  1.1   dyoung 
   1009  1.1   dyoung static inline void
   1010  1.1   dyoung ixv_disable_queue(struct adapter *adapter, u32 vector)
   1011  1.1   dyoung {
   1012  1.1   dyoung 	struct ixgbe_hw *hw = &adapter->hw;
   1013  1.1   dyoung 	u64	queue = (u64)(1 << vector);
   1014  1.1   dyoung 	u32	mask;
   1015  1.1   dyoung 
   1016  1.1   dyoung 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1017  1.1   dyoung 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
   1018  1.1   dyoung }
   1019  1.1   dyoung 
   1020  1.1   dyoung static inline void
   1021  1.1   dyoung ixv_rearm_queues(struct adapter *adapter, u64 queues)
   1022  1.1   dyoung {
   1023  1.1   dyoung 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   1024  1.1   dyoung 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
   1025  1.1   dyoung }
   1026  1.1   dyoung 
   1027  1.1   dyoung 
/*
 * Softint handler for one queue pair: process RX and TX completions,
 * restart transmission if packets are waiting, and either reschedule
 * itself (more RX work pending) or re-enable the queue interrupt.
 */
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixv_rxeof(que, adapter->rx_process_limit);
		IXV_TX_LOCK(txr);
		ixv_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixv_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ixv_start_locked(txr, ifp);
#endif
		IXV_TX_UNLOCK(txr);
		if (more) {
			/* RX limit hit: come back later, keep irq masked. */
			adapter->req.ev_count++;
			softint_schedule(que->que_si);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
   1060  1.1   dyoung 
   1061  1.1   dyoung /*********************************************************************
   1062  1.1   dyoung  *
   1063  1.1   dyoung  *  MSI Queue Interrupt Service routine
   1064  1.1   dyoung  *
   1065  1.1   dyoung  **********************************************************************/
/*
 * MSI-X interrupt handler for one queue pair: masks the queue, reaps
 * RX/TX completions, optionally computes an adaptive interrupt
 * moderation (AIM) value for the next interrupt, then either defers
 * remaining work to the softint or re-enables the queue interrupt.
 */
void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more_tx, more_rx;
	u32		newitr = 0;

	/* Mask this queue while servicing it. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	IXV_TX_LOCK(txr);
	more_tx = ixv_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#if __FreeBSD_version < 800000
	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
#else
	if (!drbr_empty(adapter->ifp, txr->br))
#endif
                more_tx = 1;
	IXV_TX_UNLOCK(txr);

	/*
	 * NOTE(review): ixv_rxeof() is invoked a second time here,
	 * discarding the first result.  This mirrors the upstream
	 * driver but looks redundant -- confirm before simplifying.
	 */
	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
        **  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
        if (que->eitr_setting)
                IXGBE_WRITE_REG(&adapter->hw,
                    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

        que->eitr_setting = 0;

        /* Idle, do nothing */
        if ((txr->bytes == 0) && (rxr->bytes == 0))
                goto no_calc;

	/* Average bytes/packet over the last interval, larger of TX/RX. */
	if ((txr->bytes) && (txr->packets))
               	newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

        /* save for next interrupt */
        que->eitr_setting = newitr;

        /* Reset state */
        txr->bytes = 0;
        txr->packets = 0;
        rxr->bytes = 0;
        rxr->packets = 0;

no_calc:
	if (more_tx || more_rx)
		softint_schedule(que->que_si);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
   1153  1.1   dyoung 
   1154  1.1   dyoung static void
   1155  1.1   dyoung ixv_msix_mbx(void *arg)
   1156  1.1   dyoung {
   1157  1.1   dyoung 	struct adapter	*adapter = arg;
   1158  1.1   dyoung 	struct ixgbe_hw *hw = &adapter->hw;
   1159  1.1   dyoung 	u32		reg;
   1160  1.1   dyoung 
   1161  1.3  msaitoh 	++adapter->mbx_irq.ev_count;
   1162  1.1   dyoung 
   1163  1.1   dyoung 	/* First get the cause */
   1164  1.1   dyoung 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
   1165  1.1   dyoung 	/* Clear interrupt with write */
   1166  1.1   dyoung 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
   1167  1.1   dyoung 
   1168  1.1   dyoung 	/* Link status change */
   1169  1.1   dyoung 	if (reg & IXGBE_EICR_LSC)
   1170  1.3  msaitoh 		softint_schedule(adapter->mbx_si);
   1171  1.1   dyoung 
   1172  1.1   dyoung 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
   1173  1.1   dyoung 	return;
   1174  1.1   dyoung }
   1175  1.1   dyoung 
   1176  1.1   dyoung /*********************************************************************
   1177  1.1   dyoung  *
   1178  1.1   dyoung  *  Media Ioctl callback
   1179  1.1   dyoung  *
   1180  1.1   dyoung  *  This routine is called whenever the user queries the status of
   1181  1.1   dyoung  *  the interface using ifconfig.
   1182  1.1   dyoung  *
   1183  1.1   dyoung  **********************************************************************/
   1184  1.1   dyoung static void
   1185  1.1   dyoung ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1186  1.1   dyoung {
   1187  1.1   dyoung 	struct adapter *adapter = ifp->if_softc;
   1188  1.1   dyoung 
   1189  1.1   dyoung 	INIT_DEBUGOUT("ixv_media_status: begin");
   1190  1.1   dyoung 	IXV_CORE_LOCK(adapter);
   1191  1.1   dyoung 	ixv_update_link_status(adapter);
   1192  1.1   dyoung 
   1193  1.1   dyoung 	ifmr->ifm_status = IFM_AVALID;
   1194  1.1   dyoung 	ifmr->ifm_active = IFM_ETHER;
   1195  1.1   dyoung 
   1196  1.1   dyoung 	if (!adapter->link_active) {
   1197  1.1   dyoung 		IXV_CORE_UNLOCK(adapter);
   1198  1.1   dyoung 		return;
   1199  1.1   dyoung 	}
   1200  1.1   dyoung 
   1201  1.1   dyoung 	ifmr->ifm_status |= IFM_ACTIVE;
   1202  1.1   dyoung 
   1203  1.1   dyoung 	switch (adapter->link_speed) {
   1204  1.1   dyoung 		case IXGBE_LINK_SPEED_1GB_FULL:
   1205  1.1   dyoung 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1206  1.1   dyoung 			break;
   1207  1.1   dyoung 		case IXGBE_LINK_SPEED_10GB_FULL:
   1208  1.1   dyoung 			ifmr->ifm_active |= IFM_FDX;
   1209  1.1   dyoung 			break;
   1210  1.1   dyoung 	}
   1211  1.1   dyoung 
   1212  1.1   dyoung 	IXV_CORE_UNLOCK(adapter);
   1213  1.1   dyoung 
   1214  1.1   dyoung 	return;
   1215  1.1   dyoung }
   1216  1.1   dyoung 
   1217  1.1   dyoung /*********************************************************************
   1218  1.1   dyoung  *
   1219  1.1   dyoung  *  Media Ioctl callback
   1220  1.1   dyoung  *
   1221  1.1   dyoung  *  This routine is called when the user changes speed/duplex using
   1222  1.1   dyoung  *  media/mediopt option with ifconfig.
   1223  1.1   dyoung  *
   1224  1.1   dyoung  **********************************************************************/
   1225  1.1   dyoung static int
   1226  1.1   dyoung ixv_media_change(struct ifnet * ifp)
   1227  1.1   dyoung {
   1228  1.1   dyoung 	struct adapter *adapter = ifp->if_softc;
   1229  1.1   dyoung 	struct ifmedia *ifm = &adapter->media;
   1230  1.1   dyoung 
   1231  1.1   dyoung 	INIT_DEBUGOUT("ixv_media_change: begin");
   1232  1.1   dyoung 
   1233  1.1   dyoung 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1234  1.1   dyoung 		return (EINVAL);
   1235  1.1   dyoung 
   1236  1.1   dyoung         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1237  1.1   dyoung         case IFM_AUTO:
   1238  1.1   dyoung                 break;
   1239  1.1   dyoung         default:
   1240  1.1   dyoung                 device_printf(adapter->dev, "Only auto media type\n");
   1241  1.1   dyoung 		return (EINVAL);
   1242  1.1   dyoung         }
   1243  1.1   dyoung 
   1244  1.1   dyoung 	return (0);
   1245  1.1   dyoung }
   1246  1.1   dyoung 
   1247  1.1   dyoung /*********************************************************************
   1248  1.1   dyoung  *
   1249  1.1   dyoung  *  This routine maps the mbufs to tx descriptors, allowing the
   1250  1.1   dyoung  *  TX engine to transmit the packets.
   1251  1.1   dyoung  *  	- return 0 on success, positive on failure
   1252  1.1   dyoung  *
   1253  1.1   dyoung  **********************************************************************/
   1254  1.1   dyoung 
   1255  1.1   dyoung static int
   1256  1.3  msaitoh ixv_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1257  1.1   dyoung {
   1258  1.3  msaitoh 	struct m_tag *mtag;
   1259  1.1   dyoung 	struct adapter  *adapter = txr->adapter;
   1260  1.3  msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
   1261  1.1   dyoung 	u32		olinfo_status = 0, cmd_type_len;
   1262  1.1   dyoung 	u32		paylen = 0;
   1263  1.1   dyoung 	int             i, j, error, nsegs;
   1264  1.1   dyoung 	int		first, last = 0;
   1265  1.1   dyoung 	bus_dmamap_t	map;
   1266  1.3  msaitoh 	struct ixv_tx_buf *txbuf;
   1267  1.1   dyoung 	union ixgbe_adv_tx_desc *txd = NULL;
   1268  1.1   dyoung 
   1269  1.1   dyoung 	/* Basic descriptor defines */
   1270  1.1   dyoung         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1271  1.1   dyoung 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1272  1.1   dyoung 
   1273  1.3  msaitoh 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1274  1.1   dyoung         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1275  1.1   dyoung 
   1276  1.1   dyoung         /*
   1277  1.1   dyoung          * Important to capture the first descriptor
   1278  1.1   dyoung          * used because it will contain the index of
   1279  1.1   dyoung          * the one we tell the hardware to report back
   1280  1.1   dyoung          */
   1281  1.1   dyoung         first = txr->next_avail_desc;
   1282  1.1   dyoung 	txbuf = &txr->tx_buffers[first];
   1283  1.1   dyoung 	map = txbuf->map;
   1284  1.1   dyoung 
   1285  1.1   dyoung 	/*
   1286  1.1   dyoung 	 * Map the packet for DMA.
   1287  1.1   dyoung 	 */
   1288  1.3  msaitoh 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1289  1.3  msaitoh 	    m_head, BUS_DMA_NOWAIT);
   1290  1.1   dyoung 
   1291  1.3  msaitoh 	switch (error) {
   1292  1.3  msaitoh 	case EAGAIN:
   1293  1.3  msaitoh 		adapter->eagain_tx_dma_setup.ev_count++;
   1294  1.3  msaitoh 		return EAGAIN;
   1295  1.3  msaitoh 	case ENOMEM:
   1296  1.3  msaitoh 		adapter->enomem_tx_dma_setup.ev_count++;
   1297  1.3  msaitoh 		return EAGAIN;
   1298  1.3  msaitoh 	case EFBIG:
   1299  1.3  msaitoh 		adapter->efbig_tx_dma_setup.ev_count++;
   1300  1.3  msaitoh 		return error;
   1301  1.3  msaitoh 	case EINVAL:
   1302  1.3  msaitoh 		adapter->einval_tx_dma_setup.ev_count++;
   1303  1.3  msaitoh 		return error;
   1304  1.3  msaitoh 	default:
   1305  1.3  msaitoh 		adapter->other_tx_dma_setup.ev_count++;
   1306  1.3  msaitoh 		return error;
   1307  1.3  msaitoh 	case 0:
   1308  1.3  msaitoh 		break;
   1309  1.1   dyoung 	}
   1310  1.1   dyoung 
   1311  1.1   dyoung 	/* Make certain there are enough descriptors */
   1312  1.1   dyoung 	if (nsegs > txr->tx_avail - 2) {
   1313  1.3  msaitoh 		txr->no_desc_avail.ev_count++;
   1314  1.3  msaitoh 		/* XXX s/ixgbe/ixv/ */
   1315  1.3  msaitoh 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1316  1.3  msaitoh 		return EAGAIN;
   1317  1.1   dyoung 	}
   1318  1.1   dyoung 
   1319  1.1   dyoung 	/*
   1320  1.1   dyoung 	** Set up the appropriate offload context
   1321  1.1   dyoung 	** this becomes the first descriptor of
   1322  1.1   dyoung 	** a packet.
   1323  1.1   dyoung 	*/
   1324  1.3  msaitoh 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1325  1.1   dyoung 		if (ixv_tso_setup(txr, m_head, &paylen)) {
   1326  1.1   dyoung 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1327  1.1   dyoung 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1328  1.1   dyoung 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1329  1.1   dyoung 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1330  1.3  msaitoh 			++adapter->tso_tx.ev_count;
   1331  1.3  msaitoh 		} else {
   1332  1.3  msaitoh 			++adapter->tso_err.ev_count;
   1333  1.3  msaitoh 			/* XXX unload DMA map! --dyoung -> easy? --msaitoh */
   1334  1.1   dyoung 			return (ENXIO);
   1335  1.3  msaitoh 		}
   1336  1.3  msaitoh 	} else
   1337  1.3  msaitoh 		olinfo_status |= ixv_tx_ctx_setup(txr, m_head);
   1338  1.1   dyoung 
   1339  1.1   dyoung         /* Record payload length */
   1340  1.1   dyoung 	if (paylen == 0)
   1341  1.1   dyoung         	olinfo_status |= m_head->m_pkthdr.len <<
   1342  1.1   dyoung 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1343  1.1   dyoung 
   1344  1.1   dyoung 	i = txr->next_avail_desc;
   1345  1.3  msaitoh 	for (j = 0; j < map->dm_nsegs; j++) {
   1346  1.1   dyoung 		bus_size_t seglen;
   1347  1.1   dyoung 		bus_addr_t segaddr;
   1348  1.1   dyoung 
   1349  1.1   dyoung 		txbuf = &txr->tx_buffers[i];
   1350  1.1   dyoung 		txd = &txr->tx_base[i];
   1351  1.3  msaitoh 		seglen = map->dm_segs[j].ds_len;
   1352  1.3  msaitoh 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1353  1.1   dyoung 
   1354  1.1   dyoung 		txd->read.buffer_addr = segaddr;
   1355  1.1   dyoung 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1356  1.1   dyoung 		    cmd_type_len |seglen);
   1357  1.1   dyoung 		txd->read.olinfo_status = htole32(olinfo_status);
   1358  1.1   dyoung 		last = i; /* descriptor that will get completion IRQ */
   1359  1.1   dyoung 
   1360  1.1   dyoung 		if (++i == adapter->num_tx_desc)
   1361  1.1   dyoung 			i = 0;
   1362  1.1   dyoung 
   1363  1.1   dyoung 		txbuf->m_head = NULL;
   1364  1.1   dyoung 		txbuf->eop_index = -1;
   1365  1.1   dyoung 	}
   1366  1.1   dyoung 
   1367  1.1   dyoung 	txd->read.cmd_type_len |=
   1368  1.1   dyoung 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1369  1.3  msaitoh 	txr->tx_avail -= map->dm_nsegs;
   1370  1.1   dyoung 	txr->next_avail_desc = i;
   1371  1.1   dyoung 
   1372  1.1   dyoung 	txbuf->m_head = m_head;
   1373  1.4  msaitoh 	/* Swap the dma map between the first and last descriptor */
   1374  1.4  msaitoh 	txr->tx_buffers[first].map = txbuf->map;
   1375  1.1   dyoung 	txbuf->map = map;
   1376  1.3  msaitoh 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1377  1.3  msaitoh 	    BUS_DMASYNC_PREWRITE);
   1378  1.1   dyoung 
   1379  1.1   dyoung         /* Set the index of the descriptor that will be marked done */
   1380  1.1   dyoung         txbuf = &txr->tx_buffers[first];
   1381  1.1   dyoung 	txbuf->eop_index = last;
   1382  1.1   dyoung 
   1383  1.3  msaitoh 	/* XXX s/ixgbe/ixg/ */
   1384  1.3  msaitoh         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1385  1.1   dyoung             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1386  1.1   dyoung 	/*
   1387  1.1   dyoung 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
   1388  1.1   dyoung 	 * hardware that this frame is available to transmit.
   1389  1.1   dyoung 	 */
   1390  1.3  msaitoh 	++txr->total_packets.ev_count;
   1391  1.1   dyoung 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
   1392  1.1   dyoung 
   1393  1.3  msaitoh 	return 0;
   1394  1.1   dyoung }
   1395  1.1   dyoung 
   1396  1.1   dyoung 
   1397  1.1   dyoung /*********************************************************************
   1398  1.1   dyoung  *  Multicast Update
   1399  1.1   dyoung  *
   1400  1.1   dyoung  *  This routine is called whenever multicast address list is updated.
   1401  1.1   dyoung  *
   1402  1.1   dyoung  **********************************************************************/
   1403  1.1   dyoung #define IXGBE_RAR_ENTRIES 16
   1404  1.1   dyoung 
   1405  1.1   dyoung static void
   1406  1.1   dyoung ixv_set_multi(struct adapter *adapter)
   1407  1.1   dyoung {
   1408  1.3  msaitoh 	struct ether_multi *enm;
   1409  1.3  msaitoh 	struct ether_multistep step;
   1410  1.1   dyoung 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1411  1.1   dyoung 	u8	*update_ptr;
   1412  1.1   dyoung 	int	mcnt = 0;
   1413  1.3  msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
   1414  1.1   dyoung 
   1415  1.1   dyoung 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1416  1.1   dyoung 
   1417  1.3  msaitoh 	ETHER_FIRST_MULTI(step, ec, enm);
   1418  1.3  msaitoh 	while (enm != NULL) {
   1419  1.3  msaitoh 		bcopy(enm->enm_addrlo,
   1420  1.1   dyoung 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1421  1.1   dyoung 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1422  1.1   dyoung 		mcnt++;
   1423  1.3  msaitoh 		/* XXX This might be required --msaitoh */
   1424  1.3  msaitoh 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1425  1.3  msaitoh 			break;
   1426  1.3  msaitoh 		ETHER_NEXT_MULTI(step, enm);
   1427  1.1   dyoung 	}
   1428  1.1   dyoung 
   1429  1.1   dyoung 	update_ptr = mta;
   1430  1.1   dyoung 
   1431  1.1   dyoung 	ixgbe_update_mc_addr_list(&adapter->hw,
   1432  1.5  msaitoh 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
   1433  1.1   dyoung 
   1434  1.1   dyoung 	return;
   1435  1.1   dyoung }
   1436  1.1   dyoung 
   1437  1.1   dyoung /*
   1438  1.1   dyoung  * This is an iterator function now needed by the multicast
   1439  1.1   dyoung  * shared code. It simply feeds the shared code routine the
   1440  1.1   dyoung  * addresses in the array of ixv_set_multi() one by one.
   1441  1.1   dyoung  */
   1442  1.1   dyoung static u8 *
   1443  1.1   dyoung ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1444  1.1   dyoung {
   1445  1.1   dyoung 	u8 *addr = *update_ptr;
   1446  1.1   dyoung 	u8 *newptr;
   1447  1.1   dyoung 	*vmdq = 0;
   1448  1.1   dyoung 
   1449  1.1   dyoung 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1450  1.1   dyoung 	*update_ptr = newptr;
   1451  1.1   dyoung 	return addr;
   1452  1.1   dyoung }
   1453  1.1   dyoung 
   1454  1.1   dyoung /*********************************************************************
   1455  1.1   dyoung  *  Timer routine
   1456  1.1   dyoung  *
   1457  1.1   dyoung  *  This routine checks for link status,updates statistics,
   1458  1.1   dyoung  *  and runs the watchdog check.
   1459  1.1   dyoung  *
   1460  1.1   dyoung  **********************************************************************/
   1461  1.1   dyoung 
/*
 * Per-second timer body; the core lock must be held (entered via
 * ixv_local_timer()).  Updates link state and statistics, then runs
 * the per-queue TX watchdog: if any queue has pending work and no
 * descriptor has been cleaned within IXV_WATCHDOG ticks, the adapter
 * is reinitialized.
 */
static void
ixv_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct tx_ring	*txr = adapter->tx_rings;
	int		i;
	struct timeval now, elapsed;

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * If the interface has been paused
	 * then don't do the watchdog check
	 */
	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
		goto out;
	/*
	** Check for time since any descriptor was cleaned
	*/
        for (i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		if (txr->watchdog_check == FALSE) {
			IXV_TX_UNLOCK(txr);
			continue;
		}
		getmicrotime(&now);
		timersub(&now, &txr->watchdog_time, &elapsed);
		if (tvtohz(&elapsed) > IXV_WATCHDOG)
			goto hung;	/* txr is still locked at 'hung' */
		IXV_TX_UNLOCK(txr);
	}
out:
       	ixv_rearm_queues(adapter, adapter->que_mask);
	/* Reschedule ourselves one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

hung:
	/* Watchdog fired: dump ring state, then reset the interface. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
	device_printf(dev,"TX(%d) desc avail = %d,"
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	IXV_TX_UNLOCK(txr);
	ixv_init_locked(adapter);
}
   1517  1.1   dyoung 
/*
 * Callout entry point: acquire the core lock and delegate the real
 * timer work to ixv_local_timer1().
 */
static void
ixv_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXV_CORE_LOCK(sc);
	ixv_local_timer1(sc);
	IXV_CORE_UNLOCK(sc);
}
   1527  1.3  msaitoh 
   1528  1.1   dyoung /*
   1529  1.1   dyoung ** Note: this routine updates the OS on the link state
   1530  1.1   dyoung **	the real check of the hardware only happens with
   1531  1.1   dyoung **	a link interrupt.
   1532  1.1   dyoung */
   1533  1.1   dyoung static void
   1534  1.1   dyoung ixv_update_link_status(struct adapter *adapter)
   1535  1.1   dyoung {
   1536  1.1   dyoung 	struct ifnet	*ifp = adapter->ifp;
   1537  1.1   dyoung 	struct tx_ring *txr = adapter->tx_rings;
   1538  1.1   dyoung 	device_t dev = adapter->dev;
   1539  1.1   dyoung 
   1540  1.1   dyoung 
   1541  1.1   dyoung 	if (adapter->link_up){
   1542  1.1   dyoung 		if (adapter->link_active == FALSE) {
   1543  1.1   dyoung 			if (bootverbose)
   1544  1.1   dyoung 				device_printf(dev,"Link is up %d Gbps %s \n",
   1545  1.1   dyoung 				    ((adapter->link_speed == 128)? 10:1),
   1546  1.1   dyoung 				    "Full Duplex");
   1547  1.1   dyoung 			adapter->link_active = TRUE;
   1548  1.1   dyoung 			if_link_state_change(ifp, LINK_STATE_UP);
   1549  1.1   dyoung 		}
   1550  1.1   dyoung 	} else { /* Link down */
   1551  1.1   dyoung 		if (adapter->link_active == TRUE) {
   1552  1.1   dyoung 			if (bootverbose)
   1553  1.1   dyoung 				device_printf(dev,"Link is Down\n");
   1554  1.1   dyoung 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1555  1.1   dyoung 			adapter->link_active = FALSE;
   1556  1.1   dyoung 			for (int i = 0; i < adapter->num_queues;
   1557  1.1   dyoung 			    i++, txr++)
   1558  1.1   dyoung 				txr->watchdog_check = FALSE;
   1559  1.1   dyoung 		}
   1560  1.1   dyoung 	}
   1561  1.1   dyoung 
   1562  1.1   dyoung 	return;
   1563  1.1   dyoung }
   1564  1.1   dyoung 
   1565  1.1   dyoung 
   1566  1.3  msaitoh static void
   1567  1.3  msaitoh ixv_ifstop(struct ifnet *ifp, int disable)
   1568  1.3  msaitoh {
   1569  1.3  msaitoh 	struct adapter *adapter = ifp->if_softc;
   1570  1.3  msaitoh 
   1571  1.3  msaitoh 	IXV_CORE_LOCK(adapter);
   1572  1.3  msaitoh 	ixv_stop(adapter);
   1573  1.3  msaitoh 	IXV_CORE_UNLOCK(adapter);
   1574  1.3  msaitoh }
   1575  1.3  msaitoh 
   1576  1.1   dyoung /*********************************************************************
   1577  1.1   dyoung  *
   1578  1.1   dyoung  *  This routine disables all traffic on the adapter by issuing a
   1579  1.1   dyoung  *  global reset on the MAC and deallocates TX/RX buffers.
   1580  1.1   dyoung  *
   1581  1.1   dyoung  **********************************************************************/
   1582  1.1   dyoung 
/*
 * Disable all traffic: mask interrupts, mark the interface down,
 * reset and stop the MAC, and cancel the watchdog timer.
 * Caller must hold the core lock.
 */
static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	ixgbe_reset_hw(hw);
	/* Clear the stopped flag so ixgbe_stop_adapter() runs in full. */
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
   1609  1.1   dyoung 
   1610  1.1   dyoung 
   1611  1.1   dyoung /*********************************************************************
   1612  1.1   dyoung  *
   1613  1.1   dyoung  *  Determine hardware revision.
   1614  1.1   dyoung  *
   1615  1.1   dyoung  **********************************************************************/
   1616  1.1   dyoung static void
   1617  1.1   dyoung ixv_identify_hardware(struct adapter *adapter)
   1618  1.1   dyoung {
   1619  1.1   dyoung 	u16		pci_cmd_word;
   1620  1.3  msaitoh 	pcitag_t tag;
   1621  1.3  msaitoh 	pci_chipset_tag_t pc;
   1622  1.3  msaitoh 	pcireg_t subid, id;
   1623  1.3  msaitoh 	struct ixgbe_hw *hw = &adapter->hw;
   1624  1.3  msaitoh 
   1625  1.3  msaitoh 	pc = adapter->osdep.pc;
   1626  1.3  msaitoh 	tag = adapter->osdep.tag;
   1627  1.1   dyoung 
   1628  1.1   dyoung 	/*
   1629  1.1   dyoung 	** Make sure BUSMASTER is set, on a VM under
   1630  1.1   dyoung 	** KVM it may not be and will break things.
   1631  1.1   dyoung 	*/
   1632  1.3  msaitoh 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
   1633  1.3  msaitoh 	if (!((pci_cmd_word & PCI_COMMAND_MASTER_ENABLE) &&
   1634  1.3  msaitoh 	    (pci_cmd_word & PCI_COMMAND_MEM_ENABLE))) {
   1635  1.1   dyoung 		INIT_DEBUGOUT("Memory Access and/or Bus Master "
   1636  1.1   dyoung 		    "bits were not set!\n");
   1637  1.3  msaitoh 		pci_cmd_word |=
   1638  1.3  msaitoh 		    (PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE);
   1639  1.3  msaitoh 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
   1640  1.1   dyoung 	}
   1641  1.1   dyoung 
   1642  1.3  msaitoh 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   1643  1.3  msaitoh 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   1644  1.3  msaitoh 
   1645  1.1   dyoung 	/* Save off the information about this board */
   1646  1.3  msaitoh 	hw->vendor_id = PCI_VENDOR(id);
   1647  1.3  msaitoh 	hw->device_id = PCI_PRODUCT(id);
   1648  1.3  msaitoh 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   1649  1.3  msaitoh 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   1650  1.3  msaitoh 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   1651  1.1   dyoung 
   1652  1.1   dyoung 	return;
   1653  1.1   dyoung }
   1654  1.1   dyoung 
   1655  1.1   dyoung /*********************************************************************
   1656  1.1   dyoung  *
   1657  1.1   dyoung  *  Setup MSIX Interrupt resources and handlers
   1658  1.1   dyoung  *
   1659  1.1   dyoung  **********************************************************************/
/*
 * Set up MSI-X interrupt resources and handlers: one vector per
 * queue plus one for the mailbox.  Compiled out (returns 0) when
 * NETBSD_MSI_OR_MSIX is not defined; the #else arm is still largely
 * FreeBSD bus(9) code.  NOTE(review): the #else arm appears not yet
 * converted to NetBSD interrupt APIs — confirm before enabling.
 */
static int
ixv_allocate_msix(struct adapter *adapter)
{
#if !defined(NETBSD_MSI_OR_MSIX)
	return 0;
#else
	device_t        dev = adapter->dev;
	struct 		ix_queue *que = adapter->queues;
	int 		error, rid, vector = 0;
	pcitag_t tag;
	pci_chipset_tag_t pc;

	pc = adapter->osdep.pc;
	tag = adapter->osdep.tag;

	/* One interrupt resource + handler + softint per queue. */
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			aprint_error_dev(dev,"Unable to allocate"
		    	    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			aprint_error_dev(dev,
			    "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
        	adapter->que_mask |= (u64)(1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);

		/* Deferred (softint) half of the queue interrupt. */
		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
		    que);
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		aprint_error_dev(dev,"Unable to allocate"
    	    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		aprint_error_dev(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->mbxvec = vector;
	/* Tasklets for Mailbox */
	adapter->mbx_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
	    adapter);
	/*
	** Due to a broken design QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid);
		rid += PCI_MSIX_CTL;
		msix_ctrl = pci_read_config(pc, tag, rid);
		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
		pci_conf_write(pc, tag, msix_ctrl);
	}

	return (0);
#endif
}
   1754  1.1   dyoung 
   1755  1.1   dyoung /*
   1756  1.1   dyoung  * Setup MSIX resources, note that the VF
   1757  1.1   dyoung  * device MUST use MSIX, there is no fallback.
   1758  1.1   dyoung  */
/*
 * Probe and allocate MSI-X vectors (the VF has no INTx/MSI fallback).
 * Returns the number of vectors configured, or ENXIO on failure.
 * Compiled out (returns 0) when NETBSD_MSI_OR_MSIX is not defined;
 * the #else arm is still FreeBSD bus(9)/pci(9) code.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
#if !defined(NETBSD_MSI_OR_MSIX)
	return 0;
#else
	device_t dev = adapter->dev;
	int rid, vectors, want = 2;


	/* First try MSI/X */
	rid = PCIR_BAR(3);	/* MSI-X table lives in BAR 3 on this device */
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
       	if (!adapter->msix_mem) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	vectors = pci_msix_count(dev);
	if (vectors < 2) {
		/* Not enough vectors: release the table mapping. */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
		goto out;
	}

	/*
	** Want two vectors: one for a queue,
	** plus an additional for mailbox.
	*/
	if (pci_alloc_msix(dev, &want) == 0) {
               	device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
out:
	device_printf(adapter->dev,"MSIX config error\n");
	/* NOTE(review): ENXIO is returned where callers expect a vector
	 * count; ixv_allocate_pci_resources tests for exactly ENXIO. */
	return (ENXIO);
#endif
}
   1801  1.1   dyoung 
   1802  1.1   dyoung 
/*
 * Map the register BAR (BAR0) and configure MSI-X.
 * Returns 0 on success, ENXIO if the BAR cannot be mapped or
 * MSI-X setup fails.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t	memtype;
	device_t        dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));

	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Device registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:		/* shared error exit for both mapping failures */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* This VF driver uses a single queue pair. */
	adapter->num_queues = 1;
	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
   1853  1.1   dyoung 
/*
 * Release interrupt and memory resources acquired in attach.
 * Safe to call on a partially attached adapter (see the res check).
 * Entire body is compiled out unless NETBSD_MSI_OR_MSIX is defined;
 * the code is still largely FreeBSD bus(9) style.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
#if defined(NETBSD_MSI_OR_MSIX)
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	/* BAR that holds the MSI-X table. */
	memrid = PCI_BAR(MSIX_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->mbxvec) /* we are doing MSIX */
		rid = adapter->mbxvec + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Finally the MSI-X allocation and memory mappings. */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

#endif
	return;
}
   1917  1.1   dyoung 
   1918  1.1   dyoung /*********************************************************************
   1919  1.1   dyoung  *
   1920  1.1   dyoung  *  Setup networking device structure and register an interface.
   1921  1.1   dyoung  *
   1922  1.1   dyoung  **********************************************************************/
   1923  1.1   dyoung static void
   1924  1.1   dyoung ixv_setup_interface(device_t dev, struct adapter *adapter)
   1925  1.1   dyoung {
   1926  1.3  msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
   1927  1.1   dyoung 	struct ifnet   *ifp;
   1928  1.1   dyoung 
   1929  1.1   dyoung 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1930  1.1   dyoung 
   1931  1.3  msaitoh 	ifp = adapter->ifp = &ec->ec_if;
   1932  1.3  msaitoh 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1933  1.1   dyoung 	ifp->if_baudrate = 1000000000;
   1934  1.1   dyoung 	ifp->if_init = ixv_init;
   1935  1.3  msaitoh 	ifp->if_stop = ixv_ifstop;
   1936  1.1   dyoung 	ifp->if_softc = adapter;
   1937  1.1   dyoung 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1938  1.1   dyoung 	ifp->if_ioctl = ixv_ioctl;
   1939  1.1   dyoung #if __FreeBSD_version >= 800000
   1940  1.1   dyoung 	ifp->if_transmit = ixv_mq_start;
   1941  1.1   dyoung 	ifp->if_qflush = ixv_qflush;
   1942  1.1   dyoung #else
   1943  1.1   dyoung 	ifp->if_start = ixv_start;
   1944  1.1   dyoung #endif
   1945  1.1   dyoung 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   1946  1.1   dyoung 
   1947  1.3  msaitoh 	if_attach(ifp);
   1948  1.1   dyoung 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1949  1.3  msaitoh 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1950  1.1   dyoung 
   1951  1.1   dyoung 	adapter->max_frame_size =
   1952  1.1   dyoung 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1953  1.1   dyoung 
   1954  1.1   dyoung 	/*
   1955  1.1   dyoung 	 * Tell the upper layer(s) we support long frames.
   1956  1.1   dyoung 	 */
   1957  1.3  msaitoh 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1958  1.3  msaitoh 
   1959  1.3  msaitoh 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   1960  1.3  msaitoh 	ifp->if_capenable = 0;
   1961  1.1   dyoung 
   1962  1.3  msaitoh 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   1963  1.3  msaitoh 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1964  1.4  msaitoh 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1965  1.4  msaitoh 	    		| ETHERCAP_VLAN_MTU;
   1966  1.3  msaitoh 	ec->ec_capenable = ec->ec_capabilities;
   1967  1.1   dyoung 
   1968  1.3  msaitoh 	/* Don't enable LRO by default */
   1969  1.3  msaitoh 	ifp->if_capabilities |= IFCAP_LRO;
   1970  1.3  msaitoh 
   1971  1.3  msaitoh 	/*
   1972  1.3  msaitoh 	** Dont turn this on by default, if vlans are
   1973  1.3  msaitoh 	** created on another pseudo device (eg. lagg)
   1974  1.3  msaitoh 	** then vlan events are not passed thru, breaking
   1975  1.3  msaitoh 	** operation, but with HW FILTER off it works. If
   1976  1.3  msaitoh 	** using vlans directly on the em driver you can
   1977  1.3  msaitoh 	** enable this and get full hardware tag filtering.
   1978  1.3  msaitoh 	*/
   1979  1.3  msaitoh 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1980  1.1   dyoung 
   1981  1.1   dyoung 	/*
   1982  1.1   dyoung 	 * Specify the media types supported by this adapter and register
   1983  1.1   dyoung 	 * callbacks to update media and link information
   1984  1.1   dyoung 	 */
   1985  1.1   dyoung 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1986  1.1   dyoung 		     ixv_media_status);
   1987  1.1   dyoung 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
   1988  1.1   dyoung 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1989  1.1   dyoung 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1990  1.1   dyoung 
   1991  1.1   dyoung 	return;
   1992  1.1   dyoung }
   1993  1.1   dyoung 
   1994  1.1   dyoung static void
   1995  1.1   dyoung ixv_config_link(struct adapter *adapter)
   1996  1.1   dyoung {
   1997  1.1   dyoung 	struct ixgbe_hw *hw = &adapter->hw;
   1998  1.1   dyoung 	u32	autoneg, err = 0;
   1999  1.1   dyoung 	bool	negotiate = TRUE;
   2000  1.1   dyoung 
   2001  1.1   dyoung 	if (hw->mac.ops.check_link)
   2002  1.1   dyoung 		err = hw->mac.ops.check_link(hw, &autoneg,
   2003  1.1   dyoung 		    &adapter->link_up, FALSE);
   2004  1.1   dyoung 	if (err)
   2005  1.1   dyoung 		goto out;
   2006  1.1   dyoung 
   2007  1.1   dyoung 	if (hw->mac.ops.setup_link)
   2008  1.1   dyoung                	err = hw->mac.ops.setup_link(hw, autoneg,
   2009  1.1   dyoung 		    negotiate, adapter->link_up);
   2010  1.1   dyoung out:
   2011  1.1   dyoung 	return;
   2012  1.1   dyoung }
   2013  1.1   dyoung 
   2014  1.1   dyoung /********************************************************************
   2015  1.1   dyoung  * Manage DMA'able memory.
   2016  1.1   dyoung  *******************************************************************/
   2017  1.1   dyoung 
   2018  1.1   dyoung static int
   2019  1.1   dyoung ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
   2020  1.1   dyoung 		struct ixv_dma_alloc *dma, int mapflags)
   2021  1.1   dyoung {
   2022  1.1   dyoung 	device_t dev = adapter->dev;
   2023  1.3  msaitoh 	int             r, rsegs;
   2024  1.1   dyoung 
   2025  1.3  msaitoh 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2026  1.1   dyoung 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2027  1.1   dyoung 			       size,	/* maxsize */
   2028  1.1   dyoung 			       1,	/* nsegments */
   2029  1.1   dyoung 			       size,	/* maxsegsize */
   2030  1.1   dyoung 			       BUS_DMA_ALLOCNOW,	/* flags */
   2031  1.1   dyoung 			       &dma->dma_tag);
   2032  1.1   dyoung 	if (r != 0) {
   2033  1.3  msaitoh 		aprint_error_dev(dev,
   2034  1.3  msaitoh 		    "ixv_dma_malloc: bus_dma_tag_create failed; error %u\n", r);
   2035  1.1   dyoung 		goto fail_0;
   2036  1.1   dyoung 	}
   2037  1.3  msaitoh 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2038  1.3  msaitoh 		size,
   2039  1.3  msaitoh 		dma->dma_tag->dt_alignment,
   2040  1.3  msaitoh 		dma->dma_tag->dt_boundary,
   2041  1.3  msaitoh 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2042  1.1   dyoung 	if (r != 0) {
   2043  1.3  msaitoh 		aprint_error_dev(dev,
   2044  1.3  msaitoh 		    "%s: bus_dmamem_alloc failed; error %u\n", __func__, r);
   2045  1.1   dyoung 		goto fail_1;
   2046  1.1   dyoung 	}
   2047  1.3  msaitoh 
   2048  1.3  msaitoh 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2049  1.3  msaitoh 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2050  1.3  msaitoh 	if (r != 0) {
   2051  1.3  msaitoh 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2052  1.3  msaitoh 		    __func__, r);
   2053  1.3  msaitoh 		goto fail_2;
   2054  1.3  msaitoh 	}
   2055  1.3  msaitoh 
   2056  1.3  msaitoh 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2057  1.3  msaitoh 	if (r != 0) {
   2058  1.3  msaitoh 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2059  1.3  msaitoh 		    __func__, r);
   2060  1.3  msaitoh 		goto fail_3;
   2061  1.3  msaitoh 	}
   2062  1.3  msaitoh 
   2063  1.3  msaitoh 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2064  1.1   dyoung 			    size,
   2065  1.3  msaitoh 			    NULL,
   2066  1.1   dyoung 			    mapflags | BUS_DMA_NOWAIT);
   2067  1.1   dyoung 	if (r != 0) {
   2068  1.3  msaitoh 		aprint_error_dev(dev,"%s: bus_dmamap_load failed; error %u\n",
   2069  1.3  msaitoh 		    __func__, r);
   2070  1.3  msaitoh 		goto fail_4;
   2071  1.1   dyoung 	}
   2072  1.3  msaitoh 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2073  1.1   dyoung 	dma->dma_size = size;
   2074  1.3  msaitoh 	return 0;
   2075  1.3  msaitoh fail_4:
   2076  1.3  msaitoh 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2077  1.3  msaitoh fail_3:
   2078  1.3  msaitoh 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2079  1.1   dyoung fail_2:
   2080  1.3  msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2081  1.1   dyoung fail_1:
   2082  1.3  msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2083  1.1   dyoung fail_0:
   2084  1.1   dyoung 	dma->dma_map = NULL;
   2085  1.1   dyoung 	dma->dma_tag = NULL;
   2086  1.1   dyoung 	return (r);
   2087  1.1   dyoung }
   2088  1.1   dyoung 
   2089  1.1   dyoung static void
   2090  1.1   dyoung ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
   2091  1.1   dyoung {
   2092  1.3  msaitoh 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2093  1.1   dyoung 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2094  1.3  msaitoh 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2095  1.3  msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2096  1.3  msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2097  1.1   dyoung }
   2098  1.1   dyoung 
   2099  1.1   dyoung 
   2100  1.1   dyoung /*********************************************************************
   2101  1.1   dyoung  *
   2102  1.1   dyoung  *  Allocate memory for the transmit and receive rings, and then
   2103  1.1   dyoung  *  the descriptors associated with each, called only once at attach.
   2104  1.1   dyoung  *
   2105  1.1   dyoung  **********************************************************************/
   2106  1.1   dyoung static int
   2107  1.1   dyoung ixv_allocate_queues(struct adapter *adapter)
   2108  1.1   dyoung {
   2109  1.1   dyoung 	device_t	dev = adapter->dev;
   2110  1.1   dyoung 	struct ix_queue	*que;
   2111  1.1   dyoung 	struct tx_ring	*txr;
   2112  1.1   dyoung 	struct rx_ring	*rxr;
   2113  1.1   dyoung 	int rsize, tsize, error = 0;
   2114  1.1   dyoung 	int txconf = 0, rxconf = 0;
   2115  1.1   dyoung 
   2116  1.1   dyoung         /* First allocate the top level queue structs */
   2117  1.1   dyoung         if (!(adapter->queues =
   2118  1.1   dyoung             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2119  1.1   dyoung             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2120  1.3  msaitoh                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2121  1.1   dyoung                 error = ENOMEM;
   2122  1.1   dyoung                 goto fail;
   2123  1.1   dyoung         }
   2124  1.1   dyoung 
   2125  1.1   dyoung 	/* First allocate the TX ring struct memory */
   2126  1.1   dyoung 	if (!(adapter->tx_rings =
   2127  1.1   dyoung 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2128  1.1   dyoung 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2129  1.3  msaitoh 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2130  1.1   dyoung 		error = ENOMEM;
   2131  1.1   dyoung 		goto tx_fail;
   2132  1.1   dyoung 	}
   2133  1.1   dyoung 
   2134  1.1   dyoung 	/* Next allocate the RX */
   2135  1.1   dyoung 	if (!(adapter->rx_rings =
   2136  1.1   dyoung 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2137  1.1   dyoung 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2138  1.3  msaitoh 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2139  1.1   dyoung 		error = ENOMEM;
   2140  1.1   dyoung 		goto rx_fail;
   2141  1.1   dyoung 	}
   2142  1.1   dyoung 
   2143  1.1   dyoung 	/* For the ring itself */
   2144  1.1   dyoung 	tsize = roundup2(adapter->num_tx_desc *
   2145  1.1   dyoung 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2146  1.1   dyoung 
   2147  1.1   dyoung 	/*
   2148  1.1   dyoung 	 * Now set up the TX queues, txconf is needed to handle the
   2149  1.1   dyoung 	 * possibility that things fail midcourse and we need to
   2150  1.1   dyoung 	 * undo memory gracefully
   2151  1.1   dyoung 	 */
   2152  1.1   dyoung 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2153  1.1   dyoung 		/* Set up some basics */
   2154  1.1   dyoung 		txr = &adapter->tx_rings[i];
   2155  1.1   dyoung 		txr->adapter = adapter;
   2156  1.1   dyoung 		txr->me = i;
   2157  1.1   dyoung 
   2158  1.1   dyoung 		/* Initialize the TX side lock */
   2159  1.1   dyoung 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2160  1.3  msaitoh 		    device_xname(dev), txr->me);
   2161  1.3  msaitoh 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2162  1.1   dyoung 
   2163  1.1   dyoung 		if (ixv_dma_malloc(adapter, tsize,
   2164  1.1   dyoung 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2165  1.3  msaitoh 			aprint_error_dev(dev,
   2166  1.1   dyoung 			    "Unable to allocate TX Descriptor memory\n");
   2167  1.1   dyoung 			error = ENOMEM;
   2168  1.1   dyoung 			goto err_tx_desc;
   2169  1.1   dyoung 		}
   2170  1.1   dyoung 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2171  1.1   dyoung 		bzero((void *)txr->tx_base, tsize);
   2172  1.1   dyoung 
   2173  1.1   dyoung         	/* Now allocate transmit buffers for the ring */
   2174  1.1   dyoung         	if (ixv_allocate_transmit_buffers(txr)) {
   2175  1.3  msaitoh 			aprint_error_dev(dev,
   2176  1.1   dyoung 			    "Critical Failure setting up transmit buffers\n");
   2177  1.1   dyoung 			error = ENOMEM;
   2178  1.1   dyoung 			goto err_tx_desc;
   2179  1.1   dyoung         	}
   2180  1.1   dyoung #if __FreeBSD_version >= 800000
   2181  1.1   dyoung 		/* Allocate a buf ring */
   2182  1.1   dyoung 		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
   2183  1.1   dyoung 		    M_WAITOK, &txr->tx_mtx);
   2184  1.1   dyoung 		if (txr->br == NULL) {
   2185  1.3  msaitoh 			aprint_error_dev(dev,
   2186  1.1   dyoung 			    "Critical Failure setting up buf ring\n");
   2187  1.1   dyoung 			error = ENOMEM;
   2188  1.1   dyoung 			goto err_tx_desc;
   2189  1.1   dyoung 		}
   2190  1.1   dyoung #endif
   2191  1.1   dyoung 	}
   2192  1.1   dyoung 
   2193  1.1   dyoung 	/*
   2194  1.1   dyoung 	 * Next the RX queues...
   2195  1.1   dyoung 	 */
   2196  1.1   dyoung 	rsize = roundup2(adapter->num_rx_desc *
   2197  1.1   dyoung 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2198  1.1   dyoung 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2199  1.1   dyoung 		rxr = &adapter->rx_rings[i];
   2200  1.1   dyoung 		/* Set up some basics */
   2201  1.1   dyoung 		rxr->adapter = adapter;
   2202  1.1   dyoung 		rxr->me = i;
   2203  1.1   dyoung 
   2204  1.1   dyoung 		/* Initialize the RX side lock */
   2205  1.1   dyoung 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2206  1.3  msaitoh 		    device_xname(dev), rxr->me);
   2207  1.3  msaitoh 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2208  1.1   dyoung 
   2209  1.1   dyoung 		if (ixv_dma_malloc(adapter, rsize,
   2210  1.1   dyoung 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2211  1.3  msaitoh 			aprint_error_dev(dev,
   2212  1.1   dyoung 			    "Unable to allocate RxDescriptor memory\n");
   2213  1.1   dyoung 			error = ENOMEM;
   2214  1.1   dyoung 			goto err_rx_desc;
   2215  1.1   dyoung 		}
   2216  1.1   dyoung 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2217  1.1   dyoung 		bzero((void *)rxr->rx_base, rsize);
   2218  1.1   dyoung 
   2219  1.1   dyoung         	/* Allocate receive buffers for the ring*/
   2220  1.1   dyoung 		if (ixv_allocate_receive_buffers(rxr)) {
   2221  1.3  msaitoh 			aprint_error_dev(dev,
   2222  1.1   dyoung 			    "Critical Failure setting up receive buffers\n");
   2223  1.1   dyoung 			error = ENOMEM;
   2224  1.1   dyoung 			goto err_rx_desc;
   2225  1.1   dyoung 		}
   2226  1.1   dyoung 	}
   2227  1.1   dyoung 
   2228  1.1   dyoung 	/*
   2229  1.1   dyoung 	** Finally set up the queue holding structs
   2230  1.1   dyoung 	*/
   2231  1.1   dyoung 	for (int i = 0; i < adapter->num_queues; i++) {
   2232  1.1   dyoung 		que = &adapter->queues[i];
   2233  1.1   dyoung 		que->adapter = adapter;
   2234  1.1   dyoung 		que->txr = &adapter->tx_rings[i];
   2235  1.1   dyoung 		que->rxr = &adapter->rx_rings[i];
   2236  1.1   dyoung 	}
   2237  1.1   dyoung 
   2238  1.1   dyoung 	return (0);
   2239  1.1   dyoung 
   2240  1.1   dyoung err_rx_desc:
   2241  1.1   dyoung 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2242  1.1   dyoung 		ixv_dma_free(adapter, &rxr->rxdma);
   2243  1.1   dyoung err_tx_desc:
   2244  1.1   dyoung 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2245  1.1   dyoung 		ixv_dma_free(adapter, &txr->txdma);
   2246  1.1   dyoung 	free(adapter->rx_rings, M_DEVBUF);
   2247  1.1   dyoung rx_fail:
   2248  1.1   dyoung 	free(adapter->tx_rings, M_DEVBUF);
   2249  1.1   dyoung tx_fail:
   2250  1.1   dyoung 	free(adapter->queues, M_DEVBUF);
   2251  1.1   dyoung fail:
   2252  1.1   dyoung 	return (error);
   2253  1.1   dyoung }
   2254  1.1   dyoung 
   2255  1.1   dyoung 
   2256  1.1   dyoung /*********************************************************************
   2257  1.1   dyoung  *
   2258  1.1   dyoung  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2259  1.1   dyoung  *  the information needed to transmit a packet on the wire. This is
   2260  1.1   dyoung  *  called only once at attach, setup is done every reset.
   2261  1.1   dyoung  *
   2262  1.1   dyoung  **********************************************************************/
   2263  1.1   dyoung static int
   2264  1.1   dyoung ixv_allocate_transmit_buffers(struct tx_ring *txr)
   2265  1.1   dyoung {
   2266  1.1   dyoung 	struct adapter *adapter = txr->adapter;
   2267  1.1   dyoung 	device_t dev = adapter->dev;
   2268  1.1   dyoung 	struct ixv_tx_buf *txbuf;
   2269  1.1   dyoung 	int error, i;
   2270  1.1   dyoung 
   2271  1.1   dyoung 	/*
   2272  1.1   dyoung 	 * Setup DMA descriptor areas.
   2273  1.1   dyoung 	 */
   2274  1.3  msaitoh 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2275  1.1   dyoung 			       1, 0,		/* alignment, bounds */
   2276  1.1   dyoung 			       IXV_TSO_SIZE,		/* maxsize */
   2277  1.1   dyoung 			       32,			/* nsegments */
   2278  1.1   dyoung 			       PAGE_SIZE,		/* maxsegsize */
   2279  1.1   dyoung 			       0,			/* flags */
   2280  1.1   dyoung 			       &txr->txtag))) {
   2281  1.3  msaitoh 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   2282  1.1   dyoung 		goto fail;
   2283  1.1   dyoung 	}
   2284  1.1   dyoung 
   2285  1.1   dyoung 	if (!(txr->tx_buffers =
   2286  1.1   dyoung 	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
   2287  1.1   dyoung 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2288  1.3  msaitoh 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   2289  1.1   dyoung 		error = ENOMEM;
   2290  1.1   dyoung 		goto fail;
   2291  1.1   dyoung 	}
   2292  1.1   dyoung 
   2293  1.1   dyoung         /* Create the descriptor buffer dma maps */
   2294  1.1   dyoung 	txbuf = txr->tx_buffers;
   2295  1.1   dyoung 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2296  1.3  msaitoh 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   2297  1.1   dyoung 		if (error != 0) {
   2298  1.3  msaitoh 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   2299  1.1   dyoung 			goto fail;
   2300  1.1   dyoung 		}
   2301  1.1   dyoung 	}
   2302  1.1   dyoung 
   2303  1.1   dyoung 	return 0;
   2304  1.1   dyoung fail:
   2305  1.1   dyoung 	/* We free all, it handles case where we are in the middle */
   2306  1.1   dyoung 	ixv_free_transmit_structures(adapter);
   2307  1.1   dyoung 	return (error);
   2308  1.1   dyoung }
   2309  1.1   dyoung 
   2310  1.1   dyoung /*********************************************************************
   2311  1.1   dyoung  *
   2312  1.1   dyoung  *  Initialize a transmit ring.
   2313  1.1   dyoung  *
   2314  1.1   dyoung  **********************************************************************/
   2315  1.1   dyoung static void
   2316  1.1   dyoung ixv_setup_transmit_ring(struct tx_ring *txr)
   2317  1.1   dyoung {
   2318  1.1   dyoung 	struct adapter *adapter = txr->adapter;
   2319  1.1   dyoung 	struct ixv_tx_buf *txbuf;
   2320  1.1   dyoung 	int i;
   2321  1.1   dyoung 
   2322  1.1   dyoung 	/* Clear the old ring contents */
   2323  1.1   dyoung 	IXV_TX_LOCK(txr);
   2324  1.1   dyoung 	bzero((void *)txr->tx_base,
   2325  1.1   dyoung 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   2326  1.1   dyoung 	/* Reset indices */
   2327  1.1   dyoung 	txr->next_avail_desc = 0;
   2328  1.1   dyoung 	txr->next_to_clean = 0;
   2329  1.1   dyoung 
   2330  1.1   dyoung 	/* Free any existing tx buffers. */
   2331  1.1   dyoung         txbuf = txr->tx_buffers;
   2332  1.1   dyoung 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   2333  1.1   dyoung 		if (txbuf->m_head != NULL) {
   2334  1.3  msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   2335  1.3  msaitoh 			    0, txbuf->m_head->m_pkthdr.len,
   2336  1.1   dyoung 			    BUS_DMASYNC_POSTWRITE);
   2337  1.3  msaitoh 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   2338  1.1   dyoung 			m_freem(txbuf->m_head);
   2339  1.1   dyoung 			txbuf->m_head = NULL;
   2340  1.1   dyoung 		}
   2341  1.1   dyoung 		/* Clear the EOP index */
   2342  1.1   dyoung 		txbuf->eop_index = -1;
   2343  1.1   dyoung         }
   2344  1.1   dyoung 
   2345  1.1   dyoung 	/* Set number of descriptors available */
   2346  1.1   dyoung 	txr->tx_avail = adapter->num_tx_desc;
   2347  1.1   dyoung 
   2348  1.3  msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   2349  1.1   dyoung 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2350  1.1   dyoung 	IXV_TX_UNLOCK(txr);
   2351  1.1   dyoung }
   2352  1.1   dyoung 
   2353  1.1   dyoung /*********************************************************************
   2354  1.1   dyoung  *
   2355  1.1   dyoung  *  Initialize all transmit rings.
   2356  1.1   dyoung  *
   2357  1.1   dyoung  **********************************************************************/
   2358  1.1   dyoung static int
   2359  1.1   dyoung ixv_setup_transmit_structures(struct adapter *adapter)
   2360  1.1   dyoung {
   2361  1.1   dyoung 	struct tx_ring *txr = adapter->tx_rings;
   2362  1.1   dyoung 
   2363  1.1   dyoung 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   2364  1.1   dyoung 		ixv_setup_transmit_ring(txr);
   2365  1.1   dyoung 
   2366  1.1   dyoung 	return (0);
   2367  1.1   dyoung }
   2368  1.1   dyoung 
   2369  1.1   dyoung /*********************************************************************
   2370  1.1   dyoung  *
   2371  1.1   dyoung  *  Enable transmit unit.
   2372  1.1   dyoung  *
   2373  1.1   dyoung  **********************************************************************/
   2374  1.1   dyoung static void
   2375  1.1   dyoung ixv_initialize_transmit_units(struct adapter *adapter)
   2376  1.1   dyoung {
   2377  1.1   dyoung 	struct tx_ring	*txr = adapter->tx_rings;
   2378  1.1   dyoung 	struct ixgbe_hw	*hw = &adapter->hw;
   2379  1.1   dyoung 
   2380  1.1   dyoung 
   2381  1.1   dyoung 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2382  1.1   dyoung 		u64	tdba = txr->txdma.dma_paddr;
   2383  1.1   dyoung 		u32	txctrl, txdctl;
   2384  1.1   dyoung 
   2385  1.1   dyoung 		/* Set WTHRESH to 8, burst writeback */
   2386  1.1   dyoung 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2387  1.1   dyoung 		txdctl |= (8 << 16);
   2388  1.1   dyoung 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2389  1.1   dyoung 		/* Now enable */
   2390  1.1   dyoung 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   2391  1.1   dyoung 		txdctl |= IXGBE_TXDCTL_ENABLE;
   2392  1.1   dyoung 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   2393  1.1   dyoung 
   2394  1.1   dyoung 		/* Set the HW Tx Head and Tail indices */
   2395  1.1   dyoung 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   2396  1.1   dyoung 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   2397  1.1   dyoung 
   2398  1.1   dyoung 		/* Setup Transmit Descriptor Cmd Settings */
   2399  1.1   dyoung 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   2400  1.1   dyoung 		txr->watchdog_check = FALSE;
   2401  1.1   dyoung 
   2402  1.1   dyoung 		/* Set Ring parameters */
   2403  1.1   dyoung 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   2404  1.1   dyoung 		       (tdba & 0x00000000ffffffffULL));
   2405  1.1   dyoung 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   2406  1.1   dyoung 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   2407  1.1   dyoung 		    adapter->num_tx_desc *
   2408  1.1   dyoung 		    sizeof(struct ixgbe_legacy_tx_desc));
   2409  1.1   dyoung 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   2410  1.6  msaitoh 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   2411  1.1   dyoung 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   2412  1.1   dyoung 		break;
   2413  1.1   dyoung 	}
   2414  1.1   dyoung 
   2415  1.1   dyoung 	return;
   2416  1.1   dyoung }
   2417  1.1   dyoung 
   2418  1.1   dyoung /*********************************************************************
   2419  1.1   dyoung  *
   2420  1.1   dyoung  *  Free all transmit rings.
   2421  1.1   dyoung  *
   2422  1.1   dyoung  **********************************************************************/
   2423  1.1   dyoung static void
   2424  1.1   dyoung ixv_free_transmit_structures(struct adapter *adapter)
   2425  1.1   dyoung {
   2426  1.1   dyoung 	struct tx_ring *txr = adapter->tx_rings;
   2427  1.1   dyoung 
   2428  1.1   dyoung 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2429  1.1   dyoung 		ixv_free_transmit_buffers(txr);
   2430  1.1   dyoung 		ixv_dma_free(adapter, &txr->txdma);
   2431  1.1   dyoung 		IXV_TX_LOCK_DESTROY(txr);
   2432  1.1   dyoung 	}
   2433  1.1   dyoung 	free(adapter->tx_rings, M_DEVBUF);
   2434  1.1   dyoung }
   2435  1.1   dyoung 
   2436  1.1   dyoung /*********************************************************************
   2437  1.1   dyoung  *
   2438  1.1   dyoung  *  Free transmit ring related data structures.
   2439  1.1   dyoung  *
   2440  1.1   dyoung  **********************************************************************/
   2441  1.1   dyoung static void
   2442  1.1   dyoung ixv_free_transmit_buffers(struct tx_ring *txr)
   2443  1.1   dyoung {
   2444  1.1   dyoung 	struct adapter *adapter = txr->adapter;
   2445  1.1   dyoung 	struct ixv_tx_buf *tx_buffer;
   2446  1.1   dyoung 	int             i;
   2447  1.1   dyoung 
   2448  1.1   dyoung 	INIT_DEBUGOUT("free_transmit_ring: begin");
   2449  1.1   dyoung 
   2450  1.1   dyoung 	if (txr->tx_buffers == NULL)
   2451  1.1   dyoung 		return;
   2452  1.1   dyoung 
   2453  1.1   dyoung 	tx_buffer = txr->tx_buffers;
   2454  1.1   dyoung 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   2455  1.1   dyoung 		if (tx_buffer->m_head != NULL) {
   2456  1.3  msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   2457  1.3  msaitoh 			    0, tx_buffer->m_head->m_pkthdr.len,
   2458  1.1   dyoung 			    BUS_DMASYNC_POSTWRITE);
   2459  1.3  msaitoh 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2460  1.1   dyoung 			m_freem(tx_buffer->m_head);
   2461  1.1   dyoung 			tx_buffer->m_head = NULL;
   2462  1.1   dyoung 			if (tx_buffer->map != NULL) {
   2463  1.3  msaitoh 				ixgbe_dmamap_destroy(txr->txtag,
   2464  1.1   dyoung 				    tx_buffer->map);
   2465  1.1   dyoung 				tx_buffer->map = NULL;
   2466  1.1   dyoung 			}
   2467  1.1   dyoung 		} else if (tx_buffer->map != NULL) {
   2468  1.3  msaitoh 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   2469  1.3  msaitoh 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   2470  1.1   dyoung 			tx_buffer->map = NULL;
   2471  1.1   dyoung 		}
   2472  1.1   dyoung 	}
   2473  1.1   dyoung #if __FreeBSD_version >= 800000
   2474  1.1   dyoung 	if (txr->br != NULL)
   2475  1.1   dyoung 		buf_ring_free(txr->br, M_DEVBUF);
   2476  1.1   dyoung #endif
   2477  1.1   dyoung 	if (txr->tx_buffers != NULL) {
   2478  1.1   dyoung 		free(txr->tx_buffers, M_DEVBUF);
   2479  1.1   dyoung 		txr->tx_buffers = NULL;
   2480  1.1   dyoung 	}
   2481  1.1   dyoung 	if (txr->txtag != NULL) {
   2482  1.3  msaitoh 		ixgbe_dma_tag_destroy(txr->txtag);
   2483  1.1   dyoung 		txr->txtag = NULL;
   2484  1.1   dyoung 	}
   2485  1.1   dyoung 	return;
   2486  1.1   dyoung }
   2487  1.1   dyoung 
   2488  1.1   dyoung /*********************************************************************
   2489  1.1   dyoung  *
   2490  1.3  msaitoh  *  Advanced Context Descriptor setup for VLAN or L4 CSUM
   2491  1.1   dyoung  *
   2492  1.1   dyoung  **********************************************************************/
   2493  1.1   dyoung 
   2494  1.3  msaitoh static u32
   2495  1.1   dyoung ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   2496  1.1   dyoung {
   2497  1.3  msaitoh 	struct m_tag *mtag;
   2498  1.1   dyoung 	struct adapter *adapter = txr->adapter;
   2499  1.3  msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
   2500  1.1   dyoung 	struct ixgbe_adv_tx_context_desc *TXD;
   2501  1.1   dyoung 	struct ixv_tx_buf        *tx_buffer;
   2502  1.3  msaitoh 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   2503  1.1   dyoung 	struct ether_vlan_header *eh;
   2504  1.3  msaitoh 	struct ip ip;
   2505  1.3  msaitoh 	struct ip6_hdr ip6;
   2506  1.1   dyoung 	int  ehdrlen, ip_hlen = 0;
   2507  1.1   dyoung 	u16	etype;
   2508  1.1   dyoung 	u8	ipproto = 0;
   2509  1.3  msaitoh 	bool	offload;
   2510  1.1   dyoung 	int ctxd = txr->next_avail_desc;
   2511  1.1   dyoung 	u16 vtag = 0;
   2512  1.1   dyoung 
   2513  1.1   dyoung 
   2514  1.3  msaitoh 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   2515  1.1   dyoung 
   2516  1.1   dyoung 	tx_buffer = &txr->tx_buffers[ctxd];
   2517  1.1   dyoung 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   2518  1.1   dyoung 
   2519  1.1   dyoung 	/*
   2520  1.1   dyoung 	** In advanced descriptors the vlan tag must
   2521  1.1   dyoung 	** be placed into the descriptor itself.
   2522  1.1   dyoung 	*/
   2523  1.3  msaitoh 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   2524  1.3  msaitoh 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2525  1.1   dyoung 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   2526  1.3  msaitoh 	} else if (!offload)
   2527  1.3  msaitoh 		return 0;
   2528  1.1   dyoung 
   2529  1.1   dyoung 	/*
   2530  1.1   dyoung 	 * Determine where frame payload starts.
   2531  1.1   dyoung 	 * Jump over vlan headers if already present,
   2532  1.1   dyoung 	 * helpful for QinQ too.
   2533  1.1   dyoung 	 */
   2534  1.3  msaitoh 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   2535  1.1   dyoung 	eh = mtod(mp, struct ether_vlan_header *);
   2536  1.1   dyoung 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   2537  1.3  msaitoh 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   2538  1.1   dyoung 		etype = ntohs(eh->evl_proto);
   2539  1.1   dyoung 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   2540  1.1   dyoung 	} else {
   2541  1.1   dyoung 		etype = ntohs(eh->evl_encap_proto);
   2542  1.1   dyoung 		ehdrlen = ETHER_HDR_LEN;
   2543  1.1   dyoung 	}
   2544  1.1   dyoung 
   2545  1.1   dyoung 	/* Set the ether header length */
   2546  1.1   dyoung 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   2547  1.1   dyoung 
   2548  1.1   dyoung 	switch (etype) {
   2549  1.3  msaitoh 	case ETHERTYPE_IP:
   2550  1.3  msaitoh 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   2551  1.3  msaitoh 		ip_hlen = ip.ip_hl << 2;
   2552  1.3  msaitoh 		ipproto = ip.ip_p;
   2553  1.3  msaitoh #if 0
   2554  1.3  msaitoh 		ip.ip_sum = 0;
   2555  1.3  msaitoh 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   2556  1.3  msaitoh #else
   2557  1.3  msaitoh 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   2558  1.3  msaitoh 		    ip.ip_sum == 0);
   2559  1.3  msaitoh #endif
   2560  1.3  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   2561  1.3  msaitoh 		break;
   2562  1.3  msaitoh 	case ETHERTYPE_IPV6:
   2563  1.3  msaitoh 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   2564  1.3  msaitoh 		ip_hlen = sizeof(ip6);
   2565  1.3  msaitoh 		ipproto = ip6.ip6_nxt;
   2566  1.3  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   2567  1.3  msaitoh 		break;
   2568  1.3  msaitoh 	default:
   2569  1.3  msaitoh 		break;
   2570  1.1   dyoung 	}
   2571  1.1   dyoung 
   2572  1.3  msaitoh 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   2573  1.3  msaitoh 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   2574  1.3  msaitoh 
   2575  1.1   dyoung 	vlan_macip_lens |= ip_hlen;
   2576  1.1   dyoung 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   2577  1.1   dyoung 
   2578  1.3  msaitoh 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   2579  1.3  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   2580  1.3  msaitoh 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2581  1.3  msaitoh 		KASSERT(ipproto == IPPROTO_TCP);
   2582  1.3  msaitoh 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   2583  1.3  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   2584  1.3  msaitoh 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   2585  1.3  msaitoh 		KASSERT(ipproto == IPPROTO_UDP);
   2586  1.1   dyoung 	}
   2587  1.1   dyoung 
   2588  1.1   dyoung 	/* Now copy bits into descriptor */
   2589  1.1   dyoung 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   2590  1.1   dyoung 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   2591  1.1   dyoung 	TXD->seqnum_seed = htole32(0);
   2592  1.1   dyoung 	TXD->mss_l4len_idx = htole32(0);
   2593  1.1   dyoung 
   2594  1.1   dyoung 	tx_buffer->m_head = NULL;
   2595  1.1   dyoung 	tx_buffer->eop_index = -1;
   2596  1.1   dyoung 
   2597  1.1   dyoung 	/* We've consumed the first desc, adjust counters */
   2598  1.1   dyoung 	if (++ctxd == adapter->num_tx_desc)
   2599  1.1   dyoung 		ctxd = 0;
   2600  1.1   dyoung 	txr->next_avail_desc = ctxd;
   2601  1.1   dyoung 	--txr->tx_avail;
   2602  1.1   dyoung 
   2603  1.3  msaitoh         return olinfo;
   2604  1.1   dyoung }
   2605  1.1   dyoung 
   2606  1.1   dyoung /**********************************************************************
   2607  1.1   dyoung  *
   2608  1.1   dyoung  *  Setup work for hardware segmentation offload (TSO) on
   2609  1.1   dyoung  *  adapters using advanced tx descriptors
   2610  1.1   dyoung  *
   2611  1.1   dyoung  **********************************************************************/
/*
 * Build a TSO context descriptor for an IPv4/TCP packet.
 *
 * On success, *paylen is set to the TCP payload length (packet length
 * minus Ethernet + IP + TCP headers) for use in the data descriptor,
 * and one context descriptor slot is consumed from the ring.
 * Returns FALSE (without consuming a descriptor) when the headers are
 * not contiguous in the first mbuf or the packet is not TCP.
 */
static bool
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
	struct m_tag *mtag;
	struct adapter *adapter = txr->adapter;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf        *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;


	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

        /* Ensure we have at least the IP+TCP header in the first mbuf. */
        if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;   /* 0 */
	/* Zero the IP checksum; hardware fills it in per segment. */
	ip->ip_sum = 0;
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((char *)ip + ip_hlen);
	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
	/* Pre-seed th_sum with the pseudo-header checksum (no length). */
	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/* This is used in the transmit desc in encap */
	*paylen = mp->m_pkthdr.len - hdrlen;

	/* VLAN MACLEN IPLEN */
	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);


	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	/* Context descriptors carry no mbuf of their own. */
	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	/* We've consumed one descriptor; advance ring state. */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	return TRUE;
}
   2695  1.1   dyoung 
   2696  1.1   dyoung 
   2697  1.1   dyoung /**********************************************************************
   2698  1.1   dyoung  *
   2699  1.1   dyoung  *  Examine each tx_buffer in the used queue. If the hardware is done
   2700  1.1   dyoung  *  processing the packet then free associated resources. The
   2701  1.1   dyoung  *  tx_buffer is put back on the free queue.
   2702  1.1   dyoung  *
   2703  1.1   dyoung  **********************************************************************/
/*
 * Returns true when descriptors are still outstanding after cleaning,
 * false when the ring is (or was already) completely clean.
 * Caller must hold txr->tx_mtx.
 */
static bool
ixv_txeof(struct tx_ring *txr)
{
	struct adapter	*adapter = txr->adapter;
	struct ifnet	*ifp = adapter->ifp;
	u32	first, last, done;
	struct ixv_tx_buf *tx_buffer;
	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

	KASSERT(mutex_owned(&txr->tx_mtx));

	/* Ring fully available: nothing in flight to clean. */
	if (txr->tx_avail == adapter->num_tx_desc)
		return false;

	first = txr->next_to_clean;
	tx_buffer = &txr->tx_buffers[first];
	/* For cleanup we just use legacy struct */
	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
	last = tx_buffer->eop_index;
	if (last == -1)
		return false;
	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
	** I do this so the comparison in the
	** inner while loop below can be simple
	*/
	if (++last == adapter->num_tx_desc) last = 0;
	done = last;

        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);
	/*
	** Only the EOP descriptor of a packet now has the DD
	** bit set, this is what we look for...
	*/
	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++txr->tx_avail;

			if (tx_buffer->m_head) {
				bus_dmamap_sync(txr->txtag->dt_dmat,
				    tx_buffer->map,
				    0, tx_buffer->m_head->m_pkthdr.len,
				    BUS_DMASYNC_POSTWRITE);
				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
				tx_buffer->map = NULL;
			}
			tx_buffer->eop_index = -1;
			/* Progress was made: restamp the watchdog timer. */
			getmicrotime(&txr->watchdog_time);

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &txr->tx_buffers[first];
			tx_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
		}
		++ifp->if_opackets;
		/* See if there is more work now */
		last = tx_buffer->eop_index;
		if (last != -1) {
			eop_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
			/* Get next done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
		ifp->if_flags &= ~IFF_OACTIVE;
		if (txr->tx_avail == adapter->num_tx_desc) {
			txr->watchdog_check = FALSE;
			return false;
		}
	}

	return true;
}
   2803  1.1   dyoung 
   2804  1.1   dyoung /*********************************************************************
   2805  1.1   dyoung  *
   2806  1.1   dyoung  *  Refresh mbuf buffers for RX descriptor rings
   2807  1.1   dyoung  *   - now keeps its own state so discards due to resource
   2808  1.1   dyoung  *     exhaustion are unnecessary, if an mbuf cannot be obtained
   2809  1.1   dyoung  *     it just returns, keeping its placeholder, thus it can simply
   2810  1.1   dyoung  *     be recalled to try again.
   2811  1.1   dyoung  *
   2812  1.1   dyoung  **********************************************************************/
   2813  1.1   dyoung static void
   2814  1.1   dyoung ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
   2815  1.1   dyoung {
   2816  1.1   dyoung 	struct adapter		*adapter = rxr->adapter;
   2817  1.1   dyoung 	struct ixv_rx_buf	*rxbuf;
   2818  1.1   dyoung 	struct mbuf		*mh, *mp;
   2819  1.3  msaitoh 	int			i, j, error;
   2820  1.3  msaitoh 	bool			refreshed = false;
   2821  1.1   dyoung 
   2822  1.3  msaitoh 	i = j = rxr->next_to_refresh;
   2823  1.5  msaitoh         /* Get the control variable, one beyond refresh point */
   2824  1.3  msaitoh 	if (++j == adapter->num_rx_desc)
   2825  1.3  msaitoh 		j = 0;
   2826  1.3  msaitoh 	while (j != limit) {
   2827  1.1   dyoung 		rxbuf = &rxr->rx_buffers[i];
   2828  1.1   dyoung 		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
   2829  1.1   dyoung 			mh = m_gethdr(M_DONTWAIT, MT_DATA);
   2830  1.1   dyoung 			if (mh == NULL)
   2831  1.1   dyoung 				goto update;
   2832  1.1   dyoung 			mh->m_pkthdr.len = mh->m_len = MHLEN;
   2833  1.1   dyoung 			mh->m_len = MHLEN;
   2834  1.1   dyoung 			mh->m_flags |= M_PKTHDR;
   2835  1.1   dyoung 			m_adj(mh, ETHER_ALIGN);
   2836  1.1   dyoung 			/* Get the memory mapping */
   2837  1.3  msaitoh 			error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   2838  1.3  msaitoh 			    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   2839  1.1   dyoung 			if (error != 0) {
   2840  1.1   dyoung 				printf("GET BUF: dmamap load"
   2841  1.1   dyoung 				    " failure - %d\n", error);
   2842  1.1   dyoung 				m_free(mh);
   2843  1.1   dyoung 				goto update;
   2844  1.1   dyoung 			}
   2845  1.1   dyoung 			rxbuf->m_head = mh;
   2846  1.3  msaitoh 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2847  1.1   dyoung 			    BUS_DMASYNC_PREREAD);
   2848  1.1   dyoung 			rxr->rx_base[i].read.hdr_addr =
   2849  1.3  msaitoh 			    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   2850  1.1   dyoung 		}
   2851  1.1   dyoung 
   2852  1.1   dyoung 		if (rxbuf->m_pack == NULL) {
   2853  1.3  msaitoh 			mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   2854  1.3  msaitoh 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   2855  1.3  msaitoh 			if (mp == NULL) {
   2856  1.3  msaitoh 				rxr->no_jmbuf.ev_count++;
   2857  1.1   dyoung 				goto update;
   2858  1.5  msaitoh 			} else
   2859  1.5  msaitoh 				mp = rxbuf->m_pack;
   2860  1.5  msaitoh 
   2861  1.1   dyoung 			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   2862  1.1   dyoung 			/* Get the memory mapping */
   2863  1.3  msaitoh 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   2864  1.3  msaitoh 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   2865  1.1   dyoung 			if (error != 0) {
   2866  1.1   dyoung 				printf("GET BUF: dmamap load"
   2867  1.1   dyoung 				    " failure - %d\n", error);
   2868  1.1   dyoung 				m_free(mp);
   2869  1.5  msaitoh 				rxbuf->m_pack = NULL;
   2870  1.1   dyoung 				goto update;
   2871  1.1   dyoung 			}
   2872  1.1   dyoung 			rxbuf->m_pack = mp;
   2873  1.3  msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2874  1.3  msaitoh 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   2875  1.1   dyoung 			rxr->rx_base[i].read.pkt_addr =
   2876  1.3  msaitoh 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   2877  1.1   dyoung 		}
   2878  1.1   dyoung 
   2879  1.3  msaitoh 		refreshed = true;
   2880  1.3  msaitoh 		rxr->next_to_refresh = i = j;
   2881  1.1   dyoung 		/* Calculate next index */
   2882  1.3  msaitoh 		if (++j == adapter->num_rx_desc)
   2883  1.3  msaitoh 			j = 0;
   2884  1.1   dyoung 	}
   2885  1.1   dyoung update:
   2886  1.5  msaitoh 	if (refreshed) /* update tail index */
   2887  1.1   dyoung 		IXGBE_WRITE_REG(&adapter->hw,
   2888  1.3  msaitoh 		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
   2889  1.1   dyoung 	return;
   2890  1.1   dyoung }
   2891  1.1   dyoung 
   2892  1.1   dyoung /*********************************************************************
   2893  1.1   dyoung  *
   2894  1.1   dyoung  *  Allocate memory for rx_buffer structures. Since we use one
   2895  1.1   dyoung  *  rx_buffer per received packet, the maximum number of rx_buffer's
   2896  1.1   dyoung  *  that we'll need is equal to the number of receive descriptors
   2897  1.1   dyoung  *  that we've allocated.
   2898  1.1   dyoung  *
   2899  1.1   dyoung  **********************************************************************/
   2900  1.1   dyoung static int
   2901  1.1   dyoung ixv_allocate_receive_buffers(struct rx_ring *rxr)
   2902  1.1   dyoung {
   2903  1.1   dyoung 	struct	adapter 	*adapter = rxr->adapter;
   2904  1.1   dyoung 	device_t 		dev = adapter->dev;
   2905  1.1   dyoung 	struct ixv_rx_buf 	*rxbuf;
   2906  1.1   dyoung 	int             	i, bsize, error;
   2907  1.1   dyoung 
   2908  1.1   dyoung 	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
   2909  1.1   dyoung 	if (!(rxr->rx_buffers =
   2910  1.1   dyoung 	    (struct ixv_rx_buf *) malloc(bsize,
   2911  1.1   dyoung 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2912  1.3  msaitoh 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   2913  1.1   dyoung 		error = ENOMEM;
   2914  1.1   dyoung 		goto fail;
   2915  1.1   dyoung 	}
   2916  1.1   dyoung 
   2917  1.3  msaitoh 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2918  1.1   dyoung 				   1, 0,	/* alignment, bounds */
   2919  1.1   dyoung 				   MSIZE,		/* maxsize */
   2920  1.1   dyoung 				   1,			/* nsegments */
   2921  1.1   dyoung 				   MSIZE,		/* maxsegsize */
   2922  1.1   dyoung 				   0,			/* flags */
   2923  1.1   dyoung 				   &rxr->htag))) {
   2924  1.3  msaitoh 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2925  1.1   dyoung 		goto fail;
   2926  1.1   dyoung 	}
   2927  1.1   dyoung 
   2928  1.3  msaitoh 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2929  1.1   dyoung 				   1, 0,	/* alignment, bounds */
   2930  1.1   dyoung 				   MJUMPAGESIZE,	/* maxsize */
   2931  1.1   dyoung 				   1,			/* nsegments */
   2932  1.1   dyoung 				   MJUMPAGESIZE,	/* maxsegsize */
   2933  1.1   dyoung 				   0,			/* flags */
   2934  1.1   dyoung 				   &rxr->ptag))) {
   2935  1.3  msaitoh 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   2936  1.1   dyoung 		goto fail;
   2937  1.1   dyoung 	}
   2938  1.1   dyoung 
   2939  1.1   dyoung 	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
   2940  1.1   dyoung 		rxbuf = &rxr->rx_buffers[i];
   2941  1.3  msaitoh 		error = ixgbe_dmamap_create(rxr->htag,
   2942  1.1   dyoung 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   2943  1.1   dyoung 		if (error) {
   2944  1.3  msaitoh 			aprint_error_dev(dev, "Unable to create RX head map\n");
   2945  1.1   dyoung 			goto fail;
   2946  1.1   dyoung 		}
   2947  1.3  msaitoh 		error = ixgbe_dmamap_create(rxr->ptag,
   2948  1.1   dyoung 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   2949  1.1   dyoung 		if (error) {
   2950  1.3  msaitoh 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   2951  1.1   dyoung 			goto fail;
   2952  1.1   dyoung 		}
   2953  1.1   dyoung 	}
   2954  1.1   dyoung 
   2955  1.1   dyoung 	return (0);
   2956  1.1   dyoung 
   2957  1.1   dyoung fail:
   2958  1.1   dyoung 	/* Frees all, but can handle partial completion */
   2959  1.1   dyoung 	ixv_free_receive_structures(adapter);
   2960  1.1   dyoung 	return (error);
   2961  1.1   dyoung }
   2962  1.1   dyoung 
   2963  1.1   dyoung static void
   2964  1.1   dyoung ixv_free_receive_ring(struct rx_ring *rxr)
   2965  1.1   dyoung {
   2966  1.1   dyoung 	struct  adapter         *adapter;
   2967  1.1   dyoung 	struct ixv_rx_buf       *rxbuf;
   2968  1.1   dyoung 	int i;
   2969  1.1   dyoung 
   2970  1.1   dyoung 	adapter = rxr->adapter;
   2971  1.1   dyoung 	for (i = 0; i < adapter->num_rx_desc; i++) {
   2972  1.1   dyoung 		rxbuf = &rxr->rx_buffers[i];
   2973  1.1   dyoung 		if (rxbuf->m_head != NULL) {
   2974  1.3  msaitoh 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   2975  1.1   dyoung 			    BUS_DMASYNC_POSTREAD);
   2976  1.3  msaitoh 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   2977  1.1   dyoung 			rxbuf->m_head->m_flags |= M_PKTHDR;
   2978  1.1   dyoung 			m_freem(rxbuf->m_head);
   2979  1.1   dyoung 		}
   2980  1.1   dyoung 		if (rxbuf->m_pack != NULL) {
   2981  1.3  msaitoh 			/* XXX not ixgbe_ ? */
   2982  1.3  msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   2983  1.3  msaitoh 			    0, rxbuf->m_pack->m_pkthdr.len,
   2984  1.1   dyoung 			    BUS_DMASYNC_POSTREAD);
   2985  1.3  msaitoh 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   2986  1.1   dyoung 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   2987  1.1   dyoung 			m_freem(rxbuf->m_pack);
   2988  1.1   dyoung 		}
   2989  1.1   dyoung 		rxbuf->m_head = NULL;
   2990  1.1   dyoung 		rxbuf->m_pack = NULL;
   2991  1.1   dyoung 	}
   2992  1.1   dyoung }
   2993  1.1   dyoung 
   2994  1.1   dyoung 
   2995  1.1   dyoung /*********************************************************************
   2996  1.1   dyoung  *
   2997  1.1   dyoung  *  Initialize a receive ring and its buffers.
   2998  1.1   dyoung  *
   2999  1.1   dyoung  **********************************************************************/
/*
 * (Re)initialize one receive ring: clear the descriptor area, free any
 * old buffers, refresh the jumbo-cluster pool, and populate every slot
 * with fresh mbufs.  Returns 0 or an errno; on failure the ring's
 * buffers are freed again before returning.
 */
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
	struct	adapter 	*adapter;
	struct ixv_rx_buf	*rxbuf;
#ifdef LRO
	struct ifnet		*ifp;
	struct lro_ctrl		*lro = &rxr->lro;
#endif /* LRO */
	int			rsize, error = 0;

	adapter = rxr->adapter;
#ifdef LRO
	ifp = adapter->ifp;
#endif /* LRO */

	/* Clear the ring contents */
	IXV_RX_LOCK(rxr);
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/* Free current RX buffer structs and their mbufs */
	ixv_free_receive_ring(rxr);

	/* Drop the lock around the pool rebuild (it may sleep/allocate). */
	IXV_RX_UNLOCK(rxr);

	/* Now reinitialize our supply of jumbo mbufs.  The number
	 * or size of jumbo mbufs may have changed.
	 */
	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);

	IXV_RX_LOCK(rxr);

	/* Configure header split? */
	if (ixv_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the mbufs */
	for (int j = 0; j != adapter->num_rx_desc; ++j) {
		struct mbuf	*mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
		/*
		** Dont allocate mbufs if not
		** doing header split, its wasteful
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		/*
		 * NOTE(review): m_adj here runs before m_len is set below,
		 * so the ETHER_ALIGN adjustment to m_len is overwritten —
		 * the refresh path does these in the opposite order.
		 * TODO confirm which ordering is intended.
		 */
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr =
		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
                        goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
		if (error != 0)
                        goto fail;
		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr =
		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
	}


	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets.ev_count = 0;
	rxr->rx_bytes.ev_count = 0;
	rxr->discard = FALSE;

	/* Flush the freshly written descriptors before the HW sees them. */
	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef LRO
	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		device_t dev = adapter->dev;
		int err = tcp_lro_init(lro);
		if (err) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}
#endif /* LRO */

	IXV_RX_UNLOCK(rxr);
	return (0);

fail:
	ixv_free_receive_ring(rxr);
	IXV_RX_UNLOCK(rxr);
	return (error);
}
   3131  1.1   dyoung 
   3132  1.1   dyoung /*********************************************************************
   3133  1.1   dyoung  *
   3134  1.1   dyoung  *  Initialize all receive rings.
   3135  1.1   dyoung  *
   3136  1.1   dyoung  **********************************************************************/
   3137  1.1   dyoung static int
   3138  1.1   dyoung ixv_setup_receive_structures(struct adapter *adapter)
   3139  1.1   dyoung {
   3140  1.1   dyoung 	struct rx_ring *rxr = adapter->rx_rings;
   3141  1.1   dyoung 	int j;
   3142  1.1   dyoung 
   3143  1.1   dyoung 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   3144  1.1   dyoung 		if (ixv_setup_receive_ring(rxr))
   3145  1.1   dyoung 			goto fail;
   3146  1.1   dyoung 
   3147  1.1   dyoung 	return (0);
   3148  1.1   dyoung fail:
   3149  1.1   dyoung 	/*
   3150  1.1   dyoung 	 * Free RX buffers allocated so far, we will only handle
   3151  1.1   dyoung 	 * the rings that completed, the failing case will have
   3152  1.1   dyoung 	 * cleaned up for itself. 'j' failed, so its the terminus.
   3153  1.1   dyoung 	 */
   3154  1.1   dyoung 	for (int i = 0; i < j; ++i) {
   3155  1.1   dyoung 		rxr = &adapter->rx_rings[i];
   3156  1.1   dyoung 		ixv_free_receive_ring(rxr);
   3157  1.1   dyoung 	}
   3158  1.1   dyoung 
   3159  1.1   dyoung 	return (ENOBUFS);
   3160  1.1   dyoung }
   3161  1.1   dyoung 
   3162  1.1   dyoung /*********************************************************************
   3163  1.1   dyoung  *
   3164  1.1   dyoung  *  Setup receive registers and features.
   3165  1.1   dyoung  *
   3166  1.1   dyoung  **********************************************************************/
   3167  1.1   dyoung #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   3168  1.1   dyoung 
/*
 * Program the VF receive hardware: filter control, jumbo-frame buffer
 * sizing, and per-queue descriptor ring base/length, SRRCTL buffer
 * layout (optionally header split), head/tail pointers, plus the
 * global RX checksum offload register.
 */
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	int i;
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet   *ifp = adapter->ifp;
	u32		bufsz, fctrl, rxcsum, hlreg;


	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		/* bufsz is in SRRCTL units (1 KB granularity). */
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Do the queue enabling first */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10 ms) for the enable bit to latch. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		if (rxr->hdr_split) {
			/* Use a standard mbuf for the header */
			reg |= ((IXV_RX_HDR <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		} else
			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* If PCSD is off, fall back to IP payload checksum (IPPCSE). */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
   3255  1.1   dyoung 
   3256  1.1   dyoung /*********************************************************************
   3257  1.1   dyoung  *
   3258  1.1   dyoung  *  Free all receive rings.
   3259  1.1   dyoung  *
   3260  1.1   dyoung  **********************************************************************/
   3261  1.1   dyoung static void
   3262  1.1   dyoung ixv_free_receive_structures(struct adapter *adapter)
   3263  1.1   dyoung {
   3264  1.1   dyoung 	struct rx_ring *rxr = adapter->rx_rings;
   3265  1.1   dyoung 
   3266  1.1   dyoung 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   3267  1.3  msaitoh #ifdef LRO
   3268  1.1   dyoung 		struct lro_ctrl		*lro = &rxr->lro;
   3269  1.3  msaitoh #endif /* LRO */
   3270  1.1   dyoung 		ixv_free_receive_buffers(rxr);
   3271  1.3  msaitoh #ifdef LRO
   3272  1.1   dyoung 		/* Free LRO memory */
   3273  1.1   dyoung 		tcp_lro_free(lro);
   3274  1.3  msaitoh #endif /* LRO */
   3275  1.1   dyoung 		/* Free the ring memory as well */
   3276  1.1   dyoung 		ixv_dma_free(adapter, &rxr->rxdma);
   3277  1.3  msaitoh 		IXV_RX_LOCK_DESTROY(rxr);
   3278  1.1   dyoung 	}
   3279  1.1   dyoung 
   3280  1.1   dyoung 	free(adapter->rx_rings, M_DEVBUF);
   3281  1.1   dyoung }
   3282  1.1   dyoung 
   3283  1.1   dyoung 
   3284  1.1   dyoung /*********************************************************************
   3285  1.1   dyoung  *
   3286  1.1   dyoung  *  Free receive ring data structures
   3287  1.1   dyoung  *
   3288  1.1   dyoung  **********************************************************************/
/*
 * Free the mbufs, DMA maps and DMA tags attached to one RX ring.
 * Counterpart of the ring's buffer-allocation path; called per ring
 * from ixv_free_receive_structures().  Safe to call on a partially
 * initialized ring: every pointer is NULL-checked before teardown.
 */
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	struct ixv_rx_buf	*rxbuf;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (int i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			/* Header mbuf: sync for CPU, unload map, then free */
			if (rxbuf->m_head != NULL) {
				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
				/* ensure m_freem sees a pkthdr mbuf */
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			/* Payload mbuf: same teardown order */
			if (rxbuf->m_pack != NULL) {
				/* XXX not ixgbe_* ? */
				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
				    0, rxbuf->m_pack->m_pkthdr.len,
				    BUS_DMASYNC_POSTREAD);
				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			/* Maps are destroyed only after their mbufs are gone */
			if (rxbuf->hmap != NULL) {
				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	/* Finally the DMA tags themselves (after all maps are destroyed) */
	if (rxr->htag != NULL) {
		ixgbe_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		ixgbe_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	return;
}
   3345  1.1   dyoung 
/*
 * Hand one fully assembled received packet to the stack (optionally
 * trying LRO first).  Called with the RX ring lock held; the lock is
 * dropped around the if_input call and re-taken before returning, so
 * the caller must not rely on ring state across this call.
 */
static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
	int s;

#ifdef LRO
	struct adapter	*adapter = ifp->if_softc;
	struct ethercom *ec = &adapter->osdep.ec;

        /*
         * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
         * should be computed by hardware. Also it should not have VLAN tag in
         * ethernet header.
         */
        if (rxr->lro_enabled &&
            (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
            (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
            (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
            (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
            (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
                /*
                 * Send to the stack if:
                 **  - LRO not enabled, or
                 **  - no LRO resources, or
                 **  - lro enqueue fails
                 */
                if (rxr->lro.lro_cnt != 0)
                        if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
                                return; /* consumed by LRO */
        }
#endif /* LRO */

	/* Drop the ring lock: if_input may re-enter the driver */
	IXV_RX_UNLOCK(rxr);

	s = splnet();
	/* Pass this up to any BPF listeners. */
	bpf_mtap(ifp, m);
        (*ifp->if_input)(ifp, m);
	splx(s);

	IXV_RX_LOCK(rxr);
}
   3389  1.1   dyoung 
   3390  1.1   dyoung static __inline void
   3391  1.1   dyoung ixv_rx_discard(struct rx_ring *rxr, int i)
   3392  1.1   dyoung {
   3393  1.1   dyoung 	struct ixv_rx_buf	*rbuf;
   3394  1.1   dyoung 
   3395  1.1   dyoung 	rbuf = &rxr->rx_buffers[i];
   3396  1.5  msaitoh 	if (rbuf->fmp != NULL) {/* Partial chain ? */
   3397  1.5  msaitoh 		rbuf->fmp->m_flags |= M_PKTHDR;
   3398  1.5  msaitoh 		m_freem(rbuf->fmp);
   3399  1.5  msaitoh 		rbuf->fmp = NULL;
   3400  1.5  msaitoh 	}
   3401  1.1   dyoung 
   3402  1.5  msaitoh 	/*
   3403  1.5  msaitoh 	** With advanced descriptors the writeback
   3404  1.5  msaitoh 	** clobbers the buffer addrs, so its easier
   3405  1.5  msaitoh 	** to just free the existing mbufs and take
   3406  1.5  msaitoh 	** the normal refresh path to get new buffers
   3407  1.5  msaitoh 	** and mapping.
   3408  1.5  msaitoh 	*/
   3409  1.5  msaitoh 	if (rbuf->m_head) {
   3410  1.5  msaitoh 		m_free(rbuf->m_head);
   3411  1.5  msaitoh 		rbuf->m_head = NULL;
   3412  1.5  msaitoh 	}
   3413  1.5  msaitoh 
   3414  1.5  msaitoh 	if (rbuf->m_pack) {
   3415  1.5  msaitoh 		m_free(rbuf->m_pack);
   3416  1.5  msaitoh 		rbuf->m_pack = NULL;
   3417  1.5  msaitoh 	}
   3418  1.1   dyoung 
   3419  1.1   dyoung 	return;
   3420  1.1   dyoung }
   3421  1.1   dyoung 
   3422  1.1   dyoung 
   3423  1.1   dyoung /*********************************************************************
   3424  1.1   dyoung  *
   3425  1.1   dyoung  *  This routine executes in interrupt context. It replenishes
   3426  1.1   dyoung  *  the mbufs in the descriptor and sends data which has been
   3427  1.1   dyoung  *  dma'ed into host memory to upper layer.
   3428  1.1   dyoung  *
   3429  1.1   dyoung  *  We loop at most count times if count is > 0, or until done if
   3430  1.1   dyoung  *  count < 0.
   3431  1.1   dyoung  *
   3432  1.1   dyoung  *  Return TRUE for more work, FALSE for all clean.
   3433  1.1   dyoung  *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
	struct adapter		*adapter = que->adapter;
	struct rx_ring		*rxr = que->rxr;
	struct ifnet		*ifp = adapter->ifp;
#ifdef LRO
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
#endif /* LRO */
	int			i, nextp, processed = 0;
	u32			staterr = 0;
	union ixgbe_adv_rx_desc	*cur;
	struct ixv_rx_buf	*rbuf, *nbuf;

	IXV_RX_LOCK(rxr);

	/* Resume where the previous pass left off */
	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		ptype;
		u16		hlen, plen, hdr, vtag;
		bool		eop;

		/* Sync the ring. */
		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		/* DD clear means hardware hasn't written this slot back yet */
		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;

		/* count < 0 on entry means "loop until done" (never hits 0) */
		count--;
		sendmp = NULL;
		nbuf = NULL;
		cur->wb.upper.status_error = 0;
		rbuf = &rxr->rx_buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;

		/* Decode the writeback fields of the advanced descriptor */
		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		vtag = le16toh(cur->wb.upper.vlan);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		/* Make sure all parts of a bad packet are discarded */
		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			rxr->rx_discarded.ev_count++;
			/* keep discarding until EOP of the bad frame */
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			ixv_rx_discard(rxr, i);
			goto next_desc;
		}

		if (!eop) {
			/* More descriptors follow: prefetch the next slot */
			nextp = i + 1;
			if (nextp == adapter->num_rx_desc)
				nextp = 0;
			nbuf = &rxr->rx_buffers[nextp];
			prefetch(nbuf);
		}
		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			/* This must be an initial descriptor */
			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			/* clamp to the size of the header buffer we posted */
			if (hlen > IXV_RX_HDR)
				hlen = IXV_RX_HDR;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if its a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->rx_split_packets.ev_count++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we wont have to.
			*/
                        if (eop == 0) {
				/* stash the chain head */
                                nbuf->fmp = mh;
				/* Make forward chain */
                                if (plen)
                                        mp->m_next = nbuf->m_pack;
                                else
                                        mh->m_next = nbuf->m_pack;
                        } else {
				/* Singlet, prepare to send */
                                sendmp = mh;
                                if (VLAN_ATTACHED(&adapter->osdep.ec) &&
				  (staterr & IXGBE_RXD_STAT_VP)) {
					VLAN_INPUT_TAG(ifp, sendmp, vtag,
					    printf("%s: could not apply VLAN "
					        "tag", __func__));
                                }
                        }
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** split packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (staterr & IXGBE_RXD_STAT_VP) {
					/* XXX Do something reasonable on
					 * error.
					 */
					VLAN_INPUT_TAG(ifp, sendmp, vtag,
					    printf("%s: could not apply VLAN "
					        "tag", __func__));
				}
                        }
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (eop) {
			sendmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets.ev_count++;
			/* capture data for AIM */
			rxr->bytes += sendmp->m_pkthdr.len;
			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				ixv_rx_checksum(staterr, sendmp, ptype,
				   &adapter->stats);
			}
#if __FreeBSD_version >= 800000
			sendmp->m_pkthdr.flowid = que->msix;
			sendmp->m_flags |= M_FLOWID;
#endif
		}
next_desc:
		/* Give the (now consumed) descriptor back to the device */
		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL)
			ixv_rx_input(rxr, ifp, sendmp, ptype);

               /* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixv_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixv_rx_unrefreshed(rxr))
		ixv_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

#ifdef LRO
	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
#endif /* LRO */

	IXV_RX_UNLOCK(rxr);

	/*
	** We still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		ixv_rearm_queues(adapter, (u64)(1ULL << que->msix));
		return true;
	}

	return false;
}
   3668  1.1   dyoung 
   3669  1.1   dyoung 
   3670  1.1   dyoung /*********************************************************************
   3671  1.1   dyoung  *
   3672  1.1   dyoung  *  Verify that the hardware indicated that the checksum is valid.
   3673  1.1   dyoung  *  Inform the stack about the status of checksum so that stack
   3674  1.1   dyoung  *  doesn't spend time verifying the checksum.
   3675  1.1   dyoung  *
   3676  1.1   dyoung  *********************************************************************/
   3677  1.1   dyoung static void
   3678  1.3  msaitoh ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   3679  1.3  msaitoh     struct ixgbevf_hw_stats *stats)
   3680  1.1   dyoung {
   3681  1.1   dyoung 	u16	status = (u16) staterr;
   3682  1.1   dyoung 	u8	errors = (u8) (staterr >> 24);
   3683  1.3  msaitoh #if 0
   3684  1.1   dyoung 	bool	sctp = FALSE;
   3685  1.1   dyoung 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   3686  1.1   dyoung 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   3687  1.1   dyoung 		sctp = TRUE;
   3688  1.3  msaitoh #endif
   3689  1.1   dyoung 	if (status & IXGBE_RXD_STAT_IPCS) {
   3690  1.3  msaitoh 		stats->ipcs.ev_count++;
   3691  1.1   dyoung 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   3692  1.1   dyoung 			/* IP Checksum Good */
   3693  1.3  msaitoh 			mp->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   3694  1.1   dyoung 
   3695  1.3  msaitoh 		} else {
   3696  1.3  msaitoh 			stats->ipcs_bad.ev_count++;
   3697  1.3  msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   3698  1.3  msaitoh 		}
   3699  1.1   dyoung 	}
   3700  1.1   dyoung 	if (status & IXGBE_RXD_STAT_L4CS) {
   3701  1.3  msaitoh 		stats->l4cs.ev_count++;
   3702  1.3  msaitoh 		u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   3703  1.1   dyoung 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   3704  1.1   dyoung 			mp->m_pkthdr.csum_flags |= type;
   3705  1.3  msaitoh 		} else {
   3706  1.3  msaitoh 			stats->l4cs_bad.ev_count++;
   3707  1.3  msaitoh 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   3708  1.1   dyoung 		}
   3709  1.1   dyoung 	}
   3710  1.1   dyoung 	return;
   3711  1.1   dyoung }
   3712  1.1   dyoung 
   3713  1.1   dyoung static void
   3714  1.1   dyoung ixv_setup_vlan_support(struct adapter *adapter)
   3715  1.1   dyoung {
   3716  1.1   dyoung 	struct ixgbe_hw *hw = &adapter->hw;
   3717  1.1   dyoung 	u32		ctrl, vid, vfta, retry;
   3718  1.1   dyoung 
   3719  1.1   dyoung 
   3720  1.1   dyoung 	/*
   3721  1.1   dyoung 	** We get here thru init_locked, meaning
   3722  1.1   dyoung 	** a soft reset, this has already cleared
   3723  1.1   dyoung 	** the VFTA and other state, so if there
   3724  1.1   dyoung 	** have been no vlan's registered do nothing.
   3725  1.1   dyoung 	*/
   3726  1.1   dyoung 	if (adapter->num_vlans == 0)
   3727  1.1   dyoung 		return;
   3728  1.1   dyoung 
   3729  1.1   dyoung 	/* Enable the queues */
   3730  1.1   dyoung 	for (int i = 0; i < adapter->num_queues; i++) {
   3731  1.1   dyoung 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   3732  1.1   dyoung 		ctrl |= IXGBE_RXDCTL_VME;
   3733  1.1   dyoung 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   3734  1.1   dyoung 	}
   3735  1.1   dyoung 
   3736  1.1   dyoung 	/*
   3737  1.1   dyoung 	** A soft reset zero's out the VFTA, so
   3738  1.1   dyoung 	** we need to repopulate it now.
   3739  1.1   dyoung 	*/
   3740  1.1   dyoung 	for (int i = 0; i < VFTA_SIZE; i++) {
   3741  1.1   dyoung 		if (ixv_shadow_vfta[i] == 0)
   3742  1.1   dyoung 			continue;
   3743  1.1   dyoung 		vfta = ixv_shadow_vfta[i];
   3744  1.1   dyoung 		/*
   3745  1.1   dyoung 		** Reconstruct the vlan id's
   3746  1.1   dyoung 		** based on the bits set in each
   3747  1.1   dyoung 		** of the array ints.
   3748  1.1   dyoung 		*/
   3749  1.1   dyoung 		for ( int j = 0; j < 32; j++) {
   3750  1.1   dyoung 			retry = 0;
   3751  1.1   dyoung 			if ((vfta & (1 << j)) == 0)
   3752  1.1   dyoung 				continue;
   3753  1.1   dyoung 			vid = (i * 32) + j;
   3754  1.1   dyoung 			/* Call the shared code mailbox routine */
   3755  1.1   dyoung 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
   3756  1.1   dyoung 				if (++retry > 5)
   3757  1.1   dyoung 					break;
   3758  1.1   dyoung 			}
   3759  1.1   dyoung 		}
   3760  1.1   dyoung 	}
   3761  1.1   dyoung }
   3762  1.1   dyoung 
   3763  1.3  msaitoh #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   3764  1.1   dyoung /*
   3765  1.1   dyoung ** This routine is run via an vlan config EVENT,
   3766  1.1   dyoung ** it enables us to use the HW Filter table since
   3767  1.1   dyoung ** we can get the vlan id. This just creates the
   3768  1.1   dyoung ** entry in the soft version of the VFTA, init will
   3769  1.1   dyoung ** repopulate the real table.
   3770  1.1   dyoung */
   3771  1.1   dyoung static void
   3772  1.1   dyoung ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3773  1.1   dyoung {
   3774  1.1   dyoung 	struct adapter	*adapter = ifp->if_softc;
   3775  1.1   dyoung 	u16		index, bit;
   3776  1.1   dyoung 
   3777  1.1   dyoung 	if (ifp->if_softc !=  arg)   /* Not our event */
   3778  1.1   dyoung 		return;
   3779  1.1   dyoung 
   3780  1.1   dyoung 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3781  1.1   dyoung 		return;
   3782  1.1   dyoung 
   3783  1.5  msaitoh 	IXV_CORE_LOCK(adapter);
   3784  1.1   dyoung 	index = (vtag >> 5) & 0x7F;
   3785  1.1   dyoung 	bit = vtag & 0x1F;
   3786  1.1   dyoung 	ixv_shadow_vfta[index] |= (1 << bit);
   3787  1.1   dyoung 	/* Re-init to load the changes */
   3788  1.5  msaitoh 	ixv_init_locked(adapter);
   3789  1.5  msaitoh 	IXV_CORE_UNLOCK(adapter);
   3790  1.1   dyoung }
   3791  1.1   dyoung 
   3792  1.1   dyoung /*
   3793  1.1   dyoung ** This routine is run via an vlan
   3794  1.1   dyoung ** unconfig EVENT, remove our entry
   3795  1.1   dyoung ** in the soft vfta.
   3796  1.1   dyoung */
   3797  1.1   dyoung static void
   3798  1.1   dyoung ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3799  1.1   dyoung {
   3800  1.1   dyoung 	struct adapter	*adapter = ifp->if_softc;
   3801  1.1   dyoung 	u16		index, bit;
   3802  1.1   dyoung 
   3803  1.1   dyoung 	if (ifp->if_softc !=  arg)
   3804  1.1   dyoung 		return;
   3805  1.1   dyoung 
   3806  1.1   dyoung 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3807  1.1   dyoung 		return;
   3808  1.1   dyoung 
   3809  1.5  msaitoh 	IXV_CORE_LOCK(adapter);
   3810  1.1   dyoung 	index = (vtag >> 5) & 0x7F;
   3811  1.1   dyoung 	bit = vtag & 0x1F;
   3812  1.1   dyoung 	ixv_shadow_vfta[index] &= ~(1 << bit);
   3813  1.1   dyoung 	/* Re-init to load the changes */
   3814  1.5  msaitoh 	ixv_init_locked(adapter);
   3815  1.5  msaitoh 	IXV_CORE_UNLOCK(adapter);
   3816  1.1   dyoung }
   3817  1.3  msaitoh #endif
   3818  1.1   dyoung 
   3819  1.1   dyoung static void
   3820  1.1   dyoung ixv_enable_intr(struct adapter *adapter)
   3821  1.1   dyoung {
   3822  1.1   dyoung 	struct ixgbe_hw *hw = &adapter->hw;
   3823  1.1   dyoung 	struct ix_queue *que = adapter->queues;
   3824  1.1   dyoung 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   3825  1.1   dyoung 
   3826  1.1   dyoung 
   3827  1.1   dyoung 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   3828  1.1   dyoung 
   3829  1.1   dyoung 	mask = IXGBE_EIMS_ENABLE_MASK;
   3830  1.1   dyoung 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   3831  1.1   dyoung 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   3832  1.1   dyoung 
   3833  1.1   dyoung         for (int i = 0; i < adapter->num_queues; i++, que++)
   3834  1.1   dyoung 		ixv_enable_queue(adapter, que->msix);
   3835  1.1   dyoung 
   3836  1.1   dyoung 	IXGBE_WRITE_FLUSH(hw);
   3837  1.1   dyoung 
   3838  1.1   dyoung 	return;
   3839  1.1   dyoung }
   3840  1.1   dyoung 
   3841  1.1   dyoung static void
   3842  1.1   dyoung ixv_disable_intr(struct adapter *adapter)
   3843  1.1   dyoung {
   3844  1.1   dyoung 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   3845  1.1   dyoung 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   3846  1.1   dyoung 	IXGBE_WRITE_FLUSH(&adapter->hw);
   3847  1.1   dyoung 	return;
   3848  1.1   dyoung }
   3849  1.1   dyoung 
   3850  1.1   dyoung /*
   3851  1.1   dyoung ** Setup the correct IVAR register for a particular MSIX interrupt
   3852  1.1   dyoung **  - entry is the register array entry
   3853  1.1   dyoung **  - vector is the MSIX vector for this queue
   3854  1.1   dyoung **  - type is RX/TX/MISC
   3855  1.1   dyoung */
   3856  1.1   dyoung static void
   3857  1.1   dyoung ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3858  1.1   dyoung {
   3859  1.1   dyoung 	struct ixgbe_hw *hw = &adapter->hw;
   3860  1.1   dyoung 	u32 ivar, index;
   3861  1.1   dyoung 
   3862  1.1   dyoung 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3863  1.1   dyoung 
   3864  1.1   dyoung 	if (type == -1) { /* MISC IVAR */
   3865  1.1   dyoung 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   3866  1.1   dyoung 		ivar &= ~0xFF;
   3867  1.1   dyoung 		ivar |= vector;
   3868  1.1   dyoung 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   3869  1.1   dyoung 	} else {	/* RX/TX IVARS */
   3870  1.1   dyoung 		index = (16 * (entry & 1)) + (8 * type);
   3871  1.1   dyoung 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   3872  1.1   dyoung 		ivar &= ~(0xFF << index);
   3873  1.1   dyoung 		ivar |= (vector << index);
   3874  1.1   dyoung 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   3875  1.1   dyoung 	}
   3876  1.1   dyoung }
   3877  1.1   dyoung 
   3878  1.1   dyoung static void
   3879  1.1   dyoung ixv_configure_ivars(struct adapter *adapter)
   3880  1.1   dyoung {
   3881  1.1   dyoung 	struct  ix_queue *que = adapter->queues;
   3882  1.1   dyoung 
   3883  1.1   dyoung         for (int i = 0; i < adapter->num_queues; i++, que++) {
   3884  1.1   dyoung 		/* First the RX queue entry */
   3885  1.1   dyoung                 ixv_set_ivar(adapter, i, que->msix, 0);
   3886  1.1   dyoung 		/* ... and the TX */
   3887  1.1   dyoung 		ixv_set_ivar(adapter, i, que->msix, 1);
   3888  1.1   dyoung 		/* Set an initial value in EITR */
   3889  1.1   dyoung                 IXGBE_WRITE_REG(&adapter->hw,
   3890  1.1   dyoung                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
   3891  1.1   dyoung 	}
   3892  1.1   dyoung 
   3893  1.1   dyoung 	/* For the Link interrupt */
   3894  1.1   dyoung         ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
   3895  1.1   dyoung }
   3896  1.1   dyoung 
   3897  1.1   dyoung 
   3898  1.1   dyoung /*
   3899  1.1   dyoung ** Tasklet handler for MSIX MBX interrupts
   3900  1.1   dyoung **  - do outside interrupt since it might sleep
   3901  1.1   dyoung */
   3902  1.1   dyoung static void
   3903  1.1   dyoung ixv_handle_mbx(void *context)
   3904  1.1   dyoung {
   3905  1.1   dyoung 	struct adapter  *adapter = context;
   3906  1.1   dyoung 
   3907  1.1   dyoung 	ixgbe_check_link(&adapter->hw,
   3908  1.1   dyoung 	    &adapter->link_speed, &adapter->link_up, 0);
   3909  1.1   dyoung 	ixv_update_link_status(adapter);
   3910  1.1   dyoung }
   3911  1.1   dyoung 
   3912  1.1   dyoung /*
   3913  1.1   dyoung ** The VF stats registers never have a truely virgin
   3914  1.1   dyoung ** starting point, so this routine tries to make an
   3915  1.1   dyoung ** artificial one, marking ground zero on attach as
   3916  1.1   dyoung ** it were.
   3917  1.1   dyoung */
   3918  1.1   dyoung static void
   3919  1.1   dyoung ixv_save_stats(struct adapter *adapter)
   3920  1.1   dyoung {
   3921  1.1   dyoung 	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
   3922  1.1   dyoung 		adapter->stats.saved_reset_vfgprc +=
   3923  1.1   dyoung 		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
   3924  1.1   dyoung 		adapter->stats.saved_reset_vfgptc +=
   3925  1.1   dyoung 		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
   3926  1.1   dyoung 		adapter->stats.saved_reset_vfgorc +=
   3927  1.1   dyoung 		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
   3928  1.1   dyoung 		adapter->stats.saved_reset_vfgotc +=
   3929  1.1   dyoung 		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
   3930  1.1   dyoung 		adapter->stats.saved_reset_vfmprc +=
   3931  1.1   dyoung 		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
   3932  1.1   dyoung 	}
   3933  1.1   dyoung }
   3934  1.1   dyoung 
   3935  1.1   dyoung static void
   3936  1.1   dyoung ixv_init_stats(struct adapter *adapter)
   3937  1.1   dyoung {
   3938  1.1   dyoung 	struct ixgbe_hw *hw = &adapter->hw;
   3939  1.1   dyoung 
   3940  1.1   dyoung 	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   3941  1.1   dyoung 	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   3942  1.1   dyoung 	adapter->stats.last_vfgorc |=
   3943  1.1   dyoung 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   3944  1.1   dyoung 
   3945  1.1   dyoung 	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   3946  1.1   dyoung 	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   3947  1.1   dyoung 	adapter->stats.last_vfgotc |=
   3948  1.1   dyoung 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   3949  1.1   dyoung 
   3950  1.1   dyoung 	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   3951  1.1   dyoung 
   3952  1.1   dyoung 	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
   3953  1.1   dyoung 	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
   3954  1.1   dyoung 	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
   3955  1.1   dyoung 	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
   3956  1.1   dyoung 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
   3957  1.1   dyoung }
   3958  1.1   dyoung 
/*
** UPDATE_STAT_32 -- fold a 32-bit hardware counter (read through
** IXGBE_READ_REG(hw, reg)) into a 64-bit software accumulator.
** VF counters cannot be zeroed, so we keep the last raw reading in
** 'last' and, when the register wraps below it, carry one into the
** upper 32 bits of 'count' before splicing in the new low half.
**
** Wrapped in do/while(0) so the expansion is a single statement and
** is safe in unbraced if/else bodies; arguments are parenthesized.
*/
#define UPDATE_STAT_32(reg, last, count)		\
do {							\
	u32 cur32 = IXGBE_READ_REG(hw, (reg));		\
	if (cur32 < (last))				\
		(count) += 0x100000000LL;		\
	(last) = cur32;					\
	(count) &= 0xFFFFFFFF00000000LL;		\
	(count) |= cur32;				\
} while (/*CONSTCOND*/0)
   3968  1.1   dyoung 
/*
** UPDATE_STAT_36 -- fold a 36-bit hardware counter, split across an
** LSB and an MSB register, into a 64-bit software accumulator.  Works
** like UPDATE_STAT_32 but carries at 2^36 and preserves the upper 28
** accumulator bits.
**
** Wrapped in do/while(0) so the expansion is a single statement and
** is safe in unbraced if/else bodies; arguments are parenthesized.
*/
#define UPDATE_STAT_36(lsb, msb, last, count)		\
do {							\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
	u64 cur36 = (cur_msb << 32) | cur_lsb;		\
	if (cur36 < (last))				\
		(count) += 0x1000000000LL;		\
	(last) = cur36;					\
	(count) &= 0xFFFFFFF000000000LL;		\
	(count) |= cur36;				\
} while (/*CONSTCOND*/0)
   3980  1.1   dyoung 
/*
** ixv_update_stats - Update the board statistics counters.
**
** Folds the current VF hardware counter readings into the 64-bit
** software accumulators in adapter->stats.  The UPDATE_STAT_* macros
** read the registers through the local 'hw' handle and handle
** counter wrap (32-bit packet counters, 36-bit octet counters).
*/
void
ixv_update_stats(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

	/* Good packets received / transmitted (32-bit) */
        UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
	    adapter->stats.vfgprc);
        UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
	    adapter->stats.vfgptc);
	/* Good octets received / transmitted (36-bit, split LSB/MSB) */
        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
	/* Multicast packets received (32-bit) */
        UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
	    adapter->stats.vfmprc);
}
   4000  1.1   dyoung 
   4001  1.1   dyoung /**********************************************************************
   4002  1.1   dyoung  *
   4003  1.1   dyoung  *  This routine is called only when ixgbe_display_debug_stats is enabled.
   4004  1.1   dyoung  *  This routine provides a way to take a look at important statistics
   4005  1.1   dyoung  *  maintained by the driver and hardware.
   4006  1.1   dyoung  *
   4007  1.1   dyoung  **********************************************************************/
   4008  1.1   dyoung static void
   4009  1.1   dyoung ixv_print_hw_stats(struct adapter * adapter)
   4010  1.1   dyoung {
   4011  1.1   dyoung         device_t dev = adapter->dev;
   4012  1.1   dyoung 
   4013  1.1   dyoung         device_printf(dev,"Std Mbuf Failed = %lu\n",
   4014  1.3  msaitoh                adapter->mbuf_defrag_failed.ev_count);
   4015  1.1   dyoung         device_printf(dev,"Driver dropped packets = %lu\n",
   4016  1.3  msaitoh                adapter->dropped_pkts.ev_count);
   4017  1.1   dyoung         device_printf(dev, "watchdog timeouts = %ld\n",
   4018  1.3  msaitoh                adapter->watchdog_events.ev_count);
   4019  1.1   dyoung 
   4020  1.1   dyoung         device_printf(dev,"Good Packets Rcvd = %llu\n",
   4021  1.1   dyoung                (long long)adapter->stats.vfgprc);
   4022  1.1   dyoung         device_printf(dev,"Good Packets Xmtd = %llu\n",
   4023  1.1   dyoung                (long long)adapter->stats.vfgptc);
   4024  1.1   dyoung         device_printf(dev,"TSO Transmissions = %lu\n",
   4025  1.3  msaitoh                adapter->tso_tx.ev_count);
   4026  1.1   dyoung 
   4027  1.1   dyoung }
   4028  1.1   dyoung 
   4029  1.1   dyoung /**********************************************************************
   4030  1.1   dyoung  *
   4031  1.1   dyoung  *  This routine is called only when em_display_debug_stats is enabled.
   4032  1.1   dyoung  *  This routine provides a way to take a look at important statistics
   4033  1.1   dyoung  *  maintained by the driver and hardware.
   4034  1.1   dyoung  *
   4035  1.1   dyoung  **********************************************************************/
   4036  1.1   dyoung static void
   4037  1.1   dyoung ixv_print_debug_info(struct adapter *adapter)
   4038  1.1   dyoung {
   4039  1.1   dyoung         device_t dev = adapter->dev;
   4040  1.1   dyoung         struct ixgbe_hw         *hw = &adapter->hw;
   4041  1.1   dyoung         struct ix_queue         *que = adapter->queues;
   4042  1.1   dyoung         struct rx_ring          *rxr;
   4043  1.1   dyoung         struct tx_ring          *txr;
   4044  1.3  msaitoh #ifdef LRO
   4045  1.1   dyoung         struct lro_ctrl         *lro;
   4046  1.3  msaitoh #endif /* LRO */
   4047  1.1   dyoung 
   4048  1.1   dyoung         device_printf(dev,"Error Byte Count = %u \n",
   4049  1.1   dyoung             IXGBE_READ_REG(hw, IXGBE_ERRBC));
   4050  1.1   dyoung 
   4051  1.1   dyoung         for (int i = 0; i < adapter->num_queues; i++, que++) {
   4052  1.1   dyoung                 txr = que->txr;
   4053  1.1   dyoung                 rxr = que->rxr;
   4054  1.3  msaitoh #ifdef LRO
   4055  1.1   dyoung                 lro = &rxr->lro;
   4056  1.3  msaitoh #endif /* LRO */
   4057  1.1   dyoung                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
   4058  1.1   dyoung                     que->msix, (long)que->irqs);
   4059  1.1   dyoung                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
   4060  1.3  msaitoh                     rxr->me, (long long)rxr->rx_packets.ev_count);
   4061  1.1   dyoung                 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
   4062  1.3  msaitoh                     rxr->me, (long long)rxr->rx_split_packets.ev_count);
   4063  1.1   dyoung                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
   4064  1.3  msaitoh                     rxr->me, (long)rxr->rx_bytes.ev_count);
   4065  1.3  msaitoh #ifdef LRO
   4066  1.1   dyoung                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
   4067  1.1   dyoung                     rxr->me, lro->lro_queued);
   4068  1.1   dyoung                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
   4069  1.1   dyoung                     rxr->me, lro->lro_flushed);
   4070  1.3  msaitoh #endif /* LRO */
   4071  1.1   dyoung                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
   4072  1.3  msaitoh                     txr->me, (long)txr->total_packets.ev_count);
   4073  1.1   dyoung                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
   4074  1.3  msaitoh                     txr->me, (long)txr->no_desc_avail.ev_count);
   4075  1.1   dyoung         }
   4076  1.1   dyoung 
   4077  1.1   dyoung         device_printf(dev,"MBX IRQ Handled: %lu\n",
   4078  1.3  msaitoh             (long)adapter->mbx_irq.ev_count);
   4079  1.1   dyoung         return;
   4080  1.1   dyoung }
   4081  1.1   dyoung 
   4082  1.1   dyoung static int
   4083  1.3  msaitoh ixv_sysctl_stats(SYSCTLFN_ARGS)
   4084  1.1   dyoung {
   4085  1.3  msaitoh 	struct sysctlnode node;
   4086  1.1   dyoung 	int             error;
   4087  1.3  msaitoh 	int		result;
   4088  1.1   dyoung 	struct adapter *adapter;
   4089  1.1   dyoung 
   4090  1.3  msaitoh 	node = *rnode;
   4091  1.3  msaitoh 	adapter = (struct adapter *)node.sysctl_data;
   4092  1.3  msaitoh 	node.sysctl_data = &result;
   4093  1.3  msaitoh 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4094  1.3  msaitoh 	if (error != 0)
   4095  1.3  msaitoh 		return error;
   4096  1.1   dyoung 
   4097  1.3  msaitoh 	if (result == 1)
   4098  1.3  msaitoh 		ixv_print_hw_stats(adapter);
   4099  1.1   dyoung 
   4100  1.3  msaitoh 	return 0;
   4101  1.1   dyoung }
   4102  1.1   dyoung 
   4103  1.1   dyoung static int
   4104  1.3  msaitoh ixv_sysctl_debug(SYSCTLFN_ARGS)
   4105  1.1   dyoung {
   4106  1.3  msaitoh 	struct sysctlnode node;
   4107  1.1   dyoung 	int error, result;
   4108  1.1   dyoung 	struct adapter *adapter;
   4109  1.1   dyoung 
   4110  1.3  msaitoh 	node = *rnode;
   4111  1.3  msaitoh 	adapter = (struct adapter *)node.sysctl_data;
   4112  1.3  msaitoh 	node.sysctl_data = &result;
   4113  1.3  msaitoh 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4114  1.1   dyoung 
   4115  1.3  msaitoh 	if (error)
   4116  1.3  msaitoh 		return error;
   4117  1.1   dyoung 
   4118  1.3  msaitoh 	if (result == 1)
   4119  1.1   dyoung 		ixv_print_debug_info(adapter);
   4120  1.3  msaitoh 
   4121  1.3  msaitoh 	return 0;
   4122  1.1   dyoung }
   4123  1.1   dyoung 
   4124  1.1   dyoung /*
   4125  1.1   dyoung ** Set flow control using sysctl:
   4126  1.1   dyoung ** Flow control values:
   4127  1.1   dyoung ** 	0 - off
   4128  1.1   dyoung **	1 - rx pause
   4129  1.1   dyoung **	2 - tx pause
   4130  1.1   dyoung **	3 - full
   4131  1.1   dyoung */
   4132  1.1   dyoung static int
   4133  1.3  msaitoh ixv_set_flowcntl(SYSCTLFN_ARGS)
   4134  1.1   dyoung {
   4135  1.3  msaitoh 	struct sysctlnode node;
   4136  1.1   dyoung 	int error;
   4137  1.1   dyoung 	struct adapter *adapter;
   4138  1.1   dyoung 
   4139  1.3  msaitoh 	node = *rnode;
   4140  1.3  msaitoh 	adapter = (struct adapter *)node.sysctl_data;
   4141  1.3  msaitoh 	node.sysctl_data = &ixv_flow_control;
   4142  1.3  msaitoh 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4143  1.1   dyoung 
   4144  1.1   dyoung 	if (error)
   4145  1.1   dyoung 		return (error);
   4146  1.1   dyoung 
   4147  1.1   dyoung 	switch (ixv_flow_control) {
   4148  1.1   dyoung 		case ixgbe_fc_rx_pause:
   4149  1.1   dyoung 		case ixgbe_fc_tx_pause:
   4150  1.1   dyoung 		case ixgbe_fc_full:
   4151  1.1   dyoung 			adapter->hw.fc.requested_mode = ixv_flow_control;
   4152  1.1   dyoung 			break;
   4153  1.1   dyoung 		case ixgbe_fc_none:
   4154  1.1   dyoung 		default:
   4155  1.1   dyoung 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4156  1.1   dyoung 	}
   4157  1.1   dyoung 
   4158  1.6  msaitoh 	ixgbe_fc_enable(&adapter->hw);
   4159  1.1   dyoung 	return error;
   4160  1.1   dyoung }
   4161  1.1   dyoung 
   4162  1.3  msaitoh const struct sysctlnode *
   4163  1.3  msaitoh ixv_sysctl_instance(struct adapter *adapter)
   4164  1.3  msaitoh {
   4165  1.3  msaitoh 	const char *dvname;
   4166  1.3  msaitoh 	struct sysctllog **log;
   4167  1.3  msaitoh 	int rc;
   4168  1.3  msaitoh 	const struct sysctlnode *rnode;
   4169  1.3  msaitoh 
   4170  1.3  msaitoh 	log = &adapter->sysctllog;
   4171  1.3  msaitoh 	dvname = device_xname(adapter->dev);
   4172  1.3  msaitoh 
   4173  1.3  msaitoh 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   4174  1.3  msaitoh 	    0, CTLTYPE_NODE, dvname,
   4175  1.3  msaitoh 	    SYSCTL_DESCR("ixv information and settings"),
   4176  1.3  msaitoh 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   4177  1.3  msaitoh 		goto err;
   4178  1.3  msaitoh 
   4179  1.3  msaitoh 	return rnode;
   4180  1.3  msaitoh err:
   4181  1.3  msaitoh 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   4182  1.3  msaitoh 	return NULL;
   4183  1.3  msaitoh }
   4184  1.3  msaitoh 
   4185  1.1   dyoung static void
   4186  1.1   dyoung ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
   4187  1.1   dyoung         const char *description, int *limit, int value)
   4188  1.1   dyoung {
   4189  1.3  msaitoh 	const struct sysctlnode *rnode, *cnode;
   4190  1.3  msaitoh 	struct sysctllog **log = &adapter->sysctllog;
   4191  1.3  msaitoh 
   4192  1.1   dyoung         *limit = value;
   4193  1.3  msaitoh 
   4194  1.3  msaitoh 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL)
   4195  1.3  msaitoh 		aprint_error_dev(adapter->dev,
   4196  1.3  msaitoh 		    "could not create sysctl root\n");
   4197  1.3  msaitoh 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   4198  1.3  msaitoh 	    CTLFLAG_READWRITE,
   4199  1.3  msaitoh 	    CTLTYPE_INT,
   4200  1.3  msaitoh 	    name, SYSCTL_DESCR(description),
   4201  1.3  msaitoh 	    NULL, 0, limit, 0,
   4202  1.3  msaitoh 	    CTL_CREATE, CTL_EOL) != 0) {
   4203  1.3  msaitoh 		aprint_error_dev(adapter->dev, "%s: could not create sysctl",
   4204  1.3  msaitoh 		    __func__);
   4205  1.3  msaitoh 	}
   4206  1.1   dyoung }
   4207  1.1   dyoung 
   4208