/******************************************************************************

  Copyright (c) 2001-2011, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.51 2011/04/25 23:34:21 jfv Exp $*/
/*$NetBSD: ixgbe.c,v 1.8 2014/03/29 19:28:25 christos Exp $*/

#include "opt_inet.h"

#include "ixgbe.h"

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "2.3.10";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static const char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t, cfdata_t, void *);
static void     ixgbe_attach(device_t, device_t, void *);
static int      ixgbe_detach(device_t, int);
#if 0
static int      ixgbe_shutdown(device_t);
#endif
static void     ixgbe_start(struct ifnet *);
static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
#if __FreeBSD_version >= 800000
static int	ixgbe_mq_start(struct ifnet *, struct mbuf *);
static int	ixgbe_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void	ixgbe_qflush(struct ifnet *);
#endif
static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
static void	ixgbe_ifstop(struct ifnet *, int);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static int      ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int      ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_queues(struct adapter *);
static int	ixgbe_setup_msix(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_link(struct adapter *);

static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
static int	ixgbe_setup_transmit_structures(struct adapter *);
static void	ixgbe_setup_transmit_ring(struct tx_ring *);
static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_free_transmit_structures(struct adapter *);
static void     ixgbe_free_transmit_buffers(struct tx_ring *);

static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
static int      ixgbe_setup_receive_structures(struct adapter *);
static int	ixgbe_setup_receive_ring(struct rx_ring *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_free_receive_structures(struct adapter *);
static void     ixgbe_free_receive_buffers(struct rx_ring *);
static void	ixgbe_setup_hw_rsc(struct rx_ring *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static bool	ixgbe_txeof(struct tx_ring *);
static bool	ixgbe_rxeof(struct ix_queue *, int);
static void	ixgbe_rx_checksum(u32, struct mbuf *, u32,
		    struct ixgbe_hw_stats *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void	ixgbe_refresh_mbufs(struct rx_ring *, int);
static int      ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int	ixgbe_set_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_set_advertise(SYSCTLFN_PROTO);
static int	ixgbe_dma_malloc(struct adapter *, bus_size_t,
		    struct ixgbe_dma_alloc *, int);
static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void	ixgbe_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static u32	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
#if 0
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif

static void     ixgbe_add_hw_stats(struct adapter *adapter);

static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);
static void	ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

#if defined(NETBSD_MSI_OR_MSIX)
/* The MSI/X Interrupt handlers */
static void	ixgbe_msix_que(void *);
static void	ixgbe_msix_link(void *);
#endif

/* Software interrupts for deferred work */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);

const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

#ifdef IXGBE_FDIR
static void	ixgbe_atr(struct tx_ring *, struct mbuf *);
static void	ixgbe_reinit_fdir(void *, int);
#endif

/*********************************************************************
 *  Device Interface Entry Points
 *********************************************************************/

CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ixgbe_devclass;
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);

MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
#endif

/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation,
** which varies the interrupt rate over
** time based on the traffic seen on
** each interrupt vector.
*/
static int ixgbe_enable_aim = TRUE;
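/*
 * FreeBSD exposes these knobs as boot-loader tunables; NetBSD has no
 * direct equivalent, so the TUNABLE_INT() hook is stubbed to a no-op.
 */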
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);

static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);

/* Flow control setting, default to full */
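/* Valid modes: ixgbe_fc_none, ixgbe_fc_rx_pause, ixgbe_fc_tx_pause, ixgbe_fc_full */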
static int ixgbe_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);

/*
** Smart speed setting, default to on.
** This only works as a compile-time
** option right now, since it is set
** during attach; set this to
** 'ixgbe_smart_speed_off' to disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);


/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload;
 * it can be a performance win in some workloads, but
 * in others it actually hurts, so it is off by default.
 */
static bool ixgbe_header_split = FALSE;
TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);

#if defined(NETBSD_MSI_OR_MSIX)
/*
 * Number of queues: if set to 0,
 * it autoconfigures based on the
 * number of CPUs, with a max of 8.
 * It can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
#endif

/*
** Number of TX descriptors per ring;
** set higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);

/* Keep a running tab on them for a sanity check */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** For Flow Director: this is the
** number of TX packets we sample
** for the filter pool; this means
** every 20th packet will be probed.
**
** This feature can be disabled by
** setting this to 0.
*/
static int atr_sample_rate = 20;
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool; this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  an adapter based on the PCI vendor/device ID of the adapter.
 *
 *  return 1 on success, 0 on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
{
	const struct pci_attach_args *pa = aux;

	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
}

static ixgbe_vendor_info_t *
ixgbe_lookup(const struct pci_attach_args *pa)
{
	pcireg_t subid;
	ixgbe_vendor_info_t *ent;

	INIT_DEBUGOUT("ixgbe_probe: begin");

	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
		return NULL;

	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
		if (PCI_VENDOR(pa->pa_id) == ent->vendor_id &&
		    PCI_PRODUCT(pa->pa_id) == ent->device_id &&

		    (PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id ||
		     ent->subvendor_id == 0) &&

		    (PCI_SUBSYS_ID(subid) == ent->subdevice_id ||
		     ent->subdevice_id == 0)) {
			++ixgbe_total_ports;
			return ent;
		}
	}
	return NULL;
}


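/*
 * Attach per-instance sysctl nodes.  Assuming the instance node from
 * ixgbe_sysctl_instance() sits under hw.<xname> (e.g. hw.ixg0), the
 * read/write nodes created below could be tweaked with something like:
 *
 *	sysctl -w hw.ixg0.flow_control=3
 */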
static void
ixgbe_sysctl_attach(struct adapter *adapter)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;
	device_t dev;

	dev = adapter->dev;
	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "flow_control", SYSCTL_DESCR("Flow Control"),
	    ixgbe_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_gig", SYSCTL_DESCR("1G Link"),
	    ixgbe_set_advertise, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* XXX This is an *instance* sysctl controlling a *global* variable.
	 * XXX It's that way in the FreeBSD driver that this derives from.
	 */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &ixgbe_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16		csum;
	u32		ctrl_ext;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	adapter->osdep.dmat = pa->pa_dmat;

	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* SYSCTL APIs */

	ixgbe_sysctl_attach(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
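	/* Each ring's byte length must be a multiple of DBA_ALIGN. */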
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			aprint_error_dev(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev,"Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		aprint_error_dev(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		aprint_error_dev(dev,"The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.low_water = IXGBE_FC_LO;
	hw->fc.high_water = IXGBE_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error == IXGBE_ERR_EEPROM_VERSION) {
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
		aprint_error_dev(dev,"Unsupported SFP+ Module\n");

	if (error) {
		error = EIO;
		aprint_error_dev(dev,"Hardware Initialization Failure\n");
		goto err_late;
	}

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter, pa);
	else
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Sysctl for limiting the amount of work done in software interrupts */
	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixgbe_rx_process_limit);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Print PCIE bus type/speed/width info */
	ixgbe_get_bus_info(hw);
	aprint_normal_dev(dev,"PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));

	if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
	    (hw->bus.speed == ixgbe_bus_speed_2500)) {
		aprint_error_dev(dev, "PCI-Express bandwidth available"
		    " for this card\n     is not sufficient for"
		    " optimal performance.\n");
		aprint_error_dev(dev, "For optimal performance a x8 "
		    "PCIE, or x4 PCIE 2 slot is required.\n");
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	ixgbe_add_hw_stats(adapter);

	INIT_DEBUGOUT("ixgbe_attach: end");
	return;
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	return;
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw_stats *stats = &adapter->stats;
	struct ix_queue *que = adapter->queues;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use\n");
		return EBUSY;
	}

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		softint_disestablish(que->que_si);
	}

	/* Drain the Link queue */
	softint_disestablish(adapter->link_si);
	softint_disestablish(adapter->mod_si);
	softint_disestablish(adapter->msf_si);
#ifdef IXGBE_FDIR
	softint_disestablish(adapter->fdir_si);
#endif

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);
	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);

	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->morerx);
	evcnt_detach(&adapter->moretx);
	evcnt_detach(&adapter->txloops);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->m_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->tso_tx);
	evcnt_detach(&adapter->link_irq);
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);

		if (i < __arraycount(adapter->stats.mpc)) {
			evcnt_detach(&adapter->stats.mpc[i]);
		}
		if (i < __arraycount(adapter->stats.pxontxc)) {
			evcnt_detach(&adapter->stats.pxontxc[i]);
			evcnt_detach(&adapter->stats.pxonrxc[i]);
			evcnt_detach(&adapter->stats.pxofftxc[i]);
			evcnt_detach(&adapter->stats.pxoffrxc[i]);
			evcnt_detach(&adapter->stats.pxon2offc[i]);
		}
		if (i < __arraycount(adapter->stats.qprc)) {
			evcnt_detach(&adapter->stats.qprc[i]);
			evcnt_detach(&adapter->stats.qptc[i]);
			evcnt_detach(&adapter->stats.qbrc[i]);
			evcnt_detach(&adapter->stats.qbtc[i]);
			evcnt_detach(&adapter->stats.qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
		evcnt_detach(&rxr->rx_split_packets);
		evcnt_detach(&rxr->rx_irq);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* FC Stats */
	evcnt_detach(&stats->fccrc);
	evcnt_detach(&stats->fclast);
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		evcnt_detach(&stats->fcoerpdc);
		evcnt_detach(&stats->fcoeprc);
		evcnt_detach(&stats->fcoeptc);
		evcnt_detach(&stats->fcoedwrc);
		evcnt_detach(&stats->fcoedwtc);
	}

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}
#endif


/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
ixgbe_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	int rc;
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
	    IFF_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
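		/*
		 * EFBIG from ixgbe_xmit means the chain had too many DMA
		 * segments; try one m_defrag() into fewer clusters and
		 * resubmit.
		 */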
		if (rc == EFBIG) {
			struct mbuf *mtmp;

			if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) {
				m_head = mtmp;
				rc = ixgbe_xmit(txr, m_head);
				if (rc != 0)
					adapter->efbig2_tx_dma_setup.ev_count++;
			} else
				adapter->m_defrag_failed.ev_count++;
		}
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head);

		/* Set watchdog on */
		getmicrotime(&txr->watchdog_time);
		txr->queue_status = IXGBE_QUEUE_WORKING;

	}
	return;
}

/*
 * Legacy TX start - called by the stack; this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixgbe_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);
	}
	return;
}

#if __FreeBSD_version >= 800000
/*
** Multiqueue Transmit driver
**
*/
static int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i = 0, err = 0;

	/* Which queue to use */
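	/* A stack-supplied flow id pins each flow to a single tx ring. */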
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXGBE_TX_TRYLOCK(txr)) {
		err = ixgbe_mq_start_locked(ifp, txr, m);
		IXGBE_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		softint_schedule(que->que_si);
	}

	return (err);
}

static int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		enqueued, err = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
			ixgbe_txeof(txr);
		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->queue_status = IXGBE_QUEUE_WORKING;
		getmicrotime(&txr->watchdog_time);
	}

	return (err);
}

/*
** Flush all ring buffers
*/
static void
ixgbe_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXGBE_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}
#endif /* __FreeBSD_version >= 800000 */

static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;

	IXGBE_CORE_LOCK(adapter);

	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		rc = ENETRESET;
	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
		ixgbe_set_promisc(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return rc;
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int             error = 0;
	int l4csum_en;
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		break;
	}

	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
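/* The maximum frame size (MFS) field occupies the upper 16 bits of MHADD. */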
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		k, txdctl, mhadd, gpie;
	u32		rxdctl, rxctrl;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init: begin");
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev,"Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else if (adapter->max_frame_size <= 9216)
		adapter->rx_mbuf_sz = MJUM9BYTES;
	else
		adapter->rx_mbuf_sz = MJUM16BYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);

	/* Enable Fan Failure Interrupt */
	gpie |= IXGBE_SDP1_GPIEN;

	/* Add for Thermal detection */
	if (hw->mac.type == ixgbe_mac_82599EB)
		gpie |= IXGBE_SDP2_GPIEN;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */

	for (int i = 0; i < adapter->num_queues; i++) {
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
	}

	for (int i = 0; i < adapter->num_queues; i++) {
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		/*
		 * Poll (up to ~10ms) until the hardware reports the ring
		 * enabled; the tail pointer (RDT) should not be written
		 * until it does.
		 */
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
	}

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB)
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
#endif

	/*
	** Check on any SFP devices that
	** need to be kick-started
	*/
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

static int
ixgbe_init(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
}


/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

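	/*
	 * The 82598 has a single 32-bit EIMS register, while later MACs
	 * split the 64 possible vectors across EIMS_EX(0) and EIMS_EX(1).
	 */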
	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
}

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
}

static inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

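	/*
	 * Writing a queue's bit into EICS sets its interrupt cause,
	 * re-raising that queue's interrupt by software request.
	 */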
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}


static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	adapter->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que, adapter->rx_process_limit);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			adapter->req.ev_count++;
			softint_schedule(que->que_si);
			return;
		}
	}

	/* Reenable this interrupt */
	ixgbe_enable_queue(adapter, que->msix);

	return;
}
   1468 
   1469 
   1470 /*********************************************************************
   1471  *
   1472  *  Legacy Interrupt Service routine
   1473  *
   1474  **********************************************************************/
   1475 
   1476 static int
   1477 ixgbe_legacy_irq(void *arg)
   1478 {
   1479 	struct ix_queue *que = arg;
   1480 	struct adapter	*adapter = que->adapter;
   1481 	struct ixgbe_hw	*hw = &adapter->hw;
   1482 	struct 		tx_ring *txr = adapter->tx_rings;
   1483 	bool		more_tx, more_rx;
   1484 	u32       	reg_eicr, loop = MAX_LOOP;
   1485 
   1486 
   1487 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   1488 
   1489 	adapter->stats.legint.ev_count++;
   1490 	++que->irqs;
   1491 	if (reg_eicr == 0) {
   1492 		adapter->stats.intzero.ev_count++;
   1493 		ixgbe_enable_intr(adapter);
   1494 		return 0;
   1495 	}
   1496 
   1497 	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
   1498 
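         	/*
         	 * Bound the TX clean-up with MAX_LOOP passes so a busy ring
         	 * cannot keep the legacy handler spinning indefinitely.
         	 */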
   1499 	IXGBE_TX_LOCK(txr);
   1500 	do {
   1501 		adapter->txloops.ev_count++;
   1502 		more_tx = ixgbe_txeof(txr);
   1503 	} while (loop-- && more_tx);
   1504 	IXGBE_TX_UNLOCK(txr);
   1505 
   1506 	if (more_rx || more_tx) {
   1507 		if (more_rx)
   1508 			adapter->morerx.ev_count++;
   1509 		if (more_tx)
   1510 			adapter->moretx.ev_count++;
   1511 		softint_schedule(que->que_si);
   1512 	}
   1513 
   1514 	/* Check for fan failure */
   1515 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
   1516 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
   1517                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1518 		    "REPLACE IMMEDIATELY!!\n");
   1519 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
   1520 	}
   1521 
   1522 	/* Link status change */
   1523 	if (reg_eicr & IXGBE_EICR_LSC)
   1524 		softint_schedule(adapter->link_si);
   1525 
   1526 	ixgbe_enable_intr(adapter);
   1527 	return 1;
   1528 }
   1529 
   1530 
   1531 #if defined(NETBSD_MSI_OR_MSIX)
   1532 /*********************************************************************
   1533  *
   1534  *  MSI Queue Interrupt Service routine
   1535  *
   1536  **********************************************************************/
   1537 void
   1538 ixgbe_msix_que(void *arg)
   1539 {
   1540 	struct ix_queue	*que = arg;
   1541 	struct adapter  *adapter = que->adapter;
   1542 	struct tx_ring	*txr = que->txr;
   1543 	struct rx_ring	*rxr = que->rxr;
   1544 	bool		more_tx, more_rx;
   1545 	u32		newitr = 0;
   1546 
   1547 	++que->irqs;
   1548 
   1549 	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
   1550 
   1551 	IXGBE_TX_LOCK(txr);
   1552 	more_tx = ixgbe_txeof(txr);
   1553 	IXGBE_TX_UNLOCK(txr);
   1554 
   1555 	/* Do AIM now? */
   1556 
   1557 	if (ixgbe_enable_aim == FALSE)
   1558 		goto no_calc;
    1559 	/*
    1560 	** Do Adaptive Interrupt Moderation:
    1561 	**  - Write out the last calculated setting
    1562 	**  - Calculate based on average size over
    1563 	**    the last interval.
    1564 	*/
   1565         if (que->eitr_setting)
   1566                 IXGBE_WRITE_REG(&adapter->hw,
   1567                     IXGBE_EITR(que->msix), que->eitr_setting);
   1568 
   1569         que->eitr_setting = 0;
   1570 
   1571         /* Idle, do nothing */
   1572         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1573                 goto no_calc;
   1574 
   1575 	if ((txr->bytes) && (txr->packets))
   1576                	newitr = txr->bytes/txr->packets;
   1577 	if ((rxr->bytes) && (rxr->packets))
   1578 		newitr = max(newitr,
   1579 		    (rxr->bytes / rxr->packets));
   1580 	newitr += 24; /* account for hardware frame, crc */
   1581 
   1582 	/* set an upper boundary */
   1583 	newitr = min(newitr, 3000);
   1584 
   1585 	/* Be nice to the mid range */
   1586 	if ((newitr > 300) && (newitr < 1200))
   1587 		newitr = (newitr / 3);
   1588 	else
   1589 		newitr = (newitr / 2);
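         	/*
         	 * Worked example: an average frame of 1500 bytes gives
         	 * newitr = 1500 + 24 = 1524; it survives the 3000 cap, falls
         	 * outside the 300..1200 mid range, and is halved to 762.
         	 */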
   1590 
   1591         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   1592                 newitr |= newitr << 16;
   1593         else
   1594                 newitr |= IXGBE_EITR_CNT_WDIS;
   1595 
   1596         /* save for next interrupt */
   1597         que->eitr_setting = newitr;
   1598 
   1599         /* Reset state */
   1600         txr->bytes = 0;
   1601         txr->packets = 0;
   1602         rxr->bytes = 0;
   1603         rxr->packets = 0;
   1604 
   1605 no_calc:
   1606 	if (more_tx || more_rx)
   1607 		softint_schedule(que->que_si);
   1608 	else /* Reenable this interrupt */
   1609 		ixgbe_enable_queue(adapter, que->msix);
   1610 	return;
   1611 }
   1612 
   1613 
   1614 static void
   1615 ixgbe_msix_link(void *arg)
   1616 {
   1617 	struct adapter	*adapter = arg;
   1618 	struct ixgbe_hw *hw = &adapter->hw;
   1619 	u32		reg_eicr;
   1620 
   1621 	++adapter->link_irq.ev_count;
   1622 
   1623 	/* First get the cause */
   1624 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   1625 	/* Clear interrupt with write */
   1626 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
   1627 
   1628 	/* Link status change */
   1629 	if (reg_eicr & IXGBE_EICR_LSC)
   1630 		softint_schedule(adapter->link_si);
   1631 
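         	/*
         	 * On non-82598 MACs: GPI SDP1 indicates a multispeed fiber
         	 * event (handled by the msf task) and GPI SDP2 indicates SFP+
         	 * module insertion/removal (handled by the mod task); each
         	 * cause is acknowledged by writing its bit back to EICR.
         	 */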
   1632 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   1633 #ifdef IXGBE_FDIR
   1634 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
   1635 			/* This is probably overkill :) */
   1636 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
   1637 				return;
   1638                 	/* Clear the interrupt */
   1639 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
   1640 			/* Turn off the interface */
   1641 			adapter->ifp->if_flags &= ~IFF_RUNNING;
   1642 			softint_schedule(adapter->fdir_si);
   1643 		} else
   1644 #endif
   1645 		if (reg_eicr & IXGBE_EICR_ECC) {
   1646                 	device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
   1647 			    "Please Reboot!!\n");
   1648 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
    1649 		} else if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
   1652                 	/* Clear the interrupt */
   1653                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
   1654 			softint_schedule(adapter->msf_si);
   1655         	} else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
   1656                 	/* Clear the interrupt */
   1657                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
   1658 			softint_schedule(adapter->mod_si);
   1659 		}
   1660         }
   1661 
   1662 	/* Check for fan failure */
   1663 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
   1664 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
   1665                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1666 		    "REPLACE IMMEDIATELY!!\n");
   1667 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
   1668 	}
   1669 
   1670 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   1671 	return;
   1672 }
   1673 #endif
   1674 
   1675 /*********************************************************************
   1676  *
   1677  *  Media Ioctl callback
   1678  *
   1679  *  This routine is called whenever the user queries the status of
   1680  *  the interface using ifconfig.
   1681  *
   1682  **********************************************************************/
   1683 static void
   1684 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1685 {
   1686 	struct adapter *adapter = ifp->if_softc;
   1687 
   1688 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   1689 	IXGBE_CORE_LOCK(adapter);
   1690 	ixgbe_update_link_status(adapter);
   1691 
   1692 	ifmr->ifm_status = IFM_AVALID;
   1693 	ifmr->ifm_active = IFM_ETHER;
   1694 
   1695 	if (!adapter->link_active) {
   1696 		IXGBE_CORE_UNLOCK(adapter);
   1697 		return;
   1698 	}
   1699 
   1700 	ifmr->ifm_status |= IFM_ACTIVE;
   1701 
   1702 	switch (adapter->link_speed) {
   1703 		case IXGBE_LINK_SPEED_1GB_FULL:
   1704 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1705 			break;
   1706 		case IXGBE_LINK_SPEED_10GB_FULL:
   1707 			ifmr->ifm_active |= adapter->optics | IFM_FDX;
   1708 			break;
   1709 	}
   1710 
   1711 	IXGBE_CORE_UNLOCK(adapter);
   1712 
   1713 	return;
   1714 }
   1715 
   1716 /*********************************************************************
   1717  *
   1718  *  Media Ioctl callback
   1719  *
   1720  *  This routine is called when the user changes speed/duplex using
    1721  *  media/mediaopt option with ifconfig.
   1722  *
   1723  **********************************************************************/
   1724 static int
   1725 ixgbe_media_change(struct ifnet * ifp)
   1726 {
   1727 	struct adapter *adapter = ifp->if_softc;
   1728 	struct ifmedia *ifm = &adapter->media;
   1729 
   1730 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   1731 
   1732 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1733 		return (EINVAL);
   1734 
   1735         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1736         case IFM_AUTO:
   1737                 adapter->hw.phy.autoneg_advertised =
   1738 		    IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
   1739                 break;
   1740         default:
   1741                 device_printf(adapter->dev, "Only auto media type\n");
   1742 		return (EINVAL);
   1743         }
   1744 
   1745 	return (0);
   1746 }
   1747 
   1748 /*********************************************************************
   1749  *
   1750  *  This routine maps the mbufs to tx descriptors, allowing the
   1751  *  TX engine to transmit the packets.
   1752  *  	- return 0 on success, positive on failure
   1753  *
   1754  **********************************************************************/
   1755 
   1756 static int
   1757 ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1758 {
   1759 	struct m_tag *mtag;
   1760 	struct adapter  *adapter = txr->adapter;
   1761 	struct ethercom *ec = &adapter->osdep.ec;
   1762 	u32		olinfo_status = 0, cmd_type_len;
   1763 	u32		paylen = 0;
   1764 	int             i, j, error;
   1765 	int		first, last = 0;
   1766 	bus_dmamap_t	map;
   1767 	struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
   1768 	union ixgbe_adv_tx_desc *txd = NULL;
   1769 
   1770 	/* Basic descriptor defines */
   1771         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1772 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1773 
   1774 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1775         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1776 
   1777         /*
   1778          * Important to capture the first descriptor
   1779          * used because it will contain the index of
   1780          * the one we tell the hardware to report back
   1781          */
   1782         first = txr->next_avail_desc;
   1783 	txbuf = &txr->tx_buffers[first];
   1784 	txbuf_mapped = txbuf;
   1785 	map = txbuf->map;
   1786 
   1787 	/*
   1788 	 * Map the packet for DMA.
   1789 	 */
   1790 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1791 	    m_head, BUS_DMA_NOWAIT);
   1792 
   1793 	switch (error) {
   1794 	case EAGAIN:
   1795 		adapter->eagain_tx_dma_setup.ev_count++;
   1796 		return EAGAIN;
   1797 	case ENOMEM:
   1798 		adapter->enomem_tx_dma_setup.ev_count++;
   1799 		return EAGAIN;
   1800 	case EFBIG:
   1801 		adapter->efbig_tx_dma_setup.ev_count++;
   1802 		return error;
   1803 	case EINVAL:
   1804 		adapter->einval_tx_dma_setup.ev_count++;
   1805 		return error;
   1806 	default:
   1807 		adapter->other_tx_dma_setup.ev_count++;
   1808 		return error;
   1809 	case 0:
   1810 		break;
   1811 	}
   1812 
   1813 	/* Make certain there are enough descriptors */
   1814 	if (map->dm_nsegs > txr->tx_avail - 2) {
   1815 		txr->no_desc_avail.ev_count++;
   1816 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1817 		return EAGAIN;
   1818 	}
   1819 
    1820 	/*
    1821 	** Set up the appropriate offload context;
    1822 	** it becomes the first descriptor of
    1823 	** the packet.
    1824 	*/
   1825 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1826 		if (ixgbe_tso_setup(txr, m_head, &paylen)) {
   1827 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1828 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1829 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1830 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1831 			++adapter->tso_tx.ev_count;
   1832 		} else {
   1833 			++adapter->tso_err.ev_count;
   1834 			/* XXX unload DMA map! --dyoung */
   1835 			return ENXIO;
   1836 		}
   1837 	} else
   1838 		olinfo_status |= ixgbe_tx_ctx_setup(txr, m_head);
   1839 
   1840 #ifdef IXGBE_IEEE1588
   1841         /* This is changing soon to an mtag detection */
   1842         if (we detect this mbuf has a TSTAMP mtag)
   1843                 cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
   1844 #endif
   1845 
   1846 #ifdef IXGBE_FDIR
   1847 	/* Do the flow director magic */
   1848 	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
   1849 		++txr->atr_count;
   1850 		if (txr->atr_count >= atr_sample_rate) {
   1851 			ixgbe_atr(txr, m_head);
   1852 			txr->atr_count = 0;
   1853 		}
   1854 	}
   1855 #endif
   1856         /* Record payload length */
   1857 	if (paylen == 0)
   1858         	olinfo_status |= m_head->m_pkthdr.len <<
   1859 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1860 
   1861 	i = txr->next_avail_desc;
   1862 	for (j = 0; j < map->dm_nsegs; j++) {
   1863 		bus_size_t seglen;
   1864 		bus_addr_t segaddr;
   1865 
   1866 		txbuf = &txr->tx_buffers[i];
   1867 		txd = &txr->tx_base[i];
   1868 		seglen = map->dm_segs[j].ds_len;
   1869 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1870 
   1871 		txd->read.buffer_addr = segaddr;
   1872 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1873 		    cmd_type_len |seglen);
   1874 		txd->read.olinfo_status = htole32(olinfo_status);
   1875 		last = i; /* descriptor that will get completion IRQ */
   1876 
   1877 		if (++i == adapter->num_tx_desc)
   1878 			i = 0;
   1879 
   1880 		txbuf->m_head = NULL;
   1881 		txbuf->eop_index = -1;
   1882 	}
   1883 
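         	/*
         	 * The last descriptor of the frame carries EOP (end of
         	 * packet) and RS (report status), so completion is written
         	 * back only once per frame, on this descriptor.
         	 */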
   1884 	txd->read.cmd_type_len |=
   1885 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1886 	txr->tx_avail -= map->dm_nsegs;
   1887 	txr->next_avail_desc = i;
   1888 
   1889 	txbuf->m_head = m_head;
   1890 	/* We exchange the maps instead of copying because otherwise
   1891 	 * we end up with many pointers to the same map and we free
   1892 	 * one map twice in ixgbe_free_transmit_structures().  Who
   1893 	 * knows what other problems this caused.  --dyoung
   1894 	 */
   1895 	txr->tx_buffers[first].map = txbuf->map;
   1896 	txbuf->map = map;
   1897 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1898 	    BUS_DMASYNC_PREWRITE);
   1899 
   1900         /* Set the index of the descriptor that will be marked done */
   1901         txbuf = &txr->tx_buffers[first];
   1902 	txbuf->eop_index = last;
   1903 
   1904         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1905 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1906 	/*
   1907 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
   1908 	 * hardware that this frame is available to transmit.
   1909 	 */
   1910 	++txr->total_packets.ev_count;
   1911 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
   1912 
   1913 	return 0;
   1914 }
   1915 
   1916 static void
   1917 ixgbe_set_promisc(struct adapter *adapter)
   1918 {
    1919 	u32		reg_rctl;
   1920 	struct ifnet   *ifp = adapter->ifp;
   1921 
   1922 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1923 	reg_rctl &= (~IXGBE_FCTRL_UPE);
   1924 	reg_rctl &= (~IXGBE_FCTRL_MPE);
   1925 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1926 
   1927 	if (ifp->if_flags & IFF_PROMISC) {
   1928 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1929 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1930 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   1931 		reg_rctl |= IXGBE_FCTRL_MPE;
   1932 		reg_rctl &= ~IXGBE_FCTRL_UPE;
   1933 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1934 	}
   1935 	return;
   1936 }
   1937 
   1938 
   1939 /*********************************************************************
   1940  *  Multicast Update
   1941  *
   1942  *  This routine is called whenever multicast address list is updated.
   1943  *
   1944  **********************************************************************/
   1945 #define IXGBE_RAR_ENTRIES 16
   1946 
   1947 static void
   1948 ixgbe_set_multi(struct adapter *adapter)
   1949 {
   1950 	struct ether_multi *enm;
   1951 	struct ether_multistep step;
   1952 	u32	fctrl;
   1953 	u8	*mta;
   1954 	u8	*update_ptr;
   1955 	int	mcnt = 0;
   1956 	struct ethercom *ec = &adapter->osdep.ec;
   1957 	struct ifnet   *ifp = adapter->ifp;
   1958 
   1959 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   1960 
   1961 	mta = adapter->mta;
   1962 	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
   1963 	    MAX_NUM_MULTICAST_ADDRESSES);
   1964 
   1965 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1966 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1967 	if (ifp->if_flags & IFF_PROMISC)
   1968 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1969 	else if (ifp->if_flags & IFF_ALLMULTI) {
   1970 		fctrl |= IXGBE_FCTRL_MPE;
   1971 		fctrl &= ~IXGBE_FCTRL_UPE;
   1972 	} else
   1973 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1974 
   1975 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   1976 
   1977 	ETHER_FIRST_MULTI(step, ec, enm);
   1978 	while (enm != NULL) {
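         		/*
         		 * A range of addresses (addrlo != addrhi) cannot be
         		 * filtered exactly; fall back to receiving all
         		 * multicast packets.
         		 */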
    1979 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
    1980 		           ETHER_ADDR_LEN) != 0) {
    1981 			fctrl |= IXGBE_FCTRL_MPE;
    1982 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
    1983 			break;
    1984 		}
         		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
         			/* Filter table full; fall back to allmulti */
         			fctrl |= IXGBE_FCTRL_MPE;
         			IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
         			break;
         		}
    1985 		bcopy(enm->enm_addrlo,
    1986 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
    1987 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
    1988 		mcnt++;
   1989 		ETHER_NEXT_MULTI(step, enm);
   1990 	}
   1991 
   1992 	update_ptr = mta;
   1993 	ixgbe_update_mc_addr_list(&adapter->hw,
   1994 	    update_ptr, mcnt, ixgbe_mc_array_itr);
   1995 
   1996 	return;
   1997 }
   1998 
   1999 /*
   2000  * This is an iterator function now needed by the multicast
   2001  * shared code. It simply feeds the shared code routine the
   2002  * addresses in the array of ixgbe_set_multi() one by one.
   2003  */
   2004 static u8 *
   2005 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   2006 {
   2007 	u8 *addr = *update_ptr;
   2008 	u8 *newptr;
   2009 	*vmdq = 0;
   2010 
   2011 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   2012 	*update_ptr = newptr;
   2013 	return addr;
   2014 }
   2015 
   2016 
   2017 /*********************************************************************
   2018  *  Timer routine
   2019  *
    2020  *  This routine checks for link status, updates statistics,
   2021  *  and runs the watchdog check.
   2022  *
   2023  **********************************************************************/
   2024 
   2025 static void
   2026 ixgbe_local_timer1(void *arg)
   2027 {
   2028 	struct adapter *adapter = arg;
   2029 	device_t	dev = adapter->dev;
   2030 	struct tx_ring *txr = adapter->tx_rings;
   2031 
   2032 	KASSERT(mutex_owned(&adapter->core_mtx));
   2033 
   2034 	/* Check for pluggable optics */
   2035 	if (adapter->sfp_probe)
   2036 		if (!ixgbe_sfp_probe(adapter))
   2037 			goto out; /* Nothing to do */
   2038 
   2039 	ixgbe_update_link_status(adapter);
   2040 	ixgbe_update_stats_counters(adapter);
   2041 
   2042 	/*
   2043 	 * If the interface has been paused
   2044 	 * then don't do the watchdog check
   2045 	 */
   2046 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   2047 		goto out;
   2048 
   2049 	/*
   2050 	** Check status on the TX queues for a hang
   2051 	*/
   2052         for (int i = 0; i < adapter->num_queues; i++, txr++)
   2053 		if (txr->queue_status == IXGBE_QUEUE_HUNG)
   2054 			goto hung;
   2055 
   2056 out:
   2057 	ixgbe_rearm_queues(adapter, adapter->que_mask);
   2058 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   2059 	return;
   2060 
   2061 hung:
   2062 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   2063 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
   2064 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
   2065 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
    2066 	device_printf(dev, "TX(%d) desc avail = %d, "
    2067 	    "Next TX to Clean = %d\n",
   2068 	    txr->me, txr->tx_avail, txr->next_to_clean);
   2069 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   2070 	adapter->watchdog_events.ev_count++;
   2071 	ixgbe_init_locked(adapter);
   2072 }
   2073 
   2074 static void
   2075 ixgbe_local_timer(void *arg)
   2076 {
   2077 	struct adapter *adapter = arg;
   2078 
   2079 	IXGBE_CORE_LOCK(adapter);
   2080 	ixgbe_local_timer1(adapter);
   2081 	IXGBE_CORE_UNLOCK(adapter);
   2082 }
   2083 
   2084 /*
   2085 ** Note: this routine updates the OS on the link state
   2086 **	the real check of the hardware only happens with
   2087 **	a link interrupt.
   2088 */
   2089 static void
   2090 ixgbe_update_link_status(struct adapter *adapter)
   2091 {
   2092 	struct ifnet	*ifp = adapter->ifp;
   2093 	struct tx_ring *txr = adapter->tx_rings;
   2094 	device_t dev = adapter->dev;
   2095 
   2096 
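         	/* link_speed is an IXGBE_LINK_SPEED_* mask; 128 (0x80) is
         	 * IXGBE_LINK_SPEED_10GB_FULL, hence the 10-vs-1 Gbps test. */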
   2097 	if (adapter->link_up){
   2098 		if (adapter->link_active == FALSE) {
   2099 			if (bootverbose)
   2100 				device_printf(dev,"Link is up %d Gbps %s \n",
   2101 				    ((adapter->link_speed == 128)? 10:1),
   2102 				    "Full Duplex");
   2103 			adapter->link_active = TRUE;
   2104 			if_link_state_change(ifp, LINK_STATE_UP);
   2105 		}
   2106 	} else { /* Link down */
   2107 		if (adapter->link_active == TRUE) {
   2108 			if (bootverbose)
   2109 				device_printf(dev,"Link is Down\n");
   2110 			if_link_state_change(ifp, LINK_STATE_DOWN);
   2111 			adapter->link_active = FALSE;
   2112 			for (int i = 0; i < adapter->num_queues;
   2113 			    i++, txr++)
   2114 				txr->queue_status = IXGBE_QUEUE_IDLE;
   2115 		}
   2116 	}
   2117 
   2118 	return;
   2119 }
   2120 
   2121 
   2122 static void
   2123 ixgbe_ifstop(struct ifnet *ifp, int disable)
   2124 {
   2125 	struct adapter *adapter = ifp->if_softc;
   2126 
   2127 	IXGBE_CORE_LOCK(adapter);
   2128 	ixgbe_stop(adapter);
   2129 	IXGBE_CORE_UNLOCK(adapter);
   2130 }
   2131 
   2132 /*********************************************************************
   2133  *
   2134  *  This routine disables all traffic on the adapter by issuing a
   2135  *  global reset on the MAC and deallocates TX/RX buffers.
   2136  *
   2137  **********************************************************************/
   2138 
   2139 static void
   2140 ixgbe_stop(void *arg)
   2141 {
   2142 	struct ifnet   *ifp;
   2143 	struct adapter *adapter = arg;
   2144 	struct ixgbe_hw *hw = &adapter->hw;
   2145 	ifp = adapter->ifp;
   2146 
   2147 	KASSERT(mutex_owned(&adapter->core_mtx));
   2148 
   2149 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   2150 	ixgbe_disable_intr(adapter);
   2151 
   2152 	/* Tell the stack that the interface is no longer active */
   2153 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2154 
   2155 	ixgbe_reset_hw(hw);
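         	/*
         	 * XXX adapter_stopped is cleared here so the shared-code
         	 * ixgbe_stop_adapter() path is not short-circuited after the
         	 * reset above.
         	 */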
   2156 	hw->adapter_stopped = FALSE;
   2157 	ixgbe_stop_adapter(hw);
   2158 	/* Turn off the laser */
   2159 	if (hw->phy.multispeed_fiber)
   2160 		ixgbe_disable_tx_laser(hw);
   2161 	callout_stop(&adapter->timer);
   2162 
   2163 	/* reprogram the RAR[0] in case user changed it. */
   2164 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   2165 
   2166 	return;
   2167 }
   2168 
   2169 
   2170 /*********************************************************************
   2171  *
   2172  *  Determine hardware revision.
   2173  *
   2174  **********************************************************************/
   2175 static void
   2176 ixgbe_identify_hardware(struct adapter *adapter)
   2177 {
   2178 	pcitag_t tag;
   2179 	pci_chipset_tag_t pc;
   2180 	pcireg_t subid, id;
   2181 	struct ixgbe_hw *hw = &adapter->hw;
   2182 
   2183 	pc = adapter->osdep.pc;
   2184 	tag = adapter->osdep.tag;
   2185 
   2186 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   2187 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   2188 
   2189 	/* Save off the information about this board */
   2190 	hw->vendor_id = PCI_VENDOR(id);
   2191 	hw->device_id = PCI_PRODUCT(id);
   2192 	hw->revision_id =
   2193 	    PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   2194 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   2195 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   2196 
   2197 	/* We need this here to set the num_segs below */
   2198 	ixgbe_set_mac_type(hw);
   2199 
   2200 	/* Pick up the 82599 and VF settings */
   2201 	if (hw->mac.type != ixgbe_mac_82598EB) {
   2202 		hw->phy.smart_speed = ixgbe_smart_speed;
   2203 		adapter->num_segs = IXGBE_82599_SCATTER;
   2204 	} else
   2205 		adapter->num_segs = IXGBE_82598_SCATTER;
   2206 
   2207 	return;
   2208 }
   2209 
   2210 /*********************************************************************
   2211  *
   2212  *  Determine optic type
   2213  *
   2214  **********************************************************************/
   2215 static void
   2216 ixgbe_setup_optics(struct adapter *adapter)
   2217 {
   2218 	struct ixgbe_hw *hw = &adapter->hw;
   2219 	int		layer;
   2220 
   2221 	layer = ixgbe_get_supported_physical_layer(hw);
   2222 	switch (layer) {
   2223 		case IXGBE_PHYSICAL_LAYER_10GBASE_T:
   2224 			adapter->optics = IFM_10G_T;
   2225 			break;
   2226 		case IXGBE_PHYSICAL_LAYER_1000BASE_T:
   2227 			adapter->optics = IFM_1000_T;
   2228 			break;
   2229 		case IXGBE_PHYSICAL_LAYER_10GBASE_LR:
   2230 		case IXGBE_PHYSICAL_LAYER_10GBASE_LRM:
   2231 			adapter->optics = IFM_10G_LR;
   2232 			break;
   2233 		case IXGBE_PHYSICAL_LAYER_10GBASE_SR:
   2234 			adapter->optics = IFM_10G_SR;
   2235 			break;
   2236 		case IXGBE_PHYSICAL_LAYER_10GBASE_KX4:
   2237 		case IXGBE_PHYSICAL_LAYER_10GBASE_CX4:
   2238 			adapter->optics = IFM_10G_CX4;
   2239 			break;
   2240 		case IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU:
   2241 			adapter->optics = IFM_10G_TWINAX;
   2242 			break;
   2243 		case IXGBE_PHYSICAL_LAYER_1000BASE_KX:
   2244 		case IXGBE_PHYSICAL_LAYER_10GBASE_KR:
   2245 		case IXGBE_PHYSICAL_LAYER_10GBASE_XAUI:
   2246 		case IXGBE_PHYSICAL_LAYER_UNKNOWN:
   2247 		default:
   2248 			adapter->optics = IFM_ETHER | IFM_AUTO;
   2249 			break;
   2250 	}
   2251 	return;
   2252 }
   2253 
   2254 /*********************************************************************
   2255  *
   2256  *  Setup the Legacy or MSI Interrupt handler
   2257  *
   2258  **********************************************************************/
   2259 static int
   2260 ixgbe_allocate_legacy(struct adapter *adapter, const struct pci_attach_args *pa)
   2261 {
   2262 	device_t dev = adapter->dev;
   2263 	struct		ix_queue *que = adapter->queues;
   2264 	int rid = 0;
   2265 	char intrbuf[PCI_INTRSTR_LEN];
   2266 
   2267 	/* MSI RID at 1 */
   2268 	if (adapter->msix == 1)
   2269 		rid = 1;
   2270 
   2271 	/* We allocate a single interrupt resource */
    2272 	if (pci_intr_map(pa, &adapter->osdep.ih) != 0) {
   2273 		aprint_error_dev(dev, "unable to map interrupt\n");
   2274 		return ENXIO;
   2275 	} else {
    2276 		aprint_normal_dev(dev, "interrupting at %s\n",
    2277 		    pci_intr_string(adapter->osdep.pc, adapter->osdep.ih,
         		        intrbuf, sizeof(intrbuf)));
   2278 	}
   2279 
   2280 	/*
   2281 	 * Try allocating a fast interrupt and the associated deferred
   2282 	 * processing contexts.
   2283 	 */
   2284 	que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que, que);
   2285 
   2286 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2287 	adapter->link_si =
   2288 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2289 	adapter->mod_si =
   2290 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2291 	adapter->msf_si =
   2292 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2293 
   2294 #ifdef IXGBE_FDIR
   2295 	adapter->fdir_si =
   2296 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2297 #endif
   2298 	if (que->que_si == NULL ||
   2299 	    adapter->link_si == NULL ||
   2300 	    adapter->mod_si == NULL ||
   2301 #ifdef IXGBE_FDIR
   2302 	    adapter->fdir_si == NULL ||
   2303 #endif
   2304 	    adapter->msf_si == NULL) {
   2305 		aprint_error_dev(dev,
   2306 		    "could not establish software interrupts\n");
   2307 		return ENXIO;
   2308 	}
   2309 
   2310 	adapter->osdep.intr = pci_intr_establish(adapter->osdep.pc,
   2311 	    adapter->osdep.ih, IPL_NET, ixgbe_legacy_irq, que);
   2312 	if (adapter->osdep.intr == NULL) {
   2313 		aprint_error_dev(dev, "failed to register interrupt handler\n");
   2314 		softint_disestablish(que->que_si);
   2315 		softint_disestablish(adapter->link_si);
   2316 		softint_disestablish(adapter->mod_si);
   2317 		softint_disestablish(adapter->msf_si);
   2318 #ifdef IXGBE_FDIR
   2319 		softint_disestablish(adapter->fdir_si);
   2320 #endif
   2321 		return ENXIO;
   2322 	}
   2323 	/* For simplicity in the handlers */
   2324 	adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
   2325 
   2326 	return (0);
   2327 }
   2328 
   2329 
   2330 /*********************************************************************
   2331  *
   2332  *  Setup MSIX Interrupt resources and handlers
   2333  *
   2334  **********************************************************************/
   2335 static int
   2336 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2337 {
   2338 #if !defined(NETBSD_MSI_OR_MSIX)
   2339 	return 0;
   2340 #else
   2341 	device_t        dev = adapter->dev;
   2342 	struct 		ix_queue *que = adapter->queues;
   2343 	int 		error, rid, vector = 0;
   2344 
   2345 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   2346 		rid = vector + 1;
   2347 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
   2348 		    RF_SHAREABLE | RF_ACTIVE);
   2349 		if (que->res == NULL) {
   2350 			aprint_error_dev(dev,"Unable to allocate"
   2351 		    	    " bus resource: que interrupt [%d]\n", vector);
   2352 			return (ENXIO);
   2353 		}
   2354 		/* Set the handler function */
   2355 		error = bus_setup_intr(dev, que->res,
   2356 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   2357 		    ixgbe_msix_que, que, &que->tag);
   2358 		if (error) {
   2359 			que->res = NULL;
   2360 			aprint_error_dev(dev,
   2361 			    "Failed to register QUE handler\n");
   2362 			return error;
   2363 		}
   2364 #if __FreeBSD_version >= 800504
   2365 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
   2366 #endif
   2367 		que->msix = vector;
   2368         	adapter->que_mask |= (u64)(1 << que->msix);
   2369 		/*
   2370 		** Bind the msix vector, and thus the
   2371 		** ring to the corresponding cpu.
   2372 		*/
   2373 		if (adapter->num_queues > 1)
   2374 			bus_bind_intr(dev, que->res, i);
   2375 
    2376 		que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que, que);
   2377 		if (que->que_si == NULL) {
   2378 			aprint_error_dev(dev,
   2379 			    "could not establish software interrupt\n");
   2380 		}
   2381 	}
   2382 
   2383 	/* and Link */
   2384 	rid = vector + 1;
   2385 	adapter->res = bus_alloc_resource_any(dev,
   2386     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
   2387 	if (!adapter->res) {
   2388 		aprint_error_dev(dev,"Unable to allocate bus resource: "
   2389 		    "Link interrupt [%d]\n", rid);
   2390 		return (ENXIO);
   2391 	}
   2392 	/* Set the link handler function */
   2393 	error = bus_setup_intr(dev, adapter->res,
   2394 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   2395 	    ixgbe_msix_link, adapter, &adapter->tag);
   2396 	if (error) {
   2397 		adapter->res = NULL;
   2398 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2399 		return (error);
   2400 	}
   2401 #if __FreeBSD_version >= 800504
   2402 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
   2403 #endif
   2404 	adapter->linkvec = vector;
   2405 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2406 	adapter->link_si =
   2407 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2408 	adapter->mod_si =
   2409 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2410 	adapter->msf_si =
   2411 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2412 #ifdef IXGBE_FDIR
   2413 	adapter->fdir_si =
   2414 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2415 #endif
   2416 
   2417 	return (0);
   2418 #endif
   2419 }
   2420 
   2421 /*
   2422  * Setup Either MSI/X or MSI
   2423  */
   2424 static int
   2425 ixgbe_setup_msix(struct adapter *adapter)
   2426 {
   2427 #if !defined(NETBSD_MSI_OR_MSIX)
   2428 	return 0;
   2429 #else
   2430 	device_t dev = adapter->dev;
   2431 	int rid, want, queues, msgs;
   2432 
   2433 	/* Override by tuneable */
   2434 	if (ixgbe_enable_msix == 0)
   2435 		goto msi;
   2436 
   2437 	/* First try MSI/X */
   2438 	rid = PCI_BAR(MSIX_82598_BAR);
   2439 	adapter->msix_mem = bus_alloc_resource_any(dev,
   2440 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   2441        	if (!adapter->msix_mem) {
   2442 		rid += 4;	/* 82599 maps in higher BAR */
   2443 		adapter->msix_mem = bus_alloc_resource_any(dev,
   2444 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   2445 	}
   2446        	if (!adapter->msix_mem) {
   2447 		/* May not be enabled */
   2448 		device_printf(adapter->dev,
   2449 		    "Unable to map MSIX table \n");
   2450 		goto msi;
   2451 	}
   2452 
   2453 	msgs = pci_msix_count(dev);
   2454 	if (msgs == 0) { /* system has msix disabled */
   2455 		bus_release_resource(dev, SYS_RES_MEMORY,
   2456 		    rid, adapter->msix_mem);
   2457 		adapter->msix_mem = NULL;
   2458 		goto msi;
   2459 	}
   2460 
   2461 	/* Figure out a reasonable auto config value */
   2462 	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
   2463 
   2464 	if (ixgbe_num_queues != 0)
   2465 		queues = ixgbe_num_queues;
   2466 	/* Set max queues to 8 when autoconfiguring */
   2467 	else if ((ixgbe_num_queues == 0) && (queues > 8))
   2468 		queues = 8;
   2469 
   2470 	/*
   2471 	** Want one vector (RX/TX pair) per queue
   2472 	** plus an additional for Link.
   2473 	*/
   2474 	want = queues + 1;
   2475 	if (msgs >= want)
   2476 		msgs = want;
   2477 	else {
   2478                	device_printf(adapter->dev,
   2479 		    "MSIX Configuration Problem, "
   2480 		    "%d vectors but %d queues wanted!\n",
   2481 		    msgs, want);
   2482 		return (0); /* Will go to Legacy setup */
   2483 	}
   2484 	if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
   2485                	device_printf(adapter->dev,
   2486 		    "Using MSIX interrupts with %d vectors\n", msgs);
   2487 		adapter->num_queues = queues;
   2488 		return (msgs);
   2489 	}
   2490 msi:
   2491        	msgs = pci_msi_count(dev);
   2492        	if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
   2493                	device_printf(adapter->dev,"Using MSI interrupt\n");
   2494 	return (msgs);
   2495 #endif
   2496 }
   2497 
   2498 
   2499 static int
   2500 ixgbe_allocate_pci_resources(struct adapter *adapter, const struct pci_attach_args *pa)
   2501 {
   2502 	pcireg_t	memtype;
   2503 	device_t        dev = adapter->dev;
   2504 	bus_addr_t addr;
   2505 	int flags;
   2506 
   2507 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   2508 	switch (memtype) {
   2509 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2510 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2511 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   2512 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   2513 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   2514 			goto map_err;
   2515 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   2516 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   2517 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   2518 		}
   2519 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   2520 		     adapter->osdep.mem_size, flags,
   2521 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   2522 map_err:
   2523 			adapter->osdep.mem_size = 0;
   2524 			aprint_error_dev(dev, "unable to map BAR0\n");
   2525 			return ENXIO;
   2526 		}
   2527 		break;
   2528 	default:
   2529 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   2530 		return ENXIO;
   2531 	}
   2532 
   2533 	/* Legacy defaults */
   2534 	adapter->num_queues = 1;
   2535 	adapter->hw.back = &adapter->osdep;
   2536 
   2537 	/*
   2538 	** Now setup MSI or MSI/X, should
   2539 	** return us the number of supported
   2540 	** vectors. (Will be 1 for MSI)
   2541 	*/
   2542 	adapter->msix = ixgbe_setup_msix(adapter);
   2543 	return (0);
   2544 }
   2545 
   2546 static void
   2547 ixgbe_free_pci_resources(struct adapter * adapter)
   2548 {
   2549 #if defined(NETBSD_MSI_OR_MSIX)
   2550 	struct 		ix_queue *que = adapter->queues;
   2551 #endif
   2552 	device_t	dev = adapter->dev;
   2553 	int		rid, memrid;
   2554 
   2555 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   2556 		memrid = PCI_BAR(MSIX_82598_BAR);
   2557 	else
   2558 		memrid = PCI_BAR(MSIX_82599_BAR);
   2559 
   2560 #if defined(NETBSD_MSI_OR_MSIX)
   2561 	/*
   2562 	** There is a slight possibility of a failure mode
   2563 	** in attach that will result in entering this function
   2564 	** before interrupt resources have been initialized, and
   2565 	** in that case we do not want to execute the loops below
   2566 	** We can detect this reliably by the state of the adapter
   2567 	** res pointer.
   2568 	*/
   2569 	if (adapter->res == NULL)
   2570 		goto mem;
   2571 
   2572 	/*
   2573 	**  Release all msix queue resources:
   2574 	*/
   2575 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2576 		rid = que->msix + 1;
   2577 		if (que->tag != NULL) {
   2578 			bus_teardown_intr(dev, que->res, que->tag);
   2579 			que->tag = NULL;
   2580 		}
   2581 		if (que->res != NULL)
   2582 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
   2583 	}
   2584 #endif
   2585 
   2586 	/* Clean the Legacy or Link interrupt last */
   2587 	if (adapter->linkvec) /* we are doing MSIX */
   2588 		rid = adapter->linkvec + 1;
   2589 	else
    2590 		rid = (adapter->msix != 0) ? 1 : 0;
   2591 
   2592 	printf("%s: disestablishing interrupt handler\n", device_xname(dev));
   2593 	pci_intr_disestablish(adapter->osdep.pc, adapter->osdep.intr);
   2594 	adapter->osdep.intr = NULL;
   2595 
   2596 #if defined(NETBSD_MSI_OR_MSIX)
   2597 mem:
   2598 	if (adapter->msix)
   2599 		pci_release_msi(dev);
   2600 
   2601 	if (adapter->msix_mem != NULL)
   2602 		bus_release_resource(dev, SYS_RES_MEMORY,
   2603 		    memrid, adapter->msix_mem);
   2604 #endif
   2605 
   2606 	if (adapter->osdep.mem_size != 0) {
   2607 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   2608 		    adapter->osdep.mem_bus_space_handle,
   2609 		    adapter->osdep.mem_size);
   2610 	}
   2611 
   2612 	return;
   2613 }
   2614 
   2615 /*********************************************************************
   2616  *
   2617  *  Setup networking device structure and register an interface.
   2618  *
   2619  **********************************************************************/
   2620 static int
   2621 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   2622 {
   2623 	struct ethercom *ec = &adapter->osdep.ec;
   2624 	struct ixgbe_hw *hw = &adapter->hw;
   2625 	struct ifnet   *ifp;
   2626 
   2627 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   2628 
   2629 	ifp = adapter->ifp = &ec->ec_if;
   2630 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   2631 	ifp->if_mtu = ETHERMTU;
   2632 	ifp->if_baudrate = 1000000000;
   2633 	ifp->if_init = ixgbe_init;
   2634 	ifp->if_stop = ixgbe_ifstop;
   2635 	ifp->if_softc = adapter;
   2636 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2637 	ifp->if_ioctl = ixgbe_ioctl;
   2638 	ifp->if_start = ixgbe_start;
   2639 #if __FreeBSD_version >= 800000
   2640 	ifp->if_transmit = ixgbe_mq_start;
   2641 	ifp->if_qflush = ixgbe_qflush;
   2642 #endif
   2643 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   2644 
   2645 	if_attach(ifp);
   2646 	ether_ifattach(ifp, adapter->hw.mac.addr);
   2647 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   2648 
   2649 	adapter->max_frame_size =
   2650 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   2651 
   2652 	/*
   2653 	 * Tell the upper layer(s) we support long frames.
   2654 	 */
   2655 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   2656 
   2657 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   2658 	ifp->if_capenable = 0;
   2659 
   2660 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   2661 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
   2662 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2663 	ec->ec_capenable = ec->ec_capabilities;
   2664 
    2665 	/* Advertise LRO capability, but leave it disabled by default */
    2666 	ifp->if_capabilities |= IFCAP_LRO;
   2667 
    2668 	/*
    2669 	** Don't turn this on by default; if vlans are
    2670 	** created on another pseudo device (eg. lagg)
    2671 	** then vlan events are not passed through, breaking
    2672 	** operation, but with HW FILTER off it works. If
    2673 	** using vlans directly on this driver you can
    2674 	** enable this and get full hardware tag filtering.
    2675 	*/
   2676 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   2677 
   2678 	/*
   2679 	 * Specify the media types supported by this adapter and register
   2680 	 * callbacks to update media and link information
   2681 	 */
   2682 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   2683 		     ixgbe_media_status);
   2684 	ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
   2685 	ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
   2686 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
   2687 		ifmedia_add(&adapter->media,
   2688 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2689 		ifmedia_add(&adapter->media,
   2690 		    IFM_ETHER | IFM_1000_T, 0, NULL);
   2691 	}
   2692 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   2693 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   2694 
   2695 	return (0);
   2696 }
   2697 
   2698 static void
   2699 ixgbe_config_link(struct adapter *adapter)
   2700 {
   2701 	struct ixgbe_hw *hw = &adapter->hw;
   2702 	u32	autoneg, err = 0;
   2703 	bool	sfp, negotiate;
   2704 
   2705 	sfp = ixgbe_is_sfp(hw);
   2706 
   2707 	if (sfp) {
   2708 		if (hw->phy.multispeed_fiber) {
   2709 			hw->mac.ops.setup_sfp(hw);
   2710 			ixgbe_enable_tx_laser(hw);
   2711 			softint_schedule(adapter->msf_si);
   2712 		} else {
   2713 			softint_schedule(adapter->mod_si);
   2714 		}
   2715 	} else {
   2716 		if (hw->mac.ops.check_link)
   2717 			err = ixgbe_check_link(hw, &autoneg,
   2718 			    &adapter->link_up, FALSE);
   2719 		if (err)
   2720 			goto out;
   2721 		autoneg = hw->phy.autoneg_advertised;
   2722 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   2723                 	err  = hw->mac.ops.get_link_capabilities(hw,
   2724 			    &autoneg, &negotiate);
   2725 		if (err)
   2726 			goto out;
   2727 		if (hw->mac.ops.setup_link)
   2728                 	err = hw->mac.ops.setup_link(hw, autoneg,
   2729 			    negotiate, adapter->link_up);
   2730 	}
   2731 out:
   2732 	return;
   2733 }
   2734 
   2735 /********************************************************************
   2736  * Manage DMA'able memory.
   2737  *******************************************************************/
   2738 
   2739 static int
   2740 ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
   2741 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2742 {
   2743 	device_t dev = adapter->dev;
   2744 	int             r, rsegs;
   2745 
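         	/*
         	 * bus_dma(9) setup takes five steps: create a tag, allocate
         	 * the memory segment, map it into kernel VA, create the DMA
         	 * map, and load the map to obtain the bus address.  The
         	 * fail_* labels below unwind the steps completed so far.
         	 */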
   2746 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2747 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2748 			       size,	/* maxsize */
   2749 			       1,	/* nsegments */
   2750 			       size,	/* maxsegsize */
   2751 			       BUS_DMA_ALLOCNOW,	/* flags */
   2752 			       &dma->dma_tag);
   2753 	if (r != 0) {
   2754 		aprint_error_dev(dev,
   2755 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
   2756 		goto fail_0;
   2757 	}
   2758 
   2759 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2760 		size,
   2761 		dma->dma_tag->dt_alignment,
   2762 		dma->dma_tag->dt_boundary,
   2763 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2764 	if (r != 0) {
   2765 		aprint_error_dev(dev,
   2766 		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
   2767 		goto fail_1;
   2768 	}
   2769 
   2770 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2771 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2772 	if (r != 0) {
   2773 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2774 		    __func__, r);
   2775 		goto fail_2;
   2776 	}
   2777 
   2778 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2779 	if (r != 0) {
    2780 		aprint_error_dev(dev,
    2781 		    "%s: ixgbe_dmamap_create failed; error %d\n", __func__, r);
   2782 		goto fail_3;
   2783 	}
   2784 
   2785 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2786 			    size,
   2787 			    NULL,
   2788 			    mapflags | BUS_DMA_NOWAIT);
   2789 	if (r != 0) {
   2790 		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
   2791 		    __func__, r);
   2792 		goto fail_4;
   2793 	}
   2794 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2795 	dma->dma_size = size;
   2796 	return 0;
   2797 fail_4:
   2798 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2799 fail_3:
   2800 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2801 fail_2:
   2802 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2803 fail_1:
   2804 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2805 fail_0:
   2806 	return r;
   2807 }
   2808 
   2809 static void
   2810 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
   2811 {
   2812 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2813 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2814 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2815 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2816 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2817 }
   2818 
   2819 
   2820 /*********************************************************************
   2821  *
   2822  *  Allocate memory for the transmit and receive rings, and then
   2823  *  the descriptors associated with each, called only once at attach.
   2824  *
   2825  **********************************************************************/
   2826 static int
   2827 ixgbe_allocate_queues(struct adapter *adapter)
   2828 {
   2829 	device_t	dev = adapter->dev;
   2830 	struct ix_queue	*que;
   2831 	struct tx_ring	*txr;
   2832 	struct rx_ring	*rxr;
   2833 	int rsize, tsize, error = IXGBE_SUCCESS;
   2834 	int txconf = 0, rxconf = 0;
   2835 
   2836         /* First allocate the top level queue structs */
   2837         if (!(adapter->queues =
   2838             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2839             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2840                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2841                 error = ENOMEM;
   2842                 goto fail;
   2843         }
   2844 
    2845 	/* Next allocate the TX ring struct memory */
   2846 	if (!(adapter->tx_rings =
   2847 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2848 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2849 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2850 		error = ENOMEM;
   2851 		goto tx_fail;
   2852 	}
   2853 
   2854 	/* Next allocate the RX */
   2855 	if (!(adapter->rx_rings =
   2856 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2857 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2858 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2859 		error = ENOMEM;
   2860 		goto rx_fail;
   2861 	}
   2862 
   2863 	/* For the ring itself */
   2864 	tsize = roundup2(adapter->num_tx_desc *
   2865 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2866 
   2867 	/*
   2868 	 * Now set up the TX queues, txconf is needed to handle the
   2869 	 * possibility that things fail midcourse and we need to
   2870 	 * undo memory gracefully
   2871 	 */
   2872 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2873 		/* Set up some basics */
   2874 		txr = &adapter->tx_rings[i];
   2875 		txr->adapter = adapter;
   2876 		txr->me = i;
   2877 
   2878 		/* Initialize the TX side lock */
   2879 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2880 		    device_xname(dev), txr->me);
   2881 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2882 
   2883 		if (ixgbe_dma_malloc(adapter, tsize,
   2884 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2885 			aprint_error_dev(dev,
   2886 			    "Unable to allocate TX Descriptor memory\n");
   2887 			error = ENOMEM;
   2888 			goto err_tx_desc;
   2889 		}
   2890 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2891 		bzero((void *)txr->tx_base, tsize);
   2892 
   2893         	/* Now allocate transmit buffers for the ring */
   2894         	if (ixgbe_allocate_transmit_buffers(txr)) {
   2895 			aprint_error_dev(dev,
   2896 			    "Critical Failure setting up transmit buffers\n");
   2897 			error = ENOMEM;
   2898 			goto err_tx_desc;
   2899         	}
   2900 #if __FreeBSD_version >= 800000
   2901 		/* Allocate a buf ring */
   2902 		txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
   2903 		    M_WAITOK, &txr->tx_mtx);
   2904 		if (txr->br == NULL) {
   2905 			aprint_error_dev(dev,
   2906 			    "Critical Failure setting up buf ring\n");
   2907 			error = ENOMEM;
   2908 			goto err_tx_desc;
   2909         	}
   2910 #endif
   2911 	}
   2912 
   2913 	/*
   2914 	 * Next the RX queues...
   2915 	 */
   2916 	rsize = roundup2(adapter->num_rx_desc *
   2917 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2918 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2919 		rxr = &adapter->rx_rings[i];
   2920 		/* Set up some basics */
   2921 		rxr->adapter = adapter;
   2922 		rxr->me = i;
   2923 
   2924 		/* Initialize the RX side lock */
   2925 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2926 		    device_xname(dev), rxr->me);
   2927 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2928 
   2929 		if (ixgbe_dma_malloc(adapter, rsize,
   2930 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2931 			aprint_error_dev(dev,
   2932 			    "Unable to allocate RxDescriptor memory\n");
   2933 			error = ENOMEM;
   2934 			goto err_rx_desc;
   2935 		}
   2936 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2937 		bzero((void *)rxr->rx_base, rsize);
   2938 
    2939         	/* Allocate receive buffers for the ring */
   2940 		if (ixgbe_allocate_receive_buffers(rxr)) {
   2941 			aprint_error_dev(dev,
   2942 			    "Critical Failure setting up receive buffers\n");
   2943 			error = ENOMEM;
   2944 			goto err_rx_desc;
   2945 		}
   2946 	}
   2947 
   2948 	/*
   2949 	** Finally set up the queue holding structs
   2950 	*/
   2951 	for (int i = 0; i < adapter->num_queues; i++) {
   2952 		que = &adapter->queues[i];
   2953 		que->adapter = adapter;
   2954 		que->txr = &adapter->tx_rings[i];
   2955 		que->rxr = &adapter->rx_rings[i];
   2956 	}
   2957 
   2958 	return (0);
   2959 
   2960 err_rx_desc:
   2961 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2962 		ixgbe_dma_free(adapter, &rxr->rxdma);
   2963 err_tx_desc:
   2964 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2965 		ixgbe_dma_free(adapter, &txr->txdma);
   2966 	free(adapter->rx_rings, M_DEVBUF);
   2967 rx_fail:
   2968 	free(adapter->tx_rings, M_DEVBUF);
   2969 tx_fail:
   2970 	free(adapter->queues, M_DEVBUF);
   2971 fail:
   2972 	return (error);
   2973 }
   2974 
   2975 /*********************************************************************
   2976  *
   2977  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2978  *  the information needed to transmit a packet on the wire. This is
   2979  *  called only once at attach, setup is done every reset.
   2980  *
   2981  **********************************************************************/
   2982 static int
   2983 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
   2984 {
   2985 	struct adapter *adapter = txr->adapter;
   2986 	device_t dev = adapter->dev;
   2987 	struct ixgbe_tx_buf *txbuf;
   2988 	int error, i;
   2989 
   2990 	/*
   2991 	 * Setup DMA descriptor areas.
   2992 	 */
   2993 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2994 			       1, 0,		/* alignment, bounds */
   2995 			       IXGBE_TSO_SIZE,		/* maxsize */
   2996 			       adapter->num_segs,	/* nsegments */
   2997 			       PAGE_SIZE,		/* maxsegsize */
   2998 			       0,			/* flags */
   2999 			       &txr->txtag))) {
   3000 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   3001 		goto fail;
   3002 	}
   3003 
   3004 	if (!(txr->tx_buffers =
   3005 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
   3006 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   3007 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   3008 		error = ENOMEM;
   3009 		goto fail;
   3010 	}
   3011 
   3012         /* Create the descriptor buffer dma maps */
   3013 	txbuf = txr->tx_buffers;
   3014 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   3015 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   3016 		if (error != 0) {
   3017 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   3018 			goto fail;
   3019 		}
   3020 	}
   3021 
   3022 	return 0;
   3023 fail:
   3024 	/* We free all, it handles case where we are in the middle */
   3025 	ixgbe_free_transmit_structures(adapter);
   3026 	return (error);
   3027 }
   3028 
   3029 /*********************************************************************
   3030  *
   3031  *  Initialize a transmit ring.
   3032  *
   3033  **********************************************************************/
   3034 static void
   3035 ixgbe_setup_transmit_ring(struct tx_ring *txr)
   3036 {
   3037 	struct adapter *adapter = txr->adapter;
   3038 	struct ixgbe_tx_buf *txbuf;
   3039 	int i;
   3040 
   3041 	/* Clear the old ring contents */
   3042 	IXGBE_TX_LOCK(txr);
   3043 	bzero((void *)txr->tx_base,
   3044 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   3045 	/* Reset indices */
   3046 	txr->next_avail_desc = 0;
   3047 	txr->next_to_clean = 0;
   3048 
   3049 	/* Free any existing tx buffers. */
   3050         txbuf = txr->tx_buffers;
   3051 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   3052 		if (txbuf->m_head != NULL) {
   3053 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   3054 			    0, txbuf->m_head->m_pkthdr.len,
   3055 			    BUS_DMASYNC_POSTWRITE);
   3056 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   3057 			m_freem(txbuf->m_head);
   3058 			txbuf->m_head = NULL;
   3059 		}
   3060 		/* Clear the EOP index */
   3061 		txbuf->eop_index = -1;
   3062         }
   3063 
   3064 #ifdef IXGBE_FDIR
   3065 	/* Set the rate at which we sample packets */
   3066 	if (adapter->hw.mac.type != ixgbe_mac_82598EB)
   3067 		txr->atr_sample = atr_sample_rate;
   3068 #endif
   3069 
   3070 	/* Set number of descriptors available */
   3071 	txr->tx_avail = adapter->num_tx_desc;
   3072 
   3073 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3074 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3075 	IXGBE_TX_UNLOCK(txr);
   3076 }
   3077 
   3078 /*********************************************************************
   3079  *
   3080  *  Initialize all transmit rings.
   3081  *
   3082  **********************************************************************/
   3083 static int
   3084 ixgbe_setup_transmit_structures(struct adapter *adapter)
   3085 {
   3086 	struct tx_ring *txr = adapter->tx_rings;
   3087 
   3088 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   3089 		ixgbe_setup_transmit_ring(txr);
   3090 
   3091 	return (0);
   3092 }
   3093 
   3094 /*********************************************************************
   3095  *
   3096  *  Enable transmit unit.
   3097  *
   3098  **********************************************************************/
   3099 static void
   3100 ixgbe_initialize_transmit_units(struct adapter *adapter)
   3101 {
   3102 	struct tx_ring	*txr = adapter->tx_rings;
   3103 	struct ixgbe_hw	*hw = &adapter->hw;
   3104 
   3105 	/* Setup the Base and Length of the Tx Descriptor Ring */
   3106 
   3107 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   3108 		u64	tdba = txr->txdma.dma_paddr;
   3109 		u32	txctrl;
   3110 
   3111 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
   3112 		       (tdba & 0x00000000ffffffffULL));
   3113 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
   3114 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
   3115 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
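		/*
		** Sketch: TDLEN is in bytes, and legacy and advanced TX
		** descriptors share the same 16-byte size, so e.g. 1024
		** descriptors program TDLEN = 16384.  (Descriptor sizes
		** assumed from the usual ixgbe layout.)
		*/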
   3116 
   3117 		/* Setup the HW Tx Head and Tail descriptor pointers */
   3118 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
   3119 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
   3120 
   3121 		/* Setup Transmit Descriptor Cmd Settings */
   3122 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   3123 		txr->queue_status = IXGBE_QUEUE_IDLE;
   3124 
   3125 		/* Disable Head Writeback */
   3126 		switch (hw->mac.type) {
   3127 		case ixgbe_mac_82598EB:
   3128 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
   3129 			break;
   3130 		case ixgbe_mac_82599EB:
   3131 		default:
   3132 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
   3133 			break;
    3134 		}
   3135 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
   3136 		switch (hw->mac.type) {
   3137 		case ixgbe_mac_82598EB:
   3138 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
   3139 			break;
   3140 		case ixgbe_mac_82599EB:
   3141 		default:
   3142 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
   3143 			break;
   3144 		}
   3145 
   3146 	}
   3147 
   3148 	if (hw->mac.type != ixgbe_mac_82598EB) {
   3149 		u32 dmatxctl, rttdcs;
   3150 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
   3151 		dmatxctl |= IXGBE_DMATXCTL_TE;
   3152 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
   3153 		/* Disable arbiter to set MTQC */
   3154 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
   3155 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
   3156 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
   3157 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
   3158 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
   3159 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
   3160 	}
   3161 
   3162 	return;
   3163 }
   3164 
   3165 /*********************************************************************
   3166  *
   3167  *  Free all transmit rings.
   3168  *
   3169  **********************************************************************/
   3170 static void
   3171 ixgbe_free_transmit_structures(struct adapter *adapter)
   3172 {
   3173 	struct tx_ring *txr = adapter->tx_rings;
   3174 
   3175 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   3176 		IXGBE_TX_LOCK(txr);
   3177 		ixgbe_free_transmit_buffers(txr);
   3178 		ixgbe_dma_free(adapter, &txr->txdma);
   3179 		IXGBE_TX_UNLOCK(txr);
   3180 		IXGBE_TX_LOCK_DESTROY(txr);
   3181 	}
   3182 	free(adapter->tx_rings, M_DEVBUF);
   3183 }
   3184 
   3185 /*********************************************************************
   3186  *
   3187  *  Free transmit ring related data structures.
   3188  *
   3189  **********************************************************************/
   3190 static void
   3191 ixgbe_free_transmit_buffers(struct tx_ring *txr)
   3192 {
   3193 	struct adapter *adapter = txr->adapter;
   3194 	struct ixgbe_tx_buf *tx_buffer;
   3195 	int             i;
   3196 
   3197 	INIT_DEBUGOUT("free_transmit_ring: begin");
   3198 
   3199 	if (txr->tx_buffers == NULL)
   3200 		return;
   3201 
   3202 	tx_buffer = txr->tx_buffers;
   3203 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   3204 		if (tx_buffer->m_head != NULL) {
   3205 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   3206 			    0, tx_buffer->m_head->m_pkthdr.len,
   3207 			    BUS_DMASYNC_POSTWRITE);
   3208 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3209 			m_freem(tx_buffer->m_head);
   3210 			tx_buffer->m_head = NULL;
   3211 			if (tx_buffer->map != NULL) {
   3212 				ixgbe_dmamap_destroy(txr->txtag,
   3213 				    tx_buffer->map);
   3214 				tx_buffer->map = NULL;
   3215 			}
   3216 		} else if (tx_buffer->map != NULL) {
   3217 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3218 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   3219 			tx_buffer->map = NULL;
   3220 		}
   3221 	}
   3222 #if __FreeBSD_version >= 800000
   3223 	if (txr->br != NULL)
   3224 		buf_ring_free(txr->br, M_DEVBUF);
   3225 #endif
   3226 	if (txr->tx_buffers != NULL) {
   3227 		free(txr->tx_buffers, M_DEVBUF);
   3228 		txr->tx_buffers = NULL;
   3229 	}
   3230 	if (txr->txtag != NULL) {
   3231 		ixgbe_dma_tag_destroy(txr->txtag);
   3232 		txr->txtag = NULL;
   3233 	}
   3234 	return;
   3235 }
   3236 
   3237 /*********************************************************************
   3238  *
   3239  *  Advanced Context Descriptor setup for VLAN or L4 CSUM
   3240  *
   3241  **********************************************************************/
   3242 
   3243 static u32
   3244 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   3245 {
   3246 	struct m_tag *mtag;
   3247 	struct adapter *adapter = txr->adapter;
   3248 	struct ethercom *ec = &adapter->osdep.ec;
   3249 	struct ixgbe_adv_tx_context_desc *TXD;
   3250 	struct ixgbe_tx_buf        *tx_buffer;
   3251 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   3252 	struct ether_vlan_header *eh;
   3253 	struct ip ip;
   3254 	struct ip6_hdr ip6;
   3255 	int  ehdrlen, ip_hlen = 0;
   3256 	u16	etype;
   3257 	u8	ipproto = 0;
   3258 	bool	offload;
   3259 	int ctxd = txr->next_avail_desc;
   3260 	u16 vtag = 0;
   3261 
   3262 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   3263 
   3264 	tx_buffer = &txr->tx_buffers[ctxd];
   3265 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   3266 
   3267 	/*
   3268 	** In advanced descriptors the vlan tag must
   3269 	** be placed into the descriptor itself.
   3270 	*/
   3271 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   3272 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   3273 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   3274 	} else if (!offload)
   3275 		return 0;
   3276 
   3277 	/*
   3278 	 * Determine where frame payload starts.
   3279 	 * Jump over vlan headers if already present,
   3280 	 * helpful for QinQ too.
   3281 	 */
   3282 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   3283 	eh = mtod(mp, struct ether_vlan_header *);
   3284 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   3285 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   3286 		etype = ntohs(eh->evl_proto);
   3287 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3288 	} else {
   3289 		etype = ntohs(eh->evl_encap_proto);
   3290 		ehdrlen = ETHER_HDR_LEN;
   3291 	}
   3292 
   3293 	/* Set the ether header length */
   3294 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   3295 
   3296 	switch (etype) {
   3297 	case ETHERTYPE_IP:
   3298 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   3299 		ip_hlen = ip.ip_hl << 2;
   3300 		ipproto = ip.ip_p;
   3301 #if 0
   3302 		ip.ip_sum = 0;
   3303 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   3304 #else
   3305 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   3306 		    ip.ip_sum == 0);
   3307 #endif
   3308 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   3309 		break;
   3310 	case ETHERTYPE_IPV6:
   3311 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   3312 		ip_hlen = sizeof(ip6);
   3313 		ipproto = ip6.ip6_nxt;
   3314 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   3315 		break;
   3316 	default:
   3317 		break;
   3318 	}
   3319 
   3320 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   3321 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   3322 
   3323 	vlan_macip_lens |= ip_hlen;
   3324 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   3325 
   3326 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   3327 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   3328 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   3329 		KASSERT(ipproto == IPPROTO_TCP);
   3330 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   3331 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   3332 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   3333 		KASSERT(ipproto == IPPROTO_UDP);
   3334 	}
   3335 
   3336 	/* Now copy bits into descriptor */
   3337 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   3338 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   3339 	TXD->seqnum_seed = htole32(0);
   3340 	TXD->mss_l4len_idx = htole32(0);
   3341 
   3342 	tx_buffer->m_head = NULL;
   3343 	tx_buffer->eop_index = -1;
   3344 
   3345 	/* We've consumed the first desc, adjust counters */
   3346 	if (++ctxd == adapter->num_tx_desc)
   3347 		ctxd = 0;
   3348 	txr->next_avail_desc = ctxd;
   3349 	--txr->tx_avail;
   3350 
    3351 	return olinfo;
   3352 }
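/*
** Field-packing sketch for the context descriptor above (bit
** positions follow the usual ADVTXD layout; treat the numbers as
** illustrative): for an untagged TCPv4 packet with a 14-byte
** ethernet header and a 20-byte IP header,
**
**	vlan_macip_lens = (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20;
**	type_tucmd_mlhl = IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT
**	    | IXGBE_ADVTXD_TUCMD_IPV4 | IXGBE_ADVTXD_TUCMD_L4T_TCP;
**
** and the returned olinfo (IXGBE_TXD_POPTS_TXSM << 8) is OR'd into
** the olinfo_status of the following data descriptors by the caller.
*/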
   3353 
   3354 /**********************************************************************
   3355  *
   3356  *  Setup work for hardware segmentation offload (TSO) on
   3357  *  adapters using advanced tx descriptors
   3358  *
   3359  **********************************************************************/
   3360 static bool
   3361 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   3362 {
   3363 	struct m_tag *mtag;
   3364 	struct adapter *adapter = txr->adapter;
   3365 	struct ethercom *ec = &adapter->osdep.ec;
   3366 	struct ixgbe_adv_tx_context_desc *TXD;
   3367 	struct ixgbe_tx_buf        *tx_buffer;
   3368 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   3369 	u32 mss_l4len_idx = 0;
   3370 	u16 vtag = 0;
    3371 	int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
   3372 	struct ether_vlan_header *eh;
   3373 	struct ip *ip;
   3374 	struct tcphdr *th;
   3375 
   3376 
   3377 	/*
   3378 	 * Determine where frame payload starts.
   3379 	 * Jump over vlan headers if already present
   3380 	 */
   3381 	eh = mtod(mp, struct ether_vlan_header *);
   3382 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   3383 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3384 	else
   3385 		ehdrlen = ETHER_HDR_LEN;
   3386 
    3387 	/* Ensure we have at least the IP+TCP header in the first mbuf. */
    3388 	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   3389 		return FALSE;
   3390 
   3391 	ctxd = txr->next_avail_desc;
   3392 	tx_buffer = &txr->tx_buffers[ctxd];
   3393 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   3394 
   3395 	ip = (struct ip *)(mp->m_data + ehdrlen);
   3396 	if (ip->ip_p != IPPROTO_TCP)
    3397 		return FALSE;	/* not TCP, can't TSO */
   3398 	ip->ip_sum = 0;
   3399 	ip_hlen = ip->ip_hl << 2;
   3400 	th = (struct tcphdr *)((char *)ip + ip_hlen);
   3401 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   3402 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   3403 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   3404 	tcp_hlen = th->th_off << 2;
   3405 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   3406 
   3407 	/* This is used in the transmit desc in encap */
   3408 	*paylen = mp->m_pkthdr.len - hdrlen;
   3409 
   3410 	/* VLAN MACLEN IPLEN */
   3411 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   3412 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
    3413 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   3414 	}
   3415 
   3416 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   3417 	vlan_macip_lens |= ip_hlen;
   3418 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   3419 
   3420 	/* ADV DTYPE TUCMD */
   3421 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   3422 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   3423 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   3424 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   3425 
   3426 
   3427 	/* MSS L4LEN IDX */
   3428 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   3429 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   3430 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   3431 
   3432 	TXD->seqnum_seed = htole32(0);
   3433 	tx_buffer->m_head = NULL;
   3434 	tx_buffer->eop_index = -1;
   3435 
   3436 	if (++ctxd == adapter->num_tx_desc)
   3437 		ctxd = 0;
   3438 
   3439 	txr->tx_avail--;
   3440 	txr->next_avail_desc = ctxd;
   3441 	return TRUE;
   3442 }
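/*
** Why th_sum is seeded above (sketch): for TSO the hardware computes
** the final TCP checksum of each segment, but expects the driver to
** pre-load th_sum with the pseudo-header sum *excluding* the length
** word, since the length differs per segment.  E.g., assuming
** in_cksum_phdr() has the usual NetBSD semantics:
**
**	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
**	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
**
** For an mbuf with m_pkthdr.segsz == 1460 and hdrlen == 54, the
** hardware then emits roughly paylen / 1460 segments, each with the
** headers replicated and the checksums fixed up.
*/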
   3443 
   3444 #ifdef IXGBE_FDIR
   3445 /*
   3446 ** This routine parses packet headers so that Flow
   3447 ** Director can make a hashed filter table entry
   3448 ** allowing traffic flows to be identified and kept
    3449 ** on the same cpu.  Parsing every packet would be a
    3450 ** performance hit, so we only sample one in every
    3451 ** IXGBE_FDIR_RATE packets.
   3452 */
   3453 static void
   3454 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
   3455 {
   3456 	struct adapter			*adapter = txr->adapter;
   3457 	struct ix_queue			*que;
   3458 	struct ip			*ip;
   3459 	struct tcphdr			*th;
   3460 	struct udphdr			*uh;
   3461 	struct ether_vlan_header	*eh;
   3462 	union ixgbe_atr_hash_dword	input = {.dword = 0};
   3463 	union ixgbe_atr_hash_dword	common = {.dword = 0};
   3464 	int  				ehdrlen, ip_hlen;
   3465 	u16				etype;
   3466 
   3467 	eh = mtod(mp, struct ether_vlan_header *);
   3468 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   3469 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3470 		etype = eh->evl_proto;
   3471 	} else {
   3472 		ehdrlen = ETHER_HDR_LEN;
   3473 		etype = eh->evl_encap_proto;
   3474 	}
   3475 
   3476 	/* Only handling IPv4 */
   3477 	if (etype != htons(ETHERTYPE_IP))
   3478 		return;
   3479 
   3480 	ip = (struct ip *)(mp->m_data + ehdrlen);
   3481 	ip_hlen = ip->ip_hl << 2;
   3482 
   3483 	/* check if we're UDP or TCP */
   3484 	switch (ip->ip_p) {
   3485 	case IPPROTO_TCP:
   3486 		th = (struct tcphdr *)((char *)ip + ip_hlen);
   3487 		/* src and dst are inverted */
   3488 		common.port.dst ^= th->th_sport;
   3489 		common.port.src ^= th->th_dport;
   3490 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
   3491 		break;
   3492 	case IPPROTO_UDP:
   3493 		uh = (struct udphdr *)((char *)ip + ip_hlen);
   3494 		/* src and dst are inverted */
   3495 		common.port.dst ^= uh->uh_sport;
   3496 		common.port.src ^= uh->uh_dport;
   3497 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
   3498 		break;
   3499 	default:
   3500 		return;
   3501 	}
   3502 
   3503 	input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
   3504 	if (mp->m_pkthdr.ether_vtag)
   3505 		common.flex_bytes ^= htons(ETHERTYPE_VLAN);
   3506 	else
   3507 		common.flex_bytes ^= etype;
   3508 	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
   3509 
   3510 	que = &adapter->queues[txr->me];
   3511 	/*
   3512 	** This assumes the Rx queue and Tx
   3513 	** queue are bound to the same CPU
   3514 	*/
   3515 	ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
   3516 	    input, common, que->msix);
   3517 }
   3518 #endif /* IXGBE_FDIR */
   3519 
   3520 /**********************************************************************
   3521  *
   3522  *  Examine each tx_buffer in the used queue. If the hardware is done
   3523  *  processing the packet then free associated resources. The
   3524  *  tx_buffer is put back on the free queue.
   3525  *
   3526  **********************************************************************/
   3527 static bool
   3528 ixgbe_txeof(struct tx_ring *txr)
   3529 {
   3530 	struct adapter	*adapter = txr->adapter;
   3531 	struct ifnet	*ifp = adapter->ifp;
   3532 	u32	first, last, done, processed;
   3533 	struct ixgbe_tx_buf *tx_buffer;
   3534 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   3535 	struct timeval now, elapsed;
   3536 
   3537 	KASSERT(mutex_owned(&txr->tx_mtx));
   3538 
   3539 	if (txr->tx_avail == adapter->num_tx_desc) {
   3540 		txr->queue_status = IXGBE_QUEUE_IDLE;
   3541 		return false;
   3542 	}
   3543 
   3544 	processed = 0;
   3545 	first = txr->next_to_clean;
   3546 	tx_buffer = &txr->tx_buffers[first];
   3547 	/* For cleanup we just use legacy struct */
   3548 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   3549 	last = tx_buffer->eop_index;
   3550 	if (last == -1)
   3551 		return false;
   3552 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   3553 
   3554 	/*
   3555 	** Get the index of the first descriptor
   3556 	** BEYOND the EOP and call that 'done'.
    3557 	** This keeps the comparison in the
    3558 	** inner while loop below simple.
   3559 	*/
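	/*
	** E.g. (illustrative): with num_tx_desc == 8, first == 5 and
	** eop_index == 7, 'done' becomes 0 and the inner loop below
	** cleans descriptors 5, 6 and 7 before re-checking for more work.
	*/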
   3560 	if (++last == adapter->num_tx_desc) last = 0;
   3561 	done = last;
   3562 
    3563 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3564 	    BUS_DMASYNC_POSTREAD);
   3565 	/*
   3566 	** Only the EOP descriptor of a packet now has the DD
   3567 	** bit set, this is what we look for...
   3568 	*/
   3569 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   3570 		/* We clean the range of the packet */
   3571 		while (first != done) {
   3572 			tx_desc->upper.data = 0;
   3573 			tx_desc->lower.data = 0;
   3574 			tx_desc->buffer_addr = 0;
   3575 			++txr->tx_avail;
   3576 			++processed;
   3577 
   3578 			if (tx_buffer->m_head) {
   3579 				txr->bytes +=
   3580 				    tx_buffer->m_head->m_pkthdr.len;
   3581 				bus_dmamap_sync(txr->txtag->dt_dmat,
   3582 				    tx_buffer->map,
   3583 				    0, tx_buffer->m_head->m_pkthdr.len,
   3584 				    BUS_DMASYNC_POSTWRITE);
   3585 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3586 				m_freem(tx_buffer->m_head);
   3587 				tx_buffer->m_head = NULL;
   3588 			}
   3589 			tx_buffer->eop_index = -1;
   3590 			getmicrotime(&txr->watchdog_time);
   3591 
   3592 			if (++first == adapter->num_tx_desc)
   3593 				first = 0;
   3594 
   3595 			tx_buffer = &txr->tx_buffers[first];
   3596 			tx_desc =
   3597 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   3598 		}
   3599 		++txr->packets;
   3600 		++ifp->if_opackets;
   3601 		/* See if there is more work now */
   3602 		last = tx_buffer->eop_index;
   3603 		if (last != -1) {
   3604 			eop_desc =
   3605 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   3606 			/* Get next done point */
   3607 			if (++last == adapter->num_tx_desc) last = 0;
   3608 			done = last;
   3609 		} else
   3610 			break;
   3611 	}
   3612 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3613 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3614 
   3615 	txr->next_to_clean = first;
   3616 
   3617 	/*
    3618 	** Watchdog calculation: we know there is
    3619 	** work outstanding or the first return above
    3620 	** would have been taken, so nothing processed
    3621 	** for too long indicates a hang.
   3622 	*/
   3623 	getmicrotime(&now);
   3624 	timersub(&now, &txr->watchdog_time, &elapsed);
   3625 	if (!processed && tvtohz(&elapsed) > IXGBE_WATCHDOG)
   3626 		txr->queue_status = IXGBE_QUEUE_HUNG;
   3627 
   3628 	/*
   3629 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   3630 	 * it is OK to send packets. If there are no pending descriptors,
   3631 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   3632 	 * restart the timeout.
   3633 	 */
   3634 	if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
   3635 		ifp->if_flags &= ~IFF_OACTIVE;
   3636 		if (txr->tx_avail == adapter->num_tx_desc) {
   3637 			txr->queue_status = IXGBE_QUEUE_IDLE;
   3638 			return false;
   3639 		}
   3640 	}
   3641 
   3642 	return true;
   3643 }
   3644 
   3645 /*********************************************************************
   3646  *
   3647  *  Refresh mbuf buffers for RX descriptor rings
    3648  *   - keeps its own state, so discards due to resource
    3649  *     exhaustion are unnecessary: if an mbuf cannot be obtained
    3650  *     it just returns, keeping its placeholder, and can simply
    3651  *     be called again later to retry.
   3652  *
   3653  **********************************************************************/
   3654 static void
   3655 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   3656 {
   3657 	struct adapter		*adapter = rxr->adapter;
   3658 	struct ixgbe_rx_buf	*rxbuf;
   3659 	struct mbuf		*mh, *mp;
   3660 	int			i, j, error;
   3661 	bool			refreshed = false;
   3662 
   3663 	i = j = rxr->next_to_refresh;
   3664 	/* Control the loop with one beyond */
   3665 	if (++j == adapter->num_rx_desc)
   3666 		j = 0;
   3667 
   3668 	while (j != limit) {
   3669 		rxbuf = &rxr->rx_buffers[i];
   3670 		if (rxr->hdr_split == FALSE)
   3671 			goto no_split;
   3672 
   3673 		if (rxbuf->m_head == NULL) {
   3674 			mh = m_gethdr(M_DONTWAIT, MT_DATA);
   3675 			if (mh == NULL)
   3676 				goto update;
   3677 		} else
   3678 			mh = rxbuf->m_head;
   3679 
    3680 		mh->m_pkthdr.len = mh->m_len = MHLEN;
   3682 		mh->m_flags |= M_PKTHDR;
   3683 		/* Get the memory mapping */
   3684 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3685 		    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   3686 		if (error != 0) {
   3687 			printf("Refresh mbufs: hdr dmamap load"
   3688 			    " failure - %d\n", error);
   3689 			m_free(mh);
   3690 			rxbuf->m_head = NULL;
   3691 			goto update;
   3692 		}
   3693 		rxbuf->m_head = mh;
   3694 		ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_PREREAD);
   3695 		rxr->rx_base[i].read.hdr_addr =
   3696 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3697 
   3698 no_split:
   3699 		if (rxbuf->m_pack == NULL) {
   3700 			mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3701 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3702 			if (mp == NULL) {
   3703 				rxr->no_jmbuf.ev_count++;
   3704 				goto update;
   3705 			}
   3706 		} else
   3707 			mp = rxbuf->m_pack;
   3708 
   3709 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3710 		/* Get the memory mapping */
   3711 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3712 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3713 		if (error != 0) {
   3714 			printf("Refresh mbufs: payload dmamap load"
   3715 			    " failure - %d\n", error);
   3716 			m_free(mp);
   3717 			rxbuf->m_pack = NULL;
   3718 			goto update;
   3719 		}
   3720 		rxbuf->m_pack = mp;
   3721 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3722 		    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3723 		rxr->rx_base[i].read.pkt_addr =
   3724 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3725 
   3726 		refreshed = true;
   3727 		/* Next is precalculated */
   3728 		i = j;
   3729 		rxr->next_to_refresh = i;
   3730 		if (++j == adapter->num_rx_desc)
   3731 			j = 0;
   3732 	}
   3733 update:
   3734 	if (refreshed) /* Update hardware tail index */
   3735 		IXGBE_WRITE_REG(&adapter->hw,
   3736 		    IXGBE_RDT(rxr->me), rxr->next_to_refresh);
   3737 	return;
   3738 }
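/*
** Index walk, worked example (illustrative): with num_rx_desc == 8,
** next_to_refresh == 6 and limit == 2, the loop above visits buffers
** 6, 7 and 0, leaves next_to_refresh == 1 and, if anything was
** refreshed, writes 1 to RDT, handing the hardware all descriptors
** up to but not including index 1.
*/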
   3739 
   3740 /*********************************************************************
   3741  *
   3742  *  Allocate memory for rx_buffer structures. Since we use one
   3743  *  rx_buffer per received packet, the maximum number of rx_buffer's
   3744  *  that we'll need is equal to the number of receive descriptors
   3745  *  that we've allocated.
   3746  *
   3747  **********************************************************************/
   3748 static int
   3749 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   3750 {
   3751 	struct	adapter 	*adapter = rxr->adapter;
   3752 	device_t 		dev = adapter->dev;
   3753 	struct ixgbe_rx_buf 	*rxbuf;
   3754 	int             	i, bsize, error;
   3755 
   3756 	bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
   3757 	if (!(rxr->rx_buffers =
   3758 	    (struct ixgbe_rx_buf *) malloc(bsize,
   3759 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   3760 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   3761 		error = ENOMEM;
   3762 		goto fail;
   3763 	}
   3764 
   3765 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3766 				   1, 0,	/* alignment, bounds */
   3767 				   MSIZE,		/* maxsize */
   3768 				   1,			/* nsegments */
   3769 				   MSIZE,		/* maxsegsize */
   3770 				   0,			/* flags */
   3771 				   &rxr->htag))) {
   3772 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   3773 		goto fail;
   3774 	}
   3775 
   3776 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3777 				   1, 0,	/* alignment, bounds */
   3778 				   MJUM16BYTES,		/* maxsize */
   3779 				   1,			/* nsegments */
   3780 				   MJUM16BYTES,		/* maxsegsize */
   3781 				   0,			/* flags */
   3782 				   &rxr->ptag))) {
   3783 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   3784 		goto fail;
   3785 	}
   3786 
    3787 	for (i = 0; i < adapter->num_rx_desc; i++) {
    3788 		rxbuf = &rxr->rx_buffers[i];
   3789 		error = ixgbe_dmamap_create(rxr->htag,
   3790 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   3791 		if (error) {
   3792 			aprint_error_dev(dev, "Unable to create RX head map\n");
   3793 			goto fail;
   3794 		}
   3795 		error = ixgbe_dmamap_create(rxr->ptag,
   3796 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   3797 		if (error) {
   3798 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   3799 			goto fail;
   3800 		}
   3801 	}
   3802 
   3803 	return (0);
   3804 
   3805 fail:
   3806 	/* Frees all, but can handle partial completion */
   3807 	ixgbe_free_receive_structures(adapter);
   3808 	return (error);
   3809 }
   3810 
   3811 /*
   3812 ** Used to detect a descriptor that has
   3813 ** been merged by Hardware RSC.
   3814 */
   3815 static inline u32
   3816 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   3817 {
   3818 	return (le32toh(rx->wb.lower.lo_dword.data) &
   3819 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   3820 }
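/*
** Sketch: the writeback descriptor's lo_dword carries an RSCCNT
** field; a nonzero value N means the hardware coalesced N
** descriptors into this one, and ixgbe_rxeof() then takes the NEXTP
** index from the descriptor instead of assuming the fragments are
** sequential.
*/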
   3821 
   3822 /*********************************************************************
   3823  *
   3824  *  Initialize Hardware RSC (LRO) feature on 82599
   3825  *  for an RX ring, this is toggled by the LRO capability
   3826  *  even though it is transparent to the stack.
   3827  *
   3828  **********************************************************************/
   3829 static void
   3830 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   3831 {
   3832 	struct	adapter 	*adapter = rxr->adapter;
   3833 	struct	ixgbe_hw	*hw = &adapter->hw;
   3834 	u32			rscctrl, rdrxctl;
   3835 
   3836 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   3837 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   3838 	rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   3839 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   3840 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   3841 
   3842 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   3843 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   3844 	/*
    3845 	** Limit the number of descriptors that can be
    3846 	** combined, so the merged size does not exceed 64KB
   3847 	*/
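	/*
	** E.g. (assuming the usual cluster sizes: MCLBYTES 2KB,
	** MJUMPAGESIZE 4KB, MJUM9BYTES 9KB, and 16KB jumbo clusters):
	** 16 x 2KB, 8 x 4KB and 4 x 9KB all stay under the 64KB cap.
	*/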
   3848 	if (adapter->rx_mbuf_sz == MCLBYTES)
   3849 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   3850 	else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
   3851 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   3852 	else if (adapter->rx_mbuf_sz == MJUM9BYTES)
   3853 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   3854 	else  /* Using 16K cluster */
   3855 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
   3856 
   3857 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   3858 
   3859 	/* Enable TCP header recognition */
   3860 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   3861 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
   3862 	    IXGBE_PSRTYPE_TCPHDR));
   3863 
   3864 	/* Disable RSC for ACK packets */
   3865 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   3866 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   3867 
   3868 	rxr->hw_rsc = TRUE;
   3869 }
   3870 
   3871 
   3872 static void
   3873 ixgbe_free_receive_ring(struct rx_ring *rxr)
   3874 {
   3875 	struct  adapter         *adapter;
   3876 	struct ixgbe_rx_buf       *rxbuf;
   3877 	int i;
   3878 
   3879 	adapter = rxr->adapter;
   3880 	for (i = 0; i < adapter->num_rx_desc; i++) {
   3881 		rxbuf = &rxr->rx_buffers[i];
   3882 		if (rxbuf->m_head != NULL) {
   3883 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3884 			    BUS_DMASYNC_POSTREAD);
   3885 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3886 			rxbuf->m_head->m_flags |= M_PKTHDR;
   3887 			m_freem(rxbuf->m_head);
   3888 		}
   3889 		if (rxbuf->m_pack != NULL) {
   3890 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3891 			    0, rxbuf->m_pack->m_pkthdr.len,
   3892 			    BUS_DMASYNC_POSTREAD);
   3893 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3894 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   3895 			m_freem(rxbuf->m_pack);
   3896 		}
   3897 		rxbuf->m_head = NULL;
   3898 		rxbuf->m_pack = NULL;
   3899 	}
   3900 }
   3901 
   3902 
   3903 /*********************************************************************
   3904  *
   3905  *  Initialize a receive ring and its buffers.
   3906  *
   3907  **********************************************************************/
   3908 static int
   3909 ixgbe_setup_receive_ring(struct rx_ring *rxr)
   3910 {
   3911 	struct	adapter 	*adapter;
   3912 	struct ifnet		*ifp;
   3913 	device_t		dev;
   3914 	struct ixgbe_rx_buf	*rxbuf;
   3915 #ifdef LRO
   3916 	struct lro_ctrl		*lro = &rxr->lro;
   3917 #endif /* LRO */
   3918 	int			rsize, error = 0;
   3919 
   3920 	adapter = rxr->adapter;
   3921 	ifp = adapter->ifp;
   3922 	dev = adapter->dev;
   3923 
   3924 	/* Clear the ring contents */
   3925 	IXGBE_RX_LOCK(rxr);
   3926 	rsize = roundup2(adapter->num_rx_desc *
   3927 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3928 	bzero((void *)rxr->rx_base, rsize);
   3929 
   3930 	/* Free current RX buffer structs and their mbufs */
   3931 	ixgbe_free_receive_ring(rxr);
   3932 
   3933 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3934 	 * or size of jumbo mbufs may have changed.
   3935 	 */
   3936 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3937 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   3938 
   3939 	/* Configure header split? */
   3940 	if (ixgbe_header_split)
   3941 		rxr->hdr_split = TRUE;
   3942 
   3943 	/* Now replenish the mbufs */
   3944 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3945 		struct mbuf	*mh, *mp;
   3946 
   3947 		rxbuf = &rxr->rx_buffers[j];
   3948 		/*
   3949 		** Don't allocate mbufs if not
    3950 		** doing header split, it's wasteful
   3951 		*/
   3952 		if (rxr->hdr_split == FALSE)
   3953 			goto skip_head;
   3954 
   3955 		/* First the header */
   3956 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3957 		if (rxbuf->m_head == NULL) {
   3958 			error = ENOBUFS;
   3959 			goto fail;
   3960 		}
   3961 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3962 		mh = rxbuf->m_head;
   3963 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3964 		mh->m_flags |= M_PKTHDR;
   3965 		/* Get the memory mapping */
   3966 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3967 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3968 		if (error != 0) /* Nothing elegant to do here */
   3969 			goto fail;
   3970 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3971 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3972 		/* Update descriptor */
   3973 		rxr->rx_base[j].read.hdr_addr =
   3974 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3975 
   3976 skip_head:
   3977 		/* Now the payload cluster */
   3978 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3979 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3980 		if (rxbuf->m_pack == NULL) {
   3981 			error = ENOBUFS;
    3982 			goto fail;
   3983 		}
   3984 		mp = rxbuf->m_pack;
   3985 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3986 		/* Get the memory mapping */
   3987 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3988 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3989 		if (error != 0)
    3990 			goto fail;
   3991 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3992 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3993 		/* Update descriptor */
   3994 		rxr->rx_base[j].read.pkt_addr =
   3995 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3996 	}
   3997 
   3998 
   3999 	/* Setup our descriptor indices */
   4000 	rxr->next_to_check = 0;
   4001 	rxr->next_to_refresh = 0;
   4002 	rxr->lro_enabled = FALSE;
   4003 	rxr->rx_split_packets.ev_count = 0;
   4004 	rxr->rx_bytes.ev_count = 0;
   4005 	rxr->discard = FALSE;
   4006 
   4007 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4008 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   4009 
   4010 	/*
   4011 	** Now set up the LRO interface:
   4012 	** 82598 uses software LRO, the
   4013 	** 82599 uses a hardware assist.
   4014 	*/
   4015 	if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
   4016 	    (ifp->if_capenable & IFCAP_RXCSUM) &&
   4017 	    (ifp->if_capenable & IFCAP_LRO))
   4018 		ixgbe_setup_hw_rsc(rxr);
   4019 #ifdef LRO
   4020 	else if (ifp->if_capenable & IFCAP_LRO) {
   4021 		int err = tcp_lro_init(lro);
   4022 		if (err) {
   4023 			device_printf(dev, "LRO Initialization failed!\n");
   4024 			goto fail;
   4025 		}
   4026 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   4027 		rxr->lro_enabled = TRUE;
   4028 		lro->ifp = adapter->ifp;
   4029 	}
   4030 #endif /* LRO */
   4031 
   4032 	IXGBE_RX_UNLOCK(rxr);
   4033 	return (0);
   4034 
   4035 fail:
   4036 	ixgbe_free_receive_ring(rxr);
   4037 	IXGBE_RX_UNLOCK(rxr);
   4038 	return (error);
   4039 }
   4040 
   4041 /*********************************************************************
   4042  *
   4043  *  Initialize all receive rings.
   4044  *
   4045  **********************************************************************/
   4046 static int
   4047 ixgbe_setup_receive_structures(struct adapter *adapter)
   4048 {
   4049 	struct rx_ring *rxr = adapter->rx_rings;
   4050 	int j;
   4051 
   4052 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   4053 		if (ixgbe_setup_receive_ring(rxr))
   4054 			goto fail;
   4055 
   4056 	return (0);
   4057 fail:
   4058 	/*
    4059 	 * Free the RX buffers allocated so far. We only handle
    4060 	 * the rings that completed; the failing ring will have
    4061 	 * cleaned up for itself. Ring 'j' failed, so it is the terminus.
   4062 	 */
   4063 	for (int i = 0; i < j; ++i) {
   4064 		rxr = &adapter->rx_rings[i];
   4065 		ixgbe_free_receive_ring(rxr);
   4066 	}
   4067 
   4068 	return (ENOBUFS);
   4069 }
   4070 
   4071 /*********************************************************************
   4072  *
   4073  *  Setup receive registers and features.
   4074  *
   4075  **********************************************************************/
   4076 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   4077 
   4078 static void
   4079 ixgbe_initialize_receive_units(struct adapter *adapter)
   4080 {
   4081 	int i;
   4082 	struct	rx_ring	*rxr = adapter->rx_rings;
   4083 	struct ixgbe_hw	*hw = &adapter->hw;
   4084 	struct ifnet   *ifp = adapter->ifp;
   4085 	u32		bufsz, rxctrl, fctrl, srrctl, rxcsum;
   4086 	u32		reta, mrqc = 0, hlreg, r[10];
   4087 
   4088 
   4089 	/*
   4090 	 * Make sure receives are disabled while
   4091 	 * setting up the descriptor ring
   4092 	 */
   4093 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4094 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
   4095 	    rxctrl & ~IXGBE_RXCTRL_RXEN);
   4096 
   4097 	/* Enable broadcasts */
   4098 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   4099 	fctrl |= IXGBE_FCTRL_BAM;
   4100 	fctrl |= IXGBE_FCTRL_DPF;
   4101 	fctrl |= IXGBE_FCTRL_PMCF;
   4102 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   4103 
   4104 	/* Set for Jumbo Frames? */
   4105 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   4106 	if (ifp->if_mtu > ETHERMTU)
   4107 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   4108 	else
   4109 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   4110 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   4111 
    4112 	bufsz = adapter->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   4113 
   4114 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   4115 		u64 rdba = rxr->rxdma.dma_paddr;
   4116 
   4117 		/* Setup the Base and Length of the Rx Descriptor Ring */
   4118 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
   4119 			       (rdba & 0x00000000ffffffffULL));
   4120 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
   4121 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
   4122 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   4123 
   4124 		/* Set up the SRRCTL register */
   4125 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
   4126 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   4127 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   4128 		srrctl |= bufsz;
   4129 		if (rxr->hdr_split) {
   4130 			/* Use a standard mbuf for the header */
   4131 			srrctl |= ((IXGBE_RX_HDR <<
   4132 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   4133 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   4134 			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   4135 		} else
   4136 			srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   4137 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
   4138 
   4139 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   4140 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
   4141 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
   4142 	}
   4143 
   4144 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   4145 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
   4146 			      IXGBE_PSRTYPE_UDPHDR |
   4147 			      IXGBE_PSRTYPE_IPV4HDR |
   4148 			      IXGBE_PSRTYPE_IPV6HDR;
   4149 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
   4150 	}
   4151 
   4152 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   4153 
   4154 	/* Setup RSS */
   4155 	if (adapter->num_queues > 1) {
   4156 		int j;
   4157 		reta = 0;
   4158 
   4159 		/* set up random bits */
   4160 		cprng_fast(&r, sizeof(r));
   4161 
   4162 		/* Set up the redirection table */
   4163 		for (i = 0, j = 0; i < 128; i++, j++) {
   4164 			if (j == adapter->num_queues) j = 0;
   4165 			reta = (reta << 8) | (j * 0x11);
   4166 			if ((i & 3) == 3)
   4167 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
   4168 		}
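		/*
		** Worked example (illustrative): with num_queues == 2
		** the byte stream is 0x00, 0x11, 0x00, 0x11, ... so
		** every fourth iteration writes 0x00110011 to the next
		** RETA register; the 0x11 multiplier replicates the
		** queue index into both nibbles of each 8-bit
		** redirection entry.
		*/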
   4169 
   4170 		/* Now fill our hash function seeds */
   4171 		for (i = 0; i < 10; i++)
   4172 			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), r[i]);
   4173 
   4174 		/* Perform hash on these packet types */
   4175 		mrqc = IXGBE_MRQC_RSSEN
   4176 		     | IXGBE_MRQC_RSS_FIELD_IPV4
   4177 		     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
   4178 		     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
   4179 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
   4180 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
   4181 		     | IXGBE_MRQC_RSS_FIELD_IPV6
   4182 		     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
   4183 		     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
   4184 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
   4185 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
   4186 
   4187 		/* RSS and RX IPP Checksum are mutually exclusive */
   4188 		rxcsum |= IXGBE_RXCSUM_PCSD;
   4189 	}
   4190 
   4191 	if (ifp->if_capenable & IFCAP_RXCSUM)
   4192 		rxcsum |= IXGBE_RXCSUM_PCSD;
   4193 
   4194 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   4195 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   4196 
   4197 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   4198 
   4199 	return;
   4200 }
   4201 
   4202 /*********************************************************************
   4203  *
   4204  *  Free all receive rings.
   4205  *
   4206  **********************************************************************/
   4207 static void
   4208 ixgbe_free_receive_structures(struct adapter *adapter)
   4209 {
   4210 	struct rx_ring *rxr = adapter->rx_rings;
   4211 
   4212 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   4213 #ifdef LRO
   4214 		struct lro_ctrl		*lro = &rxr->lro;
   4215 #endif /* LRO */
   4216 		ixgbe_free_receive_buffers(rxr);
   4217 #ifdef LRO
   4218 		/* Free LRO memory */
   4219 		tcp_lro_free(lro);
   4220 #endif /* LRO */
   4221 		/* Free the ring memory as well */
   4222 		ixgbe_dma_free(adapter, &rxr->rxdma);
   4223 	}
   4224 
   4225 	free(adapter->rx_rings, M_DEVBUF);
   4226 }
   4227 
   4228 
   4229 /*********************************************************************
   4230  *
   4231  *  Free receive ring data structures
   4232  *
   4233  **********************************************************************/
   4234 static void
   4235 ixgbe_free_receive_buffers(struct rx_ring *rxr)
   4236 {
   4237 	struct adapter		*adapter = rxr->adapter;
   4238 	struct ixgbe_rx_buf	*rxbuf;
   4239 
   4240 	INIT_DEBUGOUT("free_receive_structures: begin");
   4241 
   4242 	/* Cleanup any existing buffers */
   4243 	if (rxr->rx_buffers != NULL) {
   4244 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   4245 			rxbuf = &rxr->rx_buffers[i];
   4246 			if (rxbuf->m_head != NULL) {
   4247 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   4248 				    BUS_DMASYNC_POSTREAD);
   4249 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   4250 				rxbuf->m_head->m_flags |= M_PKTHDR;
   4251 				m_freem(rxbuf->m_head);
   4252 			}
   4253 			if (rxbuf->m_pack != NULL) {
   4254 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   4255 				    0, rxbuf->m_pack->m_pkthdr.len,
   4256 				    BUS_DMASYNC_POSTREAD);
   4257 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   4258 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   4259 				m_freem(rxbuf->m_pack);
   4260 			}
   4261 			rxbuf->m_head = NULL;
   4262 			rxbuf->m_pack = NULL;
   4263 			if (rxbuf->hmap != NULL) {
   4264 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   4265 				rxbuf->hmap = NULL;
   4266 			}
   4267 			if (rxbuf->pmap != NULL) {
   4268 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   4269 				rxbuf->pmap = NULL;
   4270 			}
   4271 		}
   4272 		if (rxr->rx_buffers != NULL) {
   4273 			free(rxr->rx_buffers, M_DEVBUF);
   4274 			rxr->rx_buffers = NULL;
   4275 		}
   4276 	}
   4277 
   4278 	if (rxr->htag != NULL) {
   4279 		ixgbe_dma_tag_destroy(rxr->htag);
   4280 		rxr->htag = NULL;
   4281 	}
   4282 	if (rxr->ptag != NULL) {
   4283 		ixgbe_dma_tag_destroy(rxr->ptag);
   4284 		rxr->ptag = NULL;
   4285 	}
   4286 
   4287 	return;
   4288 }
   4289 
   4290 static __inline void
   4291 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   4292 {
   4293 	struct ethercom *ec;
   4294 	struct adapter	*adapter = ifp->if_softc;
   4295 	int s;
   4296 
   4297 	ec = &adapter->osdep.ec;
   4298 
   4299 #ifdef LRO
    4300 	/*
    4301 	 * At the moment LRO is only for IPv4/TCP packets whose TCP
    4302 	 * checksum has been verified by hardware, and which carry no
    4303 	 * VLAN tag in the ethernet header.
    4304 	 */
    4305 	if (rxr->lro_enabled &&
    4306 	    (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
    4307 	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
    4308 	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
    4309 	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
    4310 	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
    4311 	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
    4312 		/*
    4313 		 * Send to the stack if:
    4314 		 *  - LRO is not enabled, or
    4315 		 *  - there are no LRO resources, or
    4316 		 *  - the lro enqueue fails
    4317 		 */
    4318 		if (rxr->lro.lro_cnt != 0)
    4319 			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
    4320 				return;
    4321 	}
   4322 #endif /* LRO */
   4323 
   4324 	IXGBE_RX_UNLOCK(rxr);
   4325 
   4326 	s = splnet();
   4327 	/* Pass this up to any BPF listeners. */
   4328 	bpf_mtap(ifp, m);
   4329 	(*ifp->if_input)(ifp, m);
   4330 	splx(s);
   4331 
   4332 	IXGBE_RX_LOCK(rxr);
   4333 }
   4334 
   4335 static __inline void
   4336 ixgbe_rx_discard(struct rx_ring *rxr, int i)
   4337 {
   4338 	struct ixgbe_rx_buf	*rbuf;
   4339 
   4340 	rbuf = &rxr->rx_buffers[i];
   4341 
    4342 	if (rbuf->fmp != NULL) {	/* Partial chain? */
    4343 		rbuf->fmp->m_flags |= M_PKTHDR;
    4344 		m_freem(rbuf->fmp);
    4345 		rbuf->fmp = NULL;
   4346 	}
   4347 
   4348 	/*
   4349 	** With advanced descriptors the writeback
    4350 	** clobbers the buffer addrs, so it's easier
   4351 	** to just free the existing mbufs and take
   4352 	** the normal refresh path to get new buffers
   4353 	** and mapping.
   4354 	*/
   4355 	if (rbuf->m_head) {
   4356 		m_free(rbuf->m_head);
   4357 		rbuf->m_head = NULL;
   4358 	}
   4359 
   4360 	if (rbuf->m_pack) {
   4361 		m_free(rbuf->m_pack);
   4362 		rbuf->m_pack = NULL;
   4363 	}
   4364 
   4365 	return;
   4366 }
   4367 
   4368 
   4369 /*********************************************************************
   4370  *
   4371  *  This routine executes in interrupt context. It replenishes
   4372  *  the mbufs in the descriptor and sends data which has been
   4373  *  dma'ed into host memory to upper layer.
   4374  *
   4375  *  We loop at most count times if count is > 0, or until done if
   4376  *  count < 0.
   4377  *
   4378  *  Return TRUE for more work, FALSE for all clean.
   4379  *********************************************************************/
   4380 static bool
   4381 ixgbe_rxeof(struct ix_queue *que, int count)
   4382 {
   4383 	struct adapter		*adapter = que->adapter;
   4384 	struct rx_ring		*rxr = que->rxr;
   4385 	struct ifnet		*ifp = adapter->ifp;
   4386 #ifdef LRO
   4387 	struct lro_ctrl		*lro = &rxr->lro;
   4388 	struct lro_entry	*queued;
   4389 #endif /* LRO */
   4390 	int			i, nextp, processed = 0;
   4391 	u32			staterr = 0;
   4392 	union ixgbe_adv_rx_desc	*cur;
   4393 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   4394 
   4395 	IXGBE_RX_LOCK(rxr);
   4396 
   4397 	for (i = rxr->next_to_check; count != 0;) {
   4398 		struct mbuf	*sendmp, *mh, *mp;
   4399 		u32		rsc, ptype;
   4400 		u16		hlen, plen, hdr, vtag;
   4401 		bool		eop;
   4402 
   4403 		/* Sync the ring. */
   4404 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4405 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   4406 
   4407 		cur = &rxr->rx_base[i];
   4408 		staterr = le32toh(cur->wb.upper.status_error);
   4409 
   4410 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   4411 			break;
   4412 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   4413 			break;
   4414 
   4415 		count--;
   4416 		sendmp = NULL;
   4417 		nbuf = NULL;
   4418 		rsc = 0;
   4419 		cur->wb.upper.status_error = 0;
   4420 		rbuf = &rxr->rx_buffers[i];
   4421 		mh = rbuf->m_head;
   4422 		mp = rbuf->m_pack;
   4423 
   4424 		plen = le16toh(cur->wb.upper.length);
   4425 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   4426 		    IXGBE_RXDADV_PKTTYPE_MASK;
   4427 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   4428 		vtag = le16toh(cur->wb.upper.vlan);
   4429 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   4430 
   4431 		/* Make sure bad packets are discarded */
   4432 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   4433 		    (rxr->discard)) {
   4434 			ifp->if_ierrors++;
   4435 			rxr->rx_discarded.ev_count++;
   4436 			if (eop)
   4437 				rxr->discard = FALSE;
   4438 			else
   4439 				rxr->discard = TRUE;
   4440 			ixgbe_rx_discard(rxr, i);
   4441 			goto next_desc;
   4442 		}
   4443 
   4444 		/*
   4445 		** On 82599 which supports a hardware
   4446 		** LRO (called HW RSC), packets need
   4447 		** not be fragmented across sequential
   4448 		** descriptors, rather the next descriptor
   4449 		** is indicated in bits of the descriptor.
    4450 		** This also means that we might process
   4451 		** more than one packet at a time, something
   4452 		** that has never been true before, it
   4453 		** required eliminating global chain pointers
   4454 		** in favor of what we are doing here.  -jfv
   4455 		*/
   4456 		if (!eop) {
   4457 			/*
   4458 			** Figure out the next descriptor
   4459 			** of this frame.
   4460 			*/
   4461 			if (rxr->hw_rsc == TRUE) {
   4462 				rsc = ixgbe_rsc_count(cur);
   4463 				rxr->rsc_num += (rsc - 1);
   4464 			}
   4465 			if (rsc) { /* Get hardware index */
   4466 				nextp = ((staterr &
   4467 				    IXGBE_RXDADV_NEXTP_MASK) >>
   4468 				    IXGBE_RXDADV_NEXTP_SHIFT);
   4469 			} else { /* Just sequential */
   4470 				nextp = i + 1;
   4471 				if (nextp == adapter->num_rx_desc)
   4472 					nextp = 0;
   4473 			}
   4474 			nbuf = &rxr->rx_buffers[nextp];
   4475 			prefetch(nbuf);
   4476 		}
   4477 		/*
   4478 		** The header mbuf is ONLY used when header
   4479 		** split is enabled, otherwise we get normal
    4480 		** behavior, i.e., both header and payload
   4481 		** are DMA'd into the payload buffer.
   4482 		**
   4483 		** Rather than using the fmp/lmp global pointers
   4484 		** we now keep the head of a packet chain in the
   4485 		** buffer struct and pass this along from one
   4486 		** descriptor to the next, until we get EOP.
   4487 		*/
   4488 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   4489 			/* This must be an initial descriptor */
   4490 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   4491 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   4492 			if (hlen > IXGBE_RX_HDR)
   4493 				hlen = IXGBE_RX_HDR;
   4494 			mh->m_len = hlen;
   4495 			mh->m_flags |= M_PKTHDR;
   4496 			mh->m_next = NULL;
   4497 			mh->m_pkthdr.len = mh->m_len;
   4498 			/* Null buf pointer so it is refreshed */
   4499 			rbuf->m_head = NULL;
   4500 			/*
   4501 			** Check the payload length, this
    4502 			** could be zero if it's a small
   4503 			** packet.
   4504 			*/
   4505 			if (plen > 0) {
   4506 				mp->m_len = plen;
   4507 				mp->m_next = NULL;
   4508 				mp->m_flags &= ~M_PKTHDR;
   4509 				mh->m_next = mp;
   4510 				mh->m_pkthdr.len += mp->m_len;
   4511 				/* Null buf pointer so it is refreshed */
   4512 				rbuf->m_pack = NULL;
   4513 				rxr->rx_split_packets.ev_count++;
   4514 			}
   4515 			/*
   4516 			** Now create the forward
   4517 			** chain so when complete
    4518 			** we won't have to.
   4519 			*/
    4520 			if (eop == 0) {
    4521 				/* stash the chain head */
    4522 				nbuf->fmp = mh;
    4523 				/* Make forward chain */
    4524 				if (plen)
    4525 					mp->m_next = nbuf->m_pack;
    4526 				else
    4527 					mh->m_next = nbuf->m_pack;
    4528 			} else {
    4529 				/* Singlet, prepare to send */
    4530 				sendmp = mh;
    4531 				if (VLAN_ATTACHED(&adapter->osdep.ec) &&
    4532 				    (staterr & IXGBE_RXD_STAT_VP)) {
    4533 					/* XXX Do something reasonable on
    4534 					 * error.
    4535 					 */
    4536 #if 0
    4537 					printf("%s.%d: VLAN_INPUT_TAG\n",
    4538 					    __func__, __LINE__);
    4539 					Debugger();
    4540 #endif
    4541 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
    4542 					    printf("%s: could not apply VLAN "
    4543 					        "tag", __func__));
    4544 				}
    4545 			}
   4546 		} else {
   4547 			/*
   4548 			** Either no header split, or a
   4549 			** secondary piece of a fragmented
   4550 			** split packet.
   4551 			*/
   4552 			mp->m_len = plen;
   4553 			/*
    4554 			** See if there is a stored head from an
    4555 			** earlier descriptor of this frame.
   4556 			*/
   4557 			sendmp = rbuf->fmp;
   4558 			rbuf->m_pack = rbuf->fmp = NULL;
   4559 
   4560 			if (sendmp != NULL) /* secondary frag */
   4561 				sendmp->m_pkthdr.len += mp->m_len;
   4562 			else {
   4563 				/* first desc of a non-ps chain */
   4564 				sendmp = mp;
   4565 				sendmp->m_flags |= M_PKTHDR;
   4566 				sendmp->m_pkthdr.len = mp->m_len;
   4567 				if (staterr & IXGBE_RXD_STAT_VP) {
   4568 					/* XXX Do something reasonable on
   4569 					 * error.
   4570 					 */
   4571 #if 0
   4572 					printf("%s.%d: VLAN_INPUT_TAG\n",
   4573 					    __func__, __LINE__);
   4574 					Debugger();
   4575 #endif
   4576 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   4577 					    printf("%s: could not apply VLAN "
   4578 					        "tag", __func__));
   4579 				}
    4580 			}
   4581 			/* Pass the head pointer on */
   4582 			if (eop == 0) {
   4583 				nbuf->fmp = sendmp;
   4584 				sendmp = NULL;
   4585 				mp->m_next = nbuf->m_pack;
   4586 			}
   4587 		}
   4588 		++processed;
   4589 		/* Sending this frame? */
   4590 		if (eop) {
   4591 			sendmp->m_pkthdr.rcvif = ifp;
   4592 			ifp->if_ipackets++;
   4593 			rxr->rx_packets.ev_count++;
   4594 			/* capture data for AIM */
   4595 			rxr->bytes += sendmp->m_pkthdr.len;
   4596 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   4597 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   4598 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   4599 				   &adapter->stats);
   4600 			}
   4601 #if __FreeBSD_version >= 800000
   4602 			sendmp->m_pkthdr.flowid = que->msix;
   4603 			sendmp->m_flags |= M_FLOWID;
   4604 #endif
   4605 		}
   4606 next_desc:
   4607 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4608 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   4609 
   4610 		/* Advance our pointers to the next descriptor. */
   4611 		if (++i == adapter->num_rx_desc)
   4612 			i = 0;
   4613 
   4614 		/* Now send to the stack or do LRO */
   4615 		if (sendmp != NULL) {
   4616 			rxr->next_to_check = i;
   4617 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   4618 			i = rxr->next_to_check;
   4619 		}
   4620 
    4621 		/* Every 8 descriptors we go to refresh mbufs */
   4622 		if (processed == 8) {
   4623 			ixgbe_refresh_mbufs(rxr, i);
   4624 			processed = 0;
   4625 		}
   4626 	}
   4627 
   4628 	/* Refresh any remaining buf structs */
   4629 	if (ixgbe_rx_unrefreshed(rxr))
   4630 		ixgbe_refresh_mbufs(rxr, i);
   4631 
   4632 	rxr->next_to_check = i;
   4633 
   4634 #ifdef LRO
   4635 	/*
   4636 	 * Flush any outstanding LRO work
   4637 	 */
   4638 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   4639 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   4640 		tcp_lro_flush(lro, queued);
   4641 	}
   4642 #endif /* LRO */
   4643 
   4644 	IXGBE_RX_UNLOCK(rxr);
   4645 
   4646 	/*
   4647 	** We still have cleaning to do?
   4648 	** Schedule another interrupt if so.
   4649 	*/
   4650 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   4651 		ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
   4652 		return true;
   4653 	}
   4654 
   4655 	return false;
   4656 }
   4657 
   4658 
   4659 /*********************************************************************
   4660  *
   4661  *  Verify that the hardware indicated that the checksum is valid.
    4662  *  Inform the stack of the checksum status so that it does not
    4663  *  spend time re-verifying the checksum.
   4664  *
   4665  *********************************************************************/
   4666 static void
   4667 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   4668     struct ixgbe_hw_stats *stats)
   4669 {
   4670 	u16	status = (u16) staterr;
   4671 	u8	errors = (u8) (staterr >> 24);
   4672 	bool	sctp = FALSE;
   4673 
   4674 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   4675 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   4676 		sctp = TRUE;
   4677 
   4678 	if (status & IXGBE_RXD_STAT_IPCS) {
   4679 		stats->ipcs.ev_count++;
   4680 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   4681 			/* IP Checksum Good */
   4682 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   4683 
   4684 		} else {
   4685 			stats->ipcs_bad.ev_count++;
   4686 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   4687 		}
   4688 	}
   4689 	if (status & IXGBE_RXD_STAT_L4CS) {
   4690 		stats->l4cs.ev_count++;
   4691 		u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   4692 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   4693 			mp->m_pkthdr.csum_flags |= type;
   4694 		} else {
   4695 			stats->l4cs_bad.ev_count++;
   4696 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   4697 		}
   4698 	}
   4699 	return;
   4700 }
   4701 
   4702 
   4703 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   4704 /*
    4705 ** This routine is run via a vlan config EVENT,
   4706 ** it enables us to use the HW Filter table since
   4707 ** we can get the vlan id. This just creates the
    4708 ** entry in the soft version of the VFTA; init will
   4709 ** repopulate the real table.
   4710 */
   4711 static void
   4712 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   4713 {
   4714 	struct adapter	*adapter = ifp->if_softc;
   4715 	u16		index, bit;
   4716 
   4717 	if (ifp->if_softc !=  arg)   /* Not our event */
   4718 		return;
   4719 
   4720 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   4721 		return;
   4722 
   4723 	IXGBE_CORE_LOCK(adapter);
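         	/*
         	 * The shadow VFTA is 128 32-bit words covering all 4096
         	 * VLAN IDs; e.g. vtag 100 maps to word 3, bit 4.
         	 */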
   4724 	index = (vtag >> 5) & 0x7F;
   4725 	bit = vtag & 0x1F;
   4726 	adapter->shadow_vfta[index] |= (1 << bit);
   4727 	ixgbe_init_locked(adapter);
   4728 	IXGBE_CORE_UNLOCK(adapter);
   4729 }
   4730 
   4731 /*
    4732 ** This routine is run via a vlan
    4733 ** unconfig EVENT; it removes our entry
    4734 ** from the soft vfta.
   4735 */
   4736 static void
   4737 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   4738 {
   4739 	struct adapter	*adapter = ifp->if_softc;
   4740 	u16		index, bit;
   4741 
   4742 	if (ifp->if_softc !=  arg)
   4743 		return;
   4744 
   4745 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   4746 		return;
   4747 
   4748 	IXGBE_CORE_LOCK(adapter);
   4749 	index = (vtag >> 5) & 0x7F;
   4750 	bit = vtag & 0x1F;
   4751 	adapter->shadow_vfta[index] &= ~(1 << bit);
   4752 	/* Re-init to load the changes */
   4753 	ixgbe_init_locked(adapter);
   4754 	IXGBE_CORE_UNLOCK(adapter);
   4755 }
   4756 #endif
   4757 
   4758 static void
   4759 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   4760 {
   4761 	struct ethercom *ec = &adapter->osdep.ec;
   4762 	struct ixgbe_hw *hw = &adapter->hw;
   4763 	u32		ctrl;
   4764 
   4765 	/*
    4766 	** We get here through init_locked, meaning
    4767 	** a soft reset; this has already cleared
    4768 	** the VFTA and other state, so if no vlans
    4769 	** have been registered, do nothing.
   4770 	*/
   4771 	if (!VLAN_ATTACHED(&adapter->osdep.ec)) {
   4772 		return;
   4773 	}
   4774 
   4775 	/*
    4776 	** A soft reset zeroes out the VFTA, so
   4777 	** we need to repopulate it now.
   4778 	*/
   4779 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
   4780 		if (adapter->shadow_vfta[i] != 0)
   4781 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   4782 			    adapter->shadow_vfta[i]);
   4783 
   4784 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   4785 	/* Enable the Filter Table if enabled */
   4786 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   4787 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   4788 		ctrl |= IXGBE_VLNCTRL_VFE;
   4789 	}
   4790 	if (hw->mac.type == ixgbe_mac_82598EB)
   4791 		ctrl |= IXGBE_VLNCTRL_VME;
   4792 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   4793 
    4794 	/* On 82599 the VLAN enable is per-queue in RXDCTL */
   4795 	if (hw->mac.type != ixgbe_mac_82598EB)
   4796 		for (int i = 0; i < adapter->num_queues; i++) {
   4797 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
    4798 			ctrl |= IXGBE_RXDCTL_VME;
   4799 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
   4800 		}
   4801 }
   4802 
   4803 static void
   4804 ixgbe_enable_intr(struct adapter *adapter)
   4805 {
   4806 	struct ixgbe_hw *hw = &adapter->hw;
   4807 	struct ix_queue *que = adapter->queues;
   4808 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
    4809 
   4811 	/* Enable Fan Failure detection */
    4812 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
    4813 		mask |= IXGBE_EIMS_GPI_SDP1;
    4814 	else {
    4815 		mask |= IXGBE_EIMS_ECC;
    4816 		mask |= IXGBE_EIMS_GPI_SDP1;
    4817 		mask |= IXGBE_EIMS_GPI_SDP2;
    4818 #ifdef IXGBE_FDIR
    4819 		mask |= IXGBE_EIMS_FLOW_DIR;
    4820 #endif
    4821 	}
   4822 
   4823 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4824 
   4825 	/* With RSS we use auto clear */
   4826 	if (adapter->msix_mem) {
   4827 		mask = IXGBE_EIMS_ENABLE_MASK;
   4828 		/* Don't autoclear Link */
   4829 		mask &= ~IXGBE_EIMS_OTHER;
   4830 		mask &= ~IXGBE_EIMS_LSC;
   4831 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4832 	}
   4833 
   4834 	/*
   4835 	** Now enable all queues, this is done separately to
   4836 	** allow for handling the extended (beyond 32) MSIX
   4837 	** vectors that can be used by 82599
   4838 	*/
    4839 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4840 		ixgbe_enable_queue(adapter, que->msix);
   4841 
   4842 	IXGBE_WRITE_FLUSH(hw);
   4843 
   4844 	return;
   4845 }
   4846 
   4847 static void
   4848 ixgbe_disable_intr(struct adapter *adapter)
   4849 {
   4850 	if (adapter->msix_mem)
   4851 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4852 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4853 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4854 	} else {
   4855 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4856 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4857 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4858 	}
   4859 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4860 	return;
   4861 }
   4862 
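         /*
         ** The shared Intel code does 16-bit PCI config space accesses, but
         ** NetBSD's pci(9) interface works in aligned 32-bit dwords; the two
         ** helpers below emulate word access by reading (and, for writes,
         ** read-modify-writing) the containing dword.
         */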
   4863 u16
   4864 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
   4865 {
   4866 	switch (reg % 4) {
   4867 	case 0:
   4868 		return pci_conf_read(hw->back->pc, hw->back->tag, reg) &
   4869 		    __BITS(15, 0);
   4870 	case 2:
   4871 		return __SHIFTOUT(pci_conf_read(hw->back->pc, hw->back->tag,
   4872 		    reg - 2), __BITS(31, 16));
   4873 	default:
    4874 		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
   4875 		break;
   4876 	}
   4877 }
   4878 
   4879 void
   4880 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
   4881 {
   4882 	pcireg_t old;
   4883 
   4884 	switch (reg % 4) {
   4885 	case 0:
   4886 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg) &
   4887 		    __BITS(31, 16);
   4888 		pci_conf_write(hw->back->pc, hw->back->tag, reg, value | old);
   4889 		break;
   4890 	case 2:
   4891 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg - 2) &
   4892 		    __BITS(15, 0);
   4893 		pci_conf_write(hw->back->pc, hw->back->tag, reg - 2,
   4894 		    __SHIFTIN(value, __BITS(31, 16)) | old);
   4895 		break;
   4896 	default:
    4897 		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
   4898 		break;
   4899 	}
   4900 
   4901 	return;
   4902 }
   4903 
   4904 /*
   4905 ** Setup the correct IVAR register for a particular MSIX interrupt
   4906 **   (yes this is all very magic and confusing :)
   4907 **  - entry is the register array entry
   4908 **  - vector is the MSIX vector for this queue
   4909 **  - type is RX/TX/MISC
   4910 */
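         /*
         ** Worked example, per the code below: on 82598 a TX (type 1) entry
         ** for queue 3 becomes 3 + 64 = 67, landing in byte (67 & 3) = 3 of
         ** IVAR((67 >> 2) & 0x1F) = IVAR(16); on 82599 the same queue's TX
         ** vector goes into bits 24-31 of IVAR(3 >> 1) = IVAR(1).
         */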
   4911 static void
   4912 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   4913 {
   4914 	struct ixgbe_hw *hw = &adapter->hw;
   4915 	u32 ivar, index;
   4916 
   4917 	vector |= IXGBE_IVAR_ALLOC_VAL;
   4918 
   4919 	switch (hw->mac.type) {
   4920 
   4921 	case ixgbe_mac_82598EB:
   4922 		if (type == -1)
   4923 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   4924 		else
   4925 			entry += (type * 64);
   4926 		index = (entry >> 2) & 0x1F;
   4927 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   4928 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   4929 		ivar |= (vector << (8 * (entry & 0x3)));
   4930 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   4931 		break;
   4932 
   4933 	case ixgbe_mac_82599EB:
   4934 		if (type == -1) { /* MISC IVAR */
   4935 			index = (entry & 1) * 8;
   4936 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   4937 			ivar &= ~(0xFF << index);
   4938 			ivar |= (vector << index);
   4939 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   4940 		} else {	/* RX/TX IVARS */
   4941 			index = (16 * (entry & 1)) + (8 * type);
   4942 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   4943 			ivar &= ~(0xFF << index);
   4944 			ivar |= (vector << index);
   4945 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
    4946 		}
         		break;
   4947 
   4948 	default:
   4949 		break;
   4950 	}
   4951 }
   4952 
   4953 static void
   4954 ixgbe_configure_ivars(struct adapter *adapter)
   4955 {
   4956 	struct  ix_queue *que = adapter->queues;
   4957 	u32 newitr;
   4958 
   4959 	if (ixgbe_max_interrupt_rate > 0)
   4960 		newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4961 	else
   4962 		newitr = 0;
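         	/*
         	 * EITR keeps its interval in bits 3-11 (hence the 0x0FF8
         	 * mask); 8000000 / rate converts the requested maximum
         	 * interrupts/sec into that interval field.
         	 */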
   4963 
    4964 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    4965 		/* First the RX queue entry */
    4966 		ixgbe_set_ivar(adapter, i, que->msix, 0);
    4967 		/* ... and the TX */
    4968 		ixgbe_set_ivar(adapter, i, que->msix, 1);
    4969 		/* Set an Initial EITR value */
    4970 		IXGBE_WRITE_REG(&adapter->hw,
    4971 		    IXGBE_EITR(que->msix), newitr);
   4972 	}
   4973 
   4974 	/* For the Link interrupt */
    4975 	ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
   4976 }
   4977 
   4978 /*
   4979 ** ixgbe_sfp_probe - called in the local timer to
   4980 ** determine if a port had optics inserted.
   4981 */
    4982 static bool
         ixgbe_sfp_probe(struct adapter *adapter)
   4983 {
   4984 	struct ixgbe_hw	*hw = &adapter->hw;
   4985 	device_t	dev = adapter->dev;
   4986 	bool		result = FALSE;
   4987 
   4988 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4989 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4990 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4991 		if (ret)
    4992 			goto out;
    4993 		ret = hw->phy.ops.reset(hw);
    4994 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    4995 			device_printf(dev, "Unsupported SFP+ module detected!\n");
    4996 			device_printf(dev, "Reload driver with supported module.\n");
    4997 			adapter->sfp_probe = FALSE;
    4998 			goto out;
    4999 		} else
    5000 			device_printf(dev, "SFP+ module detected!\n");
   5001 		/* We now have supported optics */
   5002 		adapter->sfp_probe = FALSE;
   5003 		/* Set the optics type so system reports correctly */
   5004 		ixgbe_setup_optics(adapter);
   5005 		result = TRUE;
   5006 	}
   5007 out:
   5008 	return (result);
   5009 }
   5010 
   5011 /*
   5012 ** Tasklet handler for MSIX Link interrupts
   5013 **  - do outside interrupt since it might sleep
    5014 **  - done outside the interrupt handler since it might sleep
   5015 static void
   5016 ixgbe_handle_link(void *context)
   5017 {
   5018 	struct adapter  *adapter = context;
   5019 
   5020 	ixgbe_check_link(&adapter->hw,
   5021 	    &adapter->link_speed, &adapter->link_up, 0);
    5022 	ixgbe_update_link_status(adapter);
   5023 }
   5024 
   5025 /*
   5026 ** Tasklet for handling SFP module interrupts
   5027 */
   5028 static void
   5029 ixgbe_handle_mod(void *context)
   5030 {
   5031 	struct adapter  *adapter = context;
   5032 	struct ixgbe_hw *hw = &adapter->hw;
   5033 	device_t	dev = adapter->dev;
   5034 	u32 err;
   5035 
   5036 	err = hw->phy.ops.identify_sfp(hw);
   5037 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   5038 		device_printf(dev,
   5039 		    "Unsupported SFP+ module type was detected.\n");
   5040 		return;
   5041 	}
   5042 	err = hw->mac.ops.setup_sfp(hw);
   5043 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   5044 		device_printf(dev,
   5045 		    "Setup failure - unsupported SFP+ module type.\n");
   5046 		return;
   5047 	}
   5048 	softint_schedule(adapter->msf_si);
   5049 	return;
   5050 }
   5051 
   5052 
   5053 /*
   5054 ** Tasklet for handling MSF (multispeed fiber) interrupts
   5055 */
   5056 static void
   5057 ixgbe_handle_msf(void *context)
   5058 {
   5059 	struct adapter  *adapter = context;
   5060 	struct ixgbe_hw *hw = &adapter->hw;
   5061 	u32 autoneg;
   5062 	bool negotiate;
   5063 
   5064 	autoneg = hw->phy.autoneg_advertised;
   5065 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   5066 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   5067 	if (hw->mac.ops.setup_link)
   5068 		hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
   5069 	return;
   5070 }
   5071 
   5072 #ifdef IXGBE_FDIR
   5073 /*
   5074 ** Tasklet for reinitializing the Flow Director filter table
   5075 */
   5076 static void
   5077 ixgbe_reinit_fdir(void *context)
   5078 {
   5079 	struct adapter  *adapter = context;
   5080 	struct ifnet   *ifp = adapter->ifp;
   5081 
   5082 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
   5083 		return;
   5084 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
   5085 	adapter->fdir_reinit = 0;
   5086 	/* Restart the interface */
   5087 	ifp->if_flags |= IFF_RUNNING;
   5088 	return;
   5089 }
   5090 #endif
   5091 
   5092 /**********************************************************************
   5093  *
   5094  *  Update the board statistics counters.
   5095  *
   5096  **********************************************************************/
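         /*
         ** Most of these statistics registers clear on read, so each read
         ** below is accumulated into a running event counter rather than
         ** stored directly.
         */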
   5097 static void
   5098 ixgbe_update_stats_counters(struct adapter *adapter)
   5099 {
   5100 	struct ifnet   *ifp = adapter->ifp;
   5101 	struct ixgbe_hw *hw = &adapter->hw;
   5102 	u32  missed_rx = 0, bprc, lxon, lxoff, total;
   5103 	u64  total_missed_rx = 0;
   5104 
   5105 	adapter->stats.crcerrs.ev_count += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   5106 	adapter->stats.illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   5107 	adapter->stats.errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   5108 	adapter->stats.mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   5109 
   5110 	for (int i = 0; i < __arraycount(adapter->stats.mpc); i++) {
   5111 		int j = i % adapter->num_queues;
   5112 		u32 mp;
   5113 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   5114 		/* missed_rx tallies misses for the gprc workaround */
   5115 		missed_rx += mp;
   5116 		/* global total per queue */
    5117 		adapter->stats.mpc[j].ev_count += mp;
   5118 		/* Running comprehensive total for stats display */
   5119 		total_missed_rx += adapter->stats.mpc[j].ev_count;
   5120 		if (hw->mac.type == ixgbe_mac_82598EB)
   5121 			adapter->stats.rnbc[j] +=
   5122 			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   5123 		adapter->stats.pxontxc[j].ev_count +=
   5124 		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   5125 		adapter->stats.pxonrxc[j].ev_count +=
   5126 		    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   5127 		adapter->stats.pxofftxc[j].ev_count +=
   5128 		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   5129 		adapter->stats.pxoffrxc[j].ev_count +=
   5130 		    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   5131 		adapter->stats.pxon2offc[j].ev_count +=
   5132 		    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   5133 	}
   5134 	for (int i = 0; i < __arraycount(adapter->stats.qprc); i++) {
   5135 		int j = i % adapter->num_queues;
   5136 		adapter->stats.qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   5137 		adapter->stats.qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   5138 		adapter->stats.qbrc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
   5139 		adapter->stats.qbrc[j].ev_count +=
   5140 		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
   5141 		adapter->stats.qbtc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
   5142 		adapter->stats.qbtc[j].ev_count +=
   5143 		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
   5144 		adapter->stats.qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   5145 	}
   5146 	adapter->stats.mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   5147 	adapter->stats.mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   5148 	adapter->stats.rlec.ev_count += IXGBE_READ_REG(hw, IXGBE_RLEC);
   5149 
   5150 	/* Hardware workaround, gprc counts missed packets */
   5151 	adapter->stats.gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   5152 
   5153 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   5154 	adapter->stats.lxontxc.ev_count += lxon;
   5155 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   5156 	adapter->stats.lxofftxc.ev_count += lxoff;
   5157 	total = lxon + lxoff;
   5158 
   5159 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5160 		adapter->stats.gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   5161 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   5162 		adapter->stats.gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   5163 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   5164 		adapter->stats.tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   5165 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   5166 		adapter->stats.lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   5167 		adapter->stats.lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   5168 	} else {
   5169 		adapter->stats.lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   5170 		adapter->stats.lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   5171 		/* 82598 only has a counter in the high register */
   5172 		adapter->stats.gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   5173 		adapter->stats.gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   5174 		adapter->stats.tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   5175 	}
   5176 
   5177 	/*
   5178 	 * Workaround: mprc hardware is incorrectly counting
   5179 	 * broadcasts, so for now we subtract those.
   5180 	 */
   5181 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   5182 	adapter->stats.bprc.ev_count += bprc;
   5183 	adapter->stats.mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   5184 
   5185 	adapter->stats.prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   5186 	adapter->stats.prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   5187 	adapter->stats.prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   5188 	adapter->stats.prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   5189 	adapter->stats.prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   5190 	adapter->stats.prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   5191 
   5192 	adapter->stats.gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   5193 	adapter->stats.mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   5194 	adapter->stats.ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   5195 
   5196 	adapter->stats.ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   5197 	adapter->stats.rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   5198 	adapter->stats.roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   5199 	adapter->stats.rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   5200 	adapter->stats.mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   5201 	adapter->stats.mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   5202 	adapter->stats.mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   5203 	adapter->stats.tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   5204 	adapter->stats.tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   5205 	adapter->stats.ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   5206 	adapter->stats.ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   5207 	adapter->stats.ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   5208 	adapter->stats.ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   5209 	adapter->stats.ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   5210 	adapter->stats.bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   5211 	adapter->stats.xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   5212 	adapter->stats.fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   5213 	adapter->stats.fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   5214 
   5215 	/* Only read FCOE on 82599 */
   5216 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5217 		adapter->stats.fcoerpdc.ev_count +=
   5218 		    IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   5219 		adapter->stats.fcoeprc.ev_count +=
   5220 		    IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   5221 		adapter->stats.fcoeptc.ev_count +=
   5222 		    IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   5223 		adapter->stats.fcoedwrc.ev_count +=
   5224 		    IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   5225 		adapter->stats.fcoedwtc.ev_count +=
   5226 		    IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   5227 	}
   5228 
   5229 	/* Fill out the OS statistics structure */
   5230 	ifp->if_ipackets = adapter->stats.gprc.ev_count;
   5231 	ifp->if_opackets = adapter->stats.gptc.ev_count;
   5232 	ifp->if_ibytes = adapter->stats.gorc.ev_count;
   5233 	ifp->if_obytes = adapter->stats.gotc.ev_count;
   5234 	ifp->if_imcasts = adapter->stats.mprc.ev_count;
   5235 	ifp->if_collisions = 0;
   5236 
   5237 	/* Rx Errors */
   5238 	ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs.ev_count +
   5239 		adapter->stats.rlec.ev_count;
   5240 }
   5241 
   5242 /** ixgbe_sysctl_tdh_handler - Handler function
   5243  *  Retrieves the TDH value from the hardware
   5244  */
   5245 static int
   5246 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   5247 {
   5248 	struct sysctlnode node;
   5249 	uint32_t val;
   5250 	struct tx_ring *txr;
   5251 
   5252 	node = *rnode;
   5253 	txr = (struct tx_ring *)node.sysctl_data;
   5254 	if (txr == NULL)
   5255 		return 0;
   5256 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   5257 	node.sysctl_data = &val;
   5258 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5259 }
   5260 
   5261 /** ixgbe_sysctl_tdt_handler - Handler function
   5262  *  Retrieves the TDT value from the hardware
   5263  */
   5264 static int
   5265 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   5266 {
   5267 	struct sysctlnode node;
   5268 	uint32_t val;
   5269 	struct tx_ring *txr;
   5270 
   5271 	node = *rnode;
   5272 	txr = (struct tx_ring *)node.sysctl_data;
   5273 	if (txr == NULL)
   5274 		return 0;
   5275 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   5276 	node.sysctl_data = &val;
   5277 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5278 }
   5279 
   5280 /** ixgbe_sysctl_rdh_handler - Handler function
   5281  *  Retrieves the RDH value from the hardware
   5282  */
   5283 static int
   5284 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   5285 {
   5286 	struct sysctlnode node;
   5287 	uint32_t val;
   5288 	struct rx_ring *rxr;
   5289 
   5290 	node = *rnode;
   5291 	rxr = (struct rx_ring *)node.sysctl_data;
   5292 	if (rxr == NULL)
   5293 		return 0;
   5294 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   5295 	node.sysctl_data = &val;
   5296 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5297 }
   5298 
   5299 /** ixgbe_sysctl_rdt_handler - Handler function
   5300  *  Retrieves the RDT value from the hardware
   5301  */
   5302 static int
   5303 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   5304 {
   5305 	struct sysctlnode node;
   5306 	uint32_t val;
   5307 	struct rx_ring *rxr;
   5308 
   5309 	node = *rnode;
   5310 	rxr = (struct rx_ring *)node.sysctl_data;
   5311 	if (rxr == NULL)
   5312 		return 0;
   5313 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   5314 	node.sysctl_data = &val;
   5315 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5316 }
   5317 
   5318 static int
   5319 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   5320 {
   5321 	struct sysctlnode node;
   5322 	struct ix_queue *que;
   5323 	uint32_t reg, usec, rate;
   5324 
   5325 	node = *rnode;
   5326 	que = (struct ix_queue *)node.sysctl_data;
   5327 	if (que == NULL)
   5328 		return 0;
   5329 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   5330 	usec = ((reg & 0x0FF8) >> 3);
   5331 	if (usec > 0)
   5332 		rate = 1000000 / usec;
   5333 	else
   5334 		rate = 0;
   5335 	node.sysctl_data = &rate;
   5336 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5337 }
   5338 
   5339 const struct sysctlnode *
   5340 ixgbe_sysctl_instance(struct adapter *adapter)
   5341 {
   5342 	const char *dvname;
   5343 	struct sysctllog **log;
   5344 	int rc;
   5345 	const struct sysctlnode *rnode;
   5346 
   5347 	log = &adapter->sysctllog;
   5348 	dvname = device_xname(adapter->dev);
   5349 
   5350 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   5351 	    0, CTLTYPE_NODE, dvname,
   5352 	    SYSCTL_DESCR("ixgbe information and settings"),
   5353 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   5354 		goto err;
   5355 
   5356 	return rnode;
   5357 err:
   5358 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   5359 	return NULL;
   5360 }
   5361 
   5362 /*
   5363  * Add sysctl variables, one per statistic, to the system.
   5364  */
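         /*
          * The nodes land under hw.<device>: assuming a first instance
          * attached as ixg0, this creates e.g. hw.ixg0.queue0.interrupt_rate
          * and hw.ixg0.queue0.txd_head.
          */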
   5365 static void
   5366 ixgbe_add_hw_stats(struct adapter *adapter)
   5367 {
   5368 	device_t dev = adapter->dev;
   5369 	const struct sysctlnode *rnode, *cnode;
   5370 	struct sysctllog **log = &adapter->sysctllog;
   5371 	struct tx_ring *txr = adapter->tx_rings;
   5372 	struct rx_ring *rxr = adapter->rx_rings;
   5373 	struct ixgbe_hw	 *hw = &adapter->hw;
   5374 
   5375 	struct ixgbe_hw_stats *stats = &adapter->stats;
   5376 
   5377 	/* Driver Statistics */
   5378 #if 0
   5379 	/* These counters are not updated by the software */
   5380 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
   5381 			CTLFLAG_RD, &adapter->dropped_pkts,
   5382 			"Driver dropped packets");
   5383 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_header_failed",
   5384 			CTLFLAG_RD, &adapter->mbuf_header_failed,
   5385 			"???");
   5386 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_packet_failed",
   5387 			CTLFLAG_RD, &adapter->mbuf_packet_failed,
   5388 			"???");
   5389 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_map_avail",
   5390 			CTLFLAG_RD, &adapter->no_tx_map_avail,
   5391 			"???");
   5392 #endif
   5393 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   5394 	    NULL, device_xname(dev), "Handled queue in softint");
   5395 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   5396 	    NULL, device_xname(dev), "Requeued in softint");
   5397 	evcnt_attach_dynamic(&adapter->morerx, EVCNT_TYPE_MISC,
   5398 	    NULL, device_xname(dev), "Interrupt handler more rx");
   5399 	evcnt_attach_dynamic(&adapter->moretx, EVCNT_TYPE_MISC,
   5400 	    NULL, device_xname(dev), "Interrupt handler more tx");
   5401 	evcnt_attach_dynamic(&adapter->txloops, EVCNT_TYPE_MISC,
   5402 	    NULL, device_xname(dev), "Interrupt handler tx loops");
   5403 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   5404 	    NULL, device_xname(dev), "Driver tx dma soft fail EFBIG");
   5405 	evcnt_attach_dynamic(&adapter->m_defrag_failed, EVCNT_TYPE_MISC,
   5406 	    NULL, device_xname(dev), "m_defrag() failed");
   5407 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   5408 	    NULL, device_xname(dev), "Driver tx dma hard fail EFBIG");
   5409 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   5410 	    NULL, device_xname(dev), "Driver tx dma hard fail EINVAL");
   5411 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   5412 	    NULL, device_xname(dev), "Driver tx dma hard fail other");
   5413 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   5414 	    NULL, device_xname(dev), "Driver tx dma soft fail EAGAIN");
   5415 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   5416 	    NULL, device_xname(dev), "Driver tx dma soft fail ENOMEM");
   5417 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   5418 	    NULL, device_xname(dev), "Watchdog timeouts");
   5419 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   5420 	    NULL, device_xname(dev), "TSO errors");
   5421 	evcnt_attach_dynamic(&adapter->tso_tx, EVCNT_TYPE_MISC,
   5422 	    NULL, device_xname(dev), "TSO");
   5423 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_MISC,
   5424 	    NULL, device_xname(dev), "Link MSIX IRQ Handled");
   5425 
   5426 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   5427 		snprintf(adapter->queues[i].evnamebuf,
   5428 		    sizeof(adapter->queues[i].evnamebuf), "%s queue%d",
   5429 		    device_xname(dev), i);
   5430 		snprintf(adapter->queues[i].namebuf,
   5431 		    sizeof(adapter->queues[i].namebuf), "queue%d", i);
   5432 
   5433 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   5434 			aprint_error_dev(dev, "could not create sysctl root\n");
   5435 			break;
   5436 		}
   5437 
   5438 		if (sysctl_createv(log, 0, &rnode, &rnode,
   5439 		    0, CTLTYPE_NODE,
   5440 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   5441 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5442 			break;
   5443 
   5444 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5445 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5446 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   5447 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   5448 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   5449 			break;
   5450 
   5451 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5452 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5453 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   5454 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   5455 		    0, CTL_CREATE, CTL_EOL) != 0)
   5456 			break;
   5457 
   5458 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5459 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5460 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   5461 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   5462 		    0, CTL_CREATE, CTL_EOL) != 0)
   5463 			break;
   5464 
   5465 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   5466 		    NULL, adapter->queues[i].evnamebuf,
   5467 		    "Queue No Descriptor Available");
   5468 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   5469 		    NULL, adapter->queues[i].evnamebuf,
   5470 		    "Queue Packets Transmitted");
   5471 
   5472 #ifdef LRO
   5473 		struct lro_ctrl *lro = &rxr->lro;
   5474 #endif /* LRO */
   5475 
   5476 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5477 		    CTLFLAG_READONLY,
   5478 		    CTLTYPE_INT,
   5479 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   5480 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   5481 		    CTL_CREATE, CTL_EOL) != 0)
   5482 			break;
   5483 
   5484 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5485 		    CTLFLAG_READONLY,
   5486 		    CTLTYPE_INT,
   5487 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   5488 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   5489 		    CTL_CREATE, CTL_EOL) != 0)
   5490 			break;
   5491 
   5492 		if (i < __arraycount(adapter->stats.mpc)) {
   5493 			evcnt_attach_dynamic(&adapter->stats.mpc[i],
   5494 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5495 			    "Missed Packet Count");
   5496 		}
   5497 		if (i < __arraycount(adapter->stats.pxontxc)) {
   5498 			evcnt_attach_dynamic(&adapter->stats.pxontxc[i],
   5499 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5500 			    "pxontxc");
   5501 			evcnt_attach_dynamic(&adapter->stats.pxonrxc[i],
   5502 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5503 			    "pxonrxc");
   5504 			evcnt_attach_dynamic(&adapter->stats.pxofftxc[i],
   5505 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5506 			    "pxofftxc");
   5507 			evcnt_attach_dynamic(&adapter->stats.pxoffrxc[i],
   5508 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5509 			    "pxoffrxc");
   5510 			evcnt_attach_dynamic(&adapter->stats.pxon2offc[i],
   5511 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5512 			    "pxon2offc");
   5513 		}
   5514 		if (i < __arraycount(adapter->stats.qprc)) {
   5515 			evcnt_attach_dynamic(&adapter->stats.qprc[i],
   5516 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5517 			    "qprc");
   5518 			evcnt_attach_dynamic(&adapter->stats.qptc[i],
   5519 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5520 			    "qptc");
   5521 			evcnt_attach_dynamic(&adapter->stats.qbrc[i],
   5522 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5523 			    "qbrc");
   5524 			evcnt_attach_dynamic(&adapter->stats.qbtc[i],
   5525 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5526 			    "qbtc");
   5527 			evcnt_attach_dynamic(&adapter->stats.qprdc[i],
   5528 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5529 			    "qprdc");
   5530 		}
   5531 
   5532 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   5533 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   5534 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   5535 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   5536 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   5537 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   5538 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   5539 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   5540 		evcnt_attach_dynamic(&rxr->rx_split_packets, EVCNT_TYPE_MISC,
   5541 		    NULL, adapter->queues[i].evnamebuf, "Rx split packets");
   5542 		evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_MISC,
   5543 		    NULL, adapter->queues[i].evnamebuf, "Rx interrupts");
   5544 #ifdef LRO
   5545 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   5546 				CTLFLAG_RD, &lro->lro_queued, 0,
   5547 				"LRO Queued");
   5548 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   5549 				CTLFLAG_RD, &lro->lro_flushed, 0,
   5550 				"LRO Flushed");
   5551 #endif /* LRO */
   5552 	}
   5553 
    5554 	/* MAC stats get their own sub node */
    5555 
   5557 	snprintf(stats->namebuf,
   5558 	    sizeof(stats->namebuf), "%s MAC Statistics", device_xname(dev));
   5559 
   5560 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   5561 	    stats->namebuf, "rx csum offload - IP");
   5562 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   5563 	    stats->namebuf, "rx csum offload - L4");
   5564 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   5565 	    stats->namebuf, "rx csum offload - IP bad");
   5566 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   5567 	    stats->namebuf, "rx csum offload - L4 bad");
   5568 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   5569 	    stats->namebuf, "Interrupt conditions zero");
   5570 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   5571 	    stats->namebuf, "Legacy interrupts");
   5572 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   5573 	    stats->namebuf, "CRC Errors");
   5574 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   5575 	    stats->namebuf, "Illegal Byte Errors");
   5576 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   5577 	    stats->namebuf, "Byte Errors");
   5578 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   5579 	    stats->namebuf, "MAC Short Packets Discarded");
   5580 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   5581 	    stats->namebuf, "MAC Local Faults");
   5582 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   5583 	    stats->namebuf, "MAC Remote Faults");
   5584 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   5585 	    stats->namebuf, "Receive Length Errors");
   5586 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   5587 	    stats->namebuf, "Link XON Transmitted");
   5588 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   5589 	    stats->namebuf, "Link XON Received");
   5590 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   5591 	    stats->namebuf, "Link XOFF Transmitted");
   5592 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   5593 	    stats->namebuf, "Link XOFF Received");
   5594 
   5595 	/* Packet Reception Stats */
   5596 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   5597 	    stats->namebuf, "Total Octets Received");
   5598 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   5599 	    stats->namebuf, "Good Octets Received");
   5600 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   5601 	    stats->namebuf, "Total Packets Received");
   5602 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   5603 	    stats->namebuf, "Good Packets Received");
   5604 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   5605 	    stats->namebuf, "Multicast Packets Received");
   5606 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   5607 	    stats->namebuf, "Broadcast Packets Received");
   5608 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
    5609 	    stats->namebuf, "64 byte frames received");
   5610 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   5611 	    stats->namebuf, "65-127 byte frames received");
   5612 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   5613 	    stats->namebuf, "128-255 byte frames received");
   5614 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   5615 	    stats->namebuf, "256-511 byte frames received");
   5616 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   5617 	    stats->namebuf, "512-1023 byte frames received");
   5618 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
    5619 	    stats->namebuf, "1024-1522 byte frames received");
   5620 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   5621 	    stats->namebuf, "Receive Undersized");
   5622 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
    5623 	    stats->namebuf, "Fragmented Packets Received");
   5624 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   5625 	    stats->namebuf, "Oversized Packets Received");
   5626 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   5627 	    stats->namebuf, "Received Jabber");
   5628 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   5629 	    stats->namebuf, "Management Packets Received");
    5630 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   5631 	    stats->namebuf, "Management Packets Dropped");
   5632 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   5633 	    stats->namebuf, "Checksum Errors");
   5634 
   5635 	/* Packet Transmission Stats */
   5636 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   5637 	    stats->namebuf, "Good Octets Transmitted");
   5638 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   5639 	    stats->namebuf, "Total Packets Transmitted");
   5640 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   5641 	    stats->namebuf, "Good Packets Transmitted");
   5642 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   5643 	    stats->namebuf, "Broadcast Packets Transmitted");
   5644 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   5645 	    stats->namebuf, "Multicast Packets Transmitted");
   5646 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   5647 	    stats->namebuf, "Management Packets Transmitted");
   5648 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
    5649 	    stats->namebuf, "64 byte frames transmitted");
   5650 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   5651 	    stats->namebuf, "65-127 byte frames transmitted");
   5652 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   5653 	    stats->namebuf, "128-255 byte frames transmitted");
   5654 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   5655 	    stats->namebuf, "256-511 byte frames transmitted");
   5656 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   5657 	    stats->namebuf, "512-1023 byte frames transmitted");
   5658 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   5659 	    stats->namebuf, "1024-1522 byte frames transmitted");
   5660 
   5661 	/* FC Stats */
   5662 	evcnt_attach_dynamic(&stats->fccrc, EVCNT_TYPE_MISC, NULL,
   5663 	    stats->namebuf, "FC CRC Errors");
   5664 	evcnt_attach_dynamic(&stats->fclast, EVCNT_TYPE_MISC, NULL,
   5665 	    stats->namebuf, "FC Last Error");
   5666 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5667 		evcnt_attach_dynamic(&stats->fcoerpdc, EVCNT_TYPE_MISC, NULL,
   5668 		    stats->namebuf, "FCoE Packets Dropped");
   5669 		evcnt_attach_dynamic(&stats->fcoeprc, EVCNT_TYPE_MISC, NULL,
   5670 		    stats->namebuf, "FCoE Packets Received");
   5671 		evcnt_attach_dynamic(&stats->fcoeptc, EVCNT_TYPE_MISC, NULL,
   5672 		    stats->namebuf, "FCoE Packets Transmitted");
   5673 		evcnt_attach_dynamic(&stats->fcoedwrc, EVCNT_TYPE_MISC, NULL,
   5674 		    stats->namebuf, "FCoE DWords Received");
   5675 		evcnt_attach_dynamic(&stats->fcoedwtc, EVCNT_TYPE_MISC, NULL,
   5676 		    stats->namebuf, "FCoE DWords Transmitted");
   5677 	}
   5678 }
   5679 
   5680 /*
   5681 ** Set flow control using sysctl:
   5682 ** Flow control values:
    5683 **	0 - off
   5684 **	1 - rx pause
   5685 **	2 - tx pause
   5686 **	3 - full
   5687 */
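         /*
         ** Illustrative usage, assuming the handler is attached under the
         ** instance node as "flow_control" (the actual node name is set
         ** where this sysctl is registered):
         **	sysctl -w hw.ixg0.flow_control=3
         */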
   5688 static int
   5689 ixgbe_set_flowcntl(SYSCTLFN_ARGS)
   5690 {
   5691 	struct sysctlnode node;
   5692 	int error;
   5693 	int last = ixgbe_flow_control;
   5694 	struct adapter *adapter;
   5695 
   5696 	node = *rnode;
   5697 	adapter = (struct adapter *)node.sysctl_data;
   5698 	node.sysctl_data = &ixgbe_flow_control;
   5699 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5700 	if (error != 0 || newp == NULL)
   5701 		return error;
   5702 
   5703 	/* Don't bother if it's not changed */
   5704 	if (ixgbe_flow_control == last)
   5705 		return (0);
   5706 
    5707 	switch (ixgbe_flow_control) {
    5708 	case ixgbe_fc_rx_pause:
    5709 	case ixgbe_fc_tx_pause:
    5710 	case ixgbe_fc_full:
    5711 		adapter->hw.fc.requested_mode = ixgbe_flow_control;
    5712 		break;
    5713 	case ixgbe_fc_none:
    5714 	default:
    5715 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
    5716 	}
   5717 
   5718 	ixgbe_fc_enable(&adapter->hw, 0);
   5719 	return 0;
   5720 }
   5721 
   5722 static void
   5723 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
   5724         const char *description, int *limit, int value)
   5725 {
   5726 	const struct sysctlnode *rnode, *cnode;
   5727 	struct sysctllog **log = &adapter->sysctllog;
   5728 
    5729 	*limit = value;
   5730 
   5731 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL)
   5732 		aprint_error_dev(adapter->dev,
   5733 		    "could not create sysctl root\n");
   5734 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   5735 	    CTLFLAG_READWRITE,
   5736 	    CTLTYPE_INT,
   5737 	    name, SYSCTL_DESCR(description),
   5738 	    NULL, 0, limit, 0,
   5739 	    CTL_CREATE, CTL_EOL) != 0) {
    5740 		aprint_error_dev(adapter->dev, "%s: could not create sysctl\n",
   5741 		    __func__);
   5742 	}
   5743 }
   5744 
   5745 /*
   5746 ** Control link advertise speed:
   5747 ** 	0 - normal
   5748 **	1 - advertise only 1G
   5749 */
   5750 static int
   5751 ixgbe_set_advertise(SYSCTLFN_ARGS)
   5752 {
   5753 	struct sysctlnode	node;
   5754 	int			t, error;
   5755 	struct adapter		*adapter;
   5756 	struct ixgbe_hw		*hw;
   5757 	ixgbe_link_speed	speed, last;
   5758 
   5759 	node = *rnode;
   5760 	adapter = (struct adapter *)node.sysctl_data;
   5761 	t = adapter->advertise;
   5762 	node.sysctl_data = &t;
   5763 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5764 	if (error != 0 || newp == NULL)
   5765 		return error;
   5766 
   5767 	if (t == -1)
   5768 		return 0;
   5769 
   5770 	adapter->advertise = t;
   5771 
   5772 	hw = &adapter->hw;
   5773 	last = hw->phy.autoneg_advertised;
   5774 
   5775 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
    5776 	    (hw->phy.multispeed_fiber)))
   5777 		return 0;
   5778 
    5779 	if (adapter->advertise == 1)
    5780 		speed = IXGBE_LINK_SPEED_1GB_FULL;
    5781 	else
    5782 		speed = IXGBE_LINK_SPEED_1GB_FULL |
    5783 		    IXGBE_LINK_SPEED_10GB_FULL;
   5784 
   5785 	if (speed == last) /* no change */
   5786 		return 0;
   5787 
   5788 	hw->mac.autotry_restart = TRUE;
   5789 	hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
   5790 
   5791 	return 0;
   5792 }
   5793