/******************************************************************************

  Copyright (c) 2001-2011, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.51 2011/04/25 23:34:21 jfv Exp $*/
/*$NetBSD: ixgbe.c,v 1.15 2015/01/13 03:11:34 msaitoh Exp $*/

#include "opt_inet.h"

#include "ixgbe.h"

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "2.3.10";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_DELL, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static const char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t, cfdata_t, void *);
static void     ixgbe_attach(device_t, device_t, void *);
static int      ixgbe_detach(device_t, int);
#if 0
static int      ixgbe_shutdown(device_t);
#endif
static void     ixgbe_start(struct ifnet *);
static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
#if __FreeBSD_version >= 800000
static int	ixgbe_mq_start(struct ifnet *, struct mbuf *);
static int	ixgbe_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void	ixgbe_qflush(struct ifnet *);
#endif
static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
static void	ixgbe_ifstop(struct ifnet *, int);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static int      ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int      ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_queues(struct adapter *);
static int	ixgbe_setup_msix(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_link(struct adapter *);

static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
static int	ixgbe_setup_transmit_structures(struct adapter *);
static void	ixgbe_setup_transmit_ring(struct tx_ring *);
static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_free_transmit_structures(struct adapter *);
static void     ixgbe_free_transmit_buffers(struct tx_ring *);

static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
static int      ixgbe_setup_receive_structures(struct adapter *);
static int	ixgbe_setup_receive_ring(struct rx_ring *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_free_receive_structures(struct adapter *);
static void     ixgbe_free_receive_buffers(struct rx_ring *);
static void	ixgbe_setup_hw_rsc(struct rx_ring *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static bool	ixgbe_txeof(struct tx_ring *);
static bool	ixgbe_rxeof(struct ix_queue *, int);
static void	ixgbe_rx_checksum(u32, struct mbuf *, u32,
		    struct ixgbe_hw_stats *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void	ixgbe_refresh_mbufs(struct rx_ring *, int);
static int      ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int	ixgbe_set_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_set_advertise(SYSCTLFN_PROTO);
static int	ixgbe_dma_malloc(struct adapter *, bus_size_t,
		    struct ixgbe_dma_alloc *, int);
static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void	ixgbe_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static u32	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
#if 0
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif

static void     ixgbe_add_hw_stats(struct adapter *adapter);

static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);
static void	ixgbe_setup_optics(struct adapter *);
/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

#if defined(NETBSD_MSI_OR_MSIX)
/* The MSI/X Interrupt handlers */
static void	ixgbe_msix_que(void *);
static void	ixgbe_msix_link(void *);
#endif

/* Software interrupts for deferred work */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);

const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

#ifdef IXGBE_FDIR
static void	ixgbe_atr(struct tx_ring *, struct mbuf *);
static void	ixgbe_reinit_fdir(void *, int);
#endif

/*********************************************************************
 *  Device Interface Entry Points
 *********************************************************************/

CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ixgbe_devclass;
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);

MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
#endif

/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation,
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector.
*/
static int ixgbe_enable_aim = TRUE;
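/*
 * On FreeBSD, TUNABLE_INT registers a boot-loader tunable; the NetBSD
 * port stubs it out below, so these knobs are compile-time defaults here.
 */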
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);

static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);

/*
** Smart speed setting, default to on.
** This only works as a compile-time
** option right now since it is used
** during attach; set this to
** 'ixgbe_smart_speed_off' to disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it's off by default.
 */
static bool ixgbe_header_split = FALSE;
TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);

#if defined(NETBSD_MSI_OR_MSIX)
/*
 * Number of Queues, can be set to 0,
 * in which case it autoconfigures based
 * on the number of cpus with a max of 8.
 * This can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
#endif

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);

/* Keep a running tab on them for sanity checks */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** For Flow Director: this is the
** number of TX packets we sample
** for the filter pool; this means
** every 20th packet will be probed.
**
** This feature can be disabled by
** setting this to 0.
*/
static int atr_sample_rate = 20;
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool; this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  an adapter based on the PCI vendor/device ID of the adapter.
 *
 *  return 1 on success, 0 on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
{
	const struct pci_attach_args *pa = aux;

	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
}

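/*
 * Match the adapter against the device table.  A subvendor or subdevice
 * ID of 0 in a table entry acts as a wildcard for that field.
 */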
static ixgbe_vendor_info_t *
ixgbe_lookup(const struct pci_attach_args *pa)
{
	pcireg_t subid;
	ixgbe_vendor_info_t *ent;

	INIT_DEBUGOUT("ixgbe_lookup: begin");

	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
		return NULL;

	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
		if (PCI_VENDOR(pa->pa_id) == ent->vendor_id &&
		    PCI_PRODUCT(pa->pa_id) == ent->device_id &&

		    (PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id ||
		     ent->subvendor_id == 0) &&

		    (PCI_SUBSYS_ID(subid) == ent->subdevice_id ||
		     ent->subdevice_id == 0)) {
			++ixgbe_total_ports;
			return ent;
		}
	}
	return NULL;
}


static void
ixgbe_sysctl_attach(struct adapter *adapter)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;
	device_t dev;

	dev = adapter->dev;
	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "flow_control", SYSCTL_DESCR("Flow Control"),
	    ixgbe_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_gig", SYSCTL_DESCR("1G Link"),
	    ixgbe_set_advertise, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* XXX This is an *instance* sysctl controlling a *global* variable.
	 * XXX It's that way in the FreeBSD driver that this derives from.
	 */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &ixgbe_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16		csum;
	u32		ctrl_ext;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	adapter->osdep.dmat = pa->pa_dmat;

	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* SYSCTL APIs */

	ixgbe_sysctl_attach(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			aprint_error_dev(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev,"Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		aprint_error_dev(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		aprint_error_dev(dev,"The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	/* Set the default Hardware Flow Control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.low_water = IXGBE_FC_LO;
	hw->fc.high_water = IXGBE_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error == IXGBE_ERR_EEPROM_VERSION) {
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
		aprint_error_dev(dev,"Unsupported SFP+ Module\n");

	if (error) {
		error = EIO;
		aprint_error_dev(dev,"Hardware Initialization Failure\n");
		goto err_late;
	}

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter, pa);
	else
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Sysctl for limiting the amount of work done in software interrupts */
	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixgbe_rx_process_limit);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Print PCIE bus type/speed/width info */
	ixgbe_get_bus_info(hw);
	aprint_normal_dev(dev,"PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));

	if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
	    (hw->bus.speed == ixgbe_bus_speed_2500)) {
		aprint_error_dev(dev, "PCI-Express bandwidth available"
		    " for this card\n     is not sufficient for"
		    " optimal performance.\n");
		aprint_error_dev(dev, "For optimal performance a x8 "
		    "PCIE, or x4 PCIE 2 slot is required.\n");
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	ixgbe_add_hw_stats(adapter);

	INIT_DEBUGOUT("ixgbe_attach: end");
	return;
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	return;

}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw_stats *stats = &adapter->stats;
	struct ix_queue *que = adapter->queues;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use\n");
		return EBUSY;
	}

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		softint_disestablish(que->que_si);
	}

	/* Drain the Link queue */
	softint_disestablish(adapter->link_si);
	softint_disestablish(adapter->mod_si);
	softint_disestablish(adapter->msf_si);
#ifdef IXGBE_FDIR
	softint_disestablish(adapter->fdir_si);
#endif

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);
	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);

	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->morerx);
	evcnt_detach(&adapter->moretx);
	evcnt_detach(&adapter->txloops);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->m_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->tso_tx);
	evcnt_detach(&adapter->link_irq);
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);

		if (i < __arraycount(adapter->stats.mpc)) {
			evcnt_detach(&adapter->stats.mpc[i]);
		}
		if (i < __arraycount(adapter->stats.pxontxc)) {
			evcnt_detach(&adapter->stats.pxontxc[i]);
			evcnt_detach(&adapter->stats.pxonrxc[i]);
			evcnt_detach(&adapter->stats.pxofftxc[i]);
			evcnt_detach(&adapter->stats.pxoffrxc[i]);
			evcnt_detach(&adapter->stats.pxon2offc[i]);
		}
		if (i < __arraycount(adapter->stats.qprc)) {
			evcnt_detach(&adapter->stats.qprc[i]);
			evcnt_detach(&adapter->stats.qptc[i]);
			evcnt_detach(&adapter->stats.qbrc[i]);
			evcnt_detach(&adapter->stats.qbtc[i]);
			evcnt_detach(&adapter->stats.qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
		evcnt_detach(&rxr->rx_split_packets);
		evcnt_detach(&rxr->rx_irq);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* FC Stats */
	evcnt_detach(&stats->fccrc);
	evcnt_detach(&stats->fclast);
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		evcnt_detach(&stats->fcoerpdc);
		evcnt_detach(&stats->fcoeprc);
		evcnt_detach(&stats->fcoeptc);
		evcnt_detach(&stats->fcoedwrc);
		evcnt_detach(&stats->fcoedwtc);
	}

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}
#endif


/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/

static void
ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
{
	int rc;
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
	    IFF_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
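		/*
		 * EFBIG from ixgbe_xmit means the DMA map could not hold
		 * the mbuf chain (too many segments); defragment the
		 * chain and retry once.
		 */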
		if (rc == EFBIG) {
			struct mbuf *mtmp;

			if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) {
				m_head = mtmp;
				rc = ixgbe_xmit(txr, m_head);
				if (rc != 0)
					adapter->efbig2_tx_dma_setup.ev_count++;
			} else
				adapter->m_defrag_failed.ev_count++;
		}
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head);

		/* Set watchdog on */
		getmicrotime(&txr->watchdog_time);
		txr->queue_status = IXGBE_QUEUE_WORKING;

	}
	return;
}

/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixgbe_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);
	}
	return;
}

#if __FreeBSD_version >= 800000
/*
** Multiqueue Transmit driver (FreeBSD 8 and later only;
** this block is not compiled into the NetBSD port)
*/
static int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int 		i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXGBE_TX_TRYLOCK(txr)) {
		err = ixgbe_mq_start_locked(ifp, txr, m);
		IXGBE_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		softint_schedule(que->que_si);
	}

	return (err);
}

static int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter  *adapter = txr->adapter;
	struct mbuf     *next;
	int             enqueued, err = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
			ixgbe_txeof(txr);
		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->queue_status = IXGBE_QUEUE_WORKING;
		getmicrotime(&txr->watchdog_time);
	}

	return (err);
}

/*
** Flush all ring buffers
*/
static void
ixgbe_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXGBE_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}
#endif /* __FreeBSD_version >= 800000 */

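/*
 * ethercom if_flags change callback: returning ENETRESET tells the
 * caller that a full reinit is required; otherwise only the
 * promiscuous/allmulti filter state is resynced.
 */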
static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;

	IXGBE_CORE_LOCK(adapter);

	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		rc = ENETRESET;
	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
		ixgbe_set_promisc(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return rc;
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int             error = 0;
	int l4csum_en;
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		break;
	}

	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
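/* The MFS (max frame size) field occupies the upper 16 bits of MHADD. */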
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		k, txdctl, mhadd, gpie;
	u32		rxdctl, rxctrl;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init: begin");
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev,"Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else if (adapter->max_frame_size <= 9216)
		adapter->rx_mbuf_sz = MJUM9BYTES;
	else
		adapter->rx_mbuf_sz = MJUM16BYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);

	/* Enable Fan Failure Interrupt */
	gpie |= IXGBE_SDP1_GPIEN;

	/* Add for Thermal detection */
	if (hw->mac.type == ixgbe_mac_82599EB)
		gpie |= IXGBE_SDP2_GPIEN;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */

	for (int i = 0; i < adapter->num_queues; i++) {
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
	}

	for (int i = 0; i < adapter->num_queues; i++) {
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		/* XXX I don't trust this loop, and I don't trust the
		 * XXX memory barrier.  What is this meant to do? --dyoung
		 */
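		/*
		 * The hardware documentation asks software to poll until
		 * RXDCTL.ENABLE reads back as set (up to ~10ms here) before
		 * writing the ring tail pointer below.
		 */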
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
	}

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix)  {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB)
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
#endif

	/*
	** Check on any SFP devices that
	** need to be kick-started
	*/
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

static int
ixgbe_init(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
}


/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)(1ULL << vector);
	u32	mask;

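	/*
	 * The 82598 has a single 32-bit EIMS register, while later MACs
	 * spread the 64 possible vectors across EIMS_EX(0) and EIMS_EX(1).
	 */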
   1383 	if (hw->mac.type == ixgbe_mac_82598EB) {
   1384                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1385                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   1386 	} else {
   1387                 mask = (queue & 0xFFFFFFFF);
   1388                 if (mask)
   1389                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   1390                 mask = (queue >> 32);
   1391                 if (mask)
   1392                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   1393 	}
   1394 }
   1395 
   1396 __unused static inline void
   1397 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   1398 {
   1399 	struct ixgbe_hw *hw = &adapter->hw;
   1400 	u64	queue = (u64)(1ULL << vector);
   1401 	u32	mask;
   1402 
   1403 	if (hw->mac.type == ixgbe_mac_82598EB) {
   1404                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1405                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   1406 	} else {
   1407                 mask = (queue & 0xFFFFFFFF);
   1408                 if (mask)
   1409                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   1410                 mask = (queue >> 32);
   1411                 if (mask)
   1412                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   1413 	}
   1414 }
   1415 
   1416 static inline void
   1417 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   1418 {
   1419 	u32 mask;
   1420 
   1421 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   1422 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   1423 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   1424 	} else {
   1425 		mask = (queues & 0xFFFFFFFF);
   1426 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   1427 		mask = (queues >> 32);
   1428 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   1429 	}
   1430 }
   1431 
   1432 
   1433 static void
   1434 ixgbe_handle_que(void *context)
   1435 {
   1436 	struct ix_queue *que = context;
   1437 	struct adapter  *adapter = que->adapter;
   1438 	struct tx_ring  *txr = que->txr;
   1439 	struct ifnet    *ifp = adapter->ifp;
   1440 	bool		more;
   1441 
   1442 	adapter->handleq.ev_count++;
   1443 
   1444 	if (ifp->if_flags & IFF_RUNNING) {
   1445 		more = ixgbe_rxeof(que, adapter->rx_process_limit);
   1446 		IXGBE_TX_LOCK(txr);
   1447 		ixgbe_txeof(txr);
   1448 #if __FreeBSD_version >= 800000
   1449 		if (!drbr_empty(ifp, txr->br))
   1450 			ixgbe_mq_start_locked(ifp, txr, NULL);
   1451 #else
   1452 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1453 			ixgbe_start_locked(txr, ifp);
   1454 #endif
   1455 		IXGBE_TX_UNLOCK(txr);
   1456 		if (more) {
   1457 			adapter->req.ev_count++;
   1458 			softint_schedule(que->que_si);
   1459 			return;
   1460 		}
   1461 	}
   1462 
   1463 	/* Reenable this interrupt */
   1464 	ixgbe_enable_queue(adapter, que->msix);
   1465 
   1466 	return;
   1467 }
   1468 
   1469 
   1470 /*********************************************************************
   1471  *
   1472  *  Legacy Interrupt Service routine
   1473  *
   1474  **********************************************************************/
   1475 
   1476 static int
   1477 ixgbe_legacy_irq(void *arg)
   1478 {
   1479 	struct ix_queue *que = arg;
   1480 	struct adapter	*adapter = que->adapter;
   1481 	struct ifnet   *ifp = adapter->ifp;
   1482 	struct ixgbe_hw	*hw = &adapter->hw;
    1483 	struct tx_ring	*txr = adapter->tx_rings;
   1484 	bool		more_tx = false, more_rx = false;
   1485 	u32       	reg_eicr, loop = MAX_LOOP;
   1486 
   1487 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   1488 
   1489 	adapter->stats.legint.ev_count++;
   1490 	++que->irqs;
   1491 	if (reg_eicr == 0) {
   1492 		adapter->stats.intzero.ev_count++;
   1493 		if ((ifp->if_flags & IFF_UP) != 0)
   1494 			ixgbe_enable_intr(adapter);
   1495 		return 0;
   1496 	}
   1497 
   1498 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   1499 		more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
   1500 
   1501 		IXGBE_TX_LOCK(txr);
   1502 		do {
   1503 			adapter->txloops.ev_count++;
   1504 			more_tx = ixgbe_txeof(txr);
   1505 		} while (loop-- && more_tx);
   1506 		IXGBE_TX_UNLOCK(txr);
   1507 	}
   1508 
   1509 	if (more_rx || more_tx) {
   1510 		if (more_rx)
   1511 			adapter->morerx.ev_count++;
   1512 		if (more_tx)
   1513 			adapter->moretx.ev_count++;
   1514 		softint_schedule(que->que_si);
   1515 	}
   1516 
   1517 	/* Check for fan failure */
   1518 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
   1519 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
   1520                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1521 		    "REPLACE IMMEDIATELY!!\n");
   1522 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
   1523 	}
   1524 
   1525 	/* Link status change */
   1526 	if (reg_eicr & IXGBE_EICR_LSC)
   1527 		softint_schedule(adapter->link_si);
   1528 
   1529 	ixgbe_enable_intr(adapter);
   1530 	return 1;
   1531 }
   1532 
   1533 
   1534 #if defined(NETBSD_MSI_OR_MSIX)
   1535 /*********************************************************************
   1536  *
   1537  *  MSI Queue Interrupt Service routine
   1538  *
   1539  **********************************************************************/
   1540 void
   1541 ixgbe_msix_que(void *arg)
   1542 {
   1543 	struct ix_queue	*que = arg;
   1544 	struct adapter  *adapter = que->adapter;
   1545 	struct tx_ring	*txr = que->txr;
   1546 	struct rx_ring	*rxr = que->rxr;
   1547 	bool		more_tx, more_rx;
   1548 	u32		newitr = 0;
   1549 
   1550 	++que->irqs;
   1551 
   1552 	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
   1553 
   1554 	IXGBE_TX_LOCK(txr);
   1555 	more_tx = ixgbe_txeof(txr);
   1556 	IXGBE_TX_UNLOCK(txr);
   1557 
   1558 	/* Do AIM now? */
   1559 
   1560 	if (ixgbe_enable_aim == FALSE)
   1561 		goto no_calc;
   1562 	/*
   1563 	** Do Adaptive Interrupt Moderation:
   1564         **  - Write out last calculated setting
   1565 	**  - Calculate based on average size over
   1566 	**    the last interval.
   1567 	*/
   1568         if (que->eitr_setting)
   1569                 IXGBE_WRITE_REG(&adapter->hw,
   1570                     IXGBE_EITR(que->msix), que->eitr_setting);
   1571 
   1572         que->eitr_setting = 0;
   1573 
   1574         /* Idle, do nothing */
   1575         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1576                 goto no_calc;
   1577 
   1578 	if ((txr->bytes) && (txr->packets))
   1579                	newitr = txr->bytes/txr->packets;
   1580 	if ((rxr->bytes) && (rxr->packets))
   1581 		newitr = max(newitr,
   1582 		    (rxr->bytes / rxr->packets));
   1583 	newitr += 24; /* account for hardware frame, crc */
   1584 
   1585 	/* set an upper boundary */
   1586 	newitr = min(newitr, 3000);
   1587 
   1588 	/* Be nice to the mid range */
   1589 	if ((newitr > 300) && (newitr < 1200))
   1590 		newitr = (newitr / 3);
   1591 	else
   1592 		newitr = (newitr / 2);
   1593 
   1594         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   1595                 newitr |= newitr << 16;
   1596         else
   1597                 newitr |= IXGBE_EITR_CNT_WDIS;
   1598 
   1599         /* save for next interrupt */
   1600         que->eitr_setting = newitr;
   1601 
   1602         /* Reset state */
   1603         txr->bytes = 0;
   1604         txr->packets = 0;
   1605         rxr->bytes = 0;
   1606         rxr->packets = 0;
   1607 
   1608 no_calc:
   1609 	if (more_tx || more_rx)
   1610 		softint_schedule(que->que_si);
   1611 	else /* Reenable this interrupt */
   1612 		ixgbe_enable_queue(adapter, que->msix);
   1613 	return;
   1614 }
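
         /*
         ** A worked AIM example with hypothetical numbers, mirroring the
         ** arithmetic above: an interval that moved 48000 TX bytes in 32
         ** packets (1500 avg) and 64000 RX bytes in 40 packets (1600 avg)
         ** yields newitr = max(1500, 1600) + 24 = 1624.  That is under the
         ** 3000 cap and outside the (300, 1200) mid range, so it is halved
         ** to 812 and, on non-82598 MACs, OR'd with IXGBE_EITR_CNT_WDIS
         ** before the next interrupt writes it into EITR.
         */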
   1615 
   1616 
   1617 static void
   1618 ixgbe_msix_link(void *arg)
   1619 {
   1620 	struct adapter	*adapter = arg;
   1621 	struct ixgbe_hw *hw = &adapter->hw;
   1622 	u32		reg_eicr;
   1623 
   1624 	++adapter->link_irq.ev_count;
   1625 
   1626 	/* First get the cause */
   1627 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   1628 	/* Clear interrupt with write */
   1629 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
   1630 
   1631 	/* Link status change */
   1632 	if (reg_eicr & IXGBE_EICR_LSC)
   1633 		softint_schedule(adapter->link_si);
   1634 
   1635 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   1636 #ifdef IXGBE_FDIR
   1637 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
   1638 			/* This is probably overkill :) */
   1639 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
   1640 				return;
   1641                 	/* Clear the interrupt */
   1642 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
   1643 			/* Turn off the interface */
   1644 			adapter->ifp->if_flags &= ~IFF_RUNNING;
   1645 			softint_schedule(adapter->fdir_si);
   1646 		} else
   1647 #endif
   1648 		if (reg_eicr & IXGBE_EICR_ECC) {
   1649                 	device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
   1650 			    "Please Reboot!!\n");
   1651 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
    1652 		} else if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
   1655                 	/* Clear the interrupt */
   1656                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
   1657 			softint_schedule(adapter->msf_si);
   1658         	} else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
   1659                 	/* Clear the interrupt */
   1660                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
   1661 			softint_schedule(adapter->mod_si);
   1662 		}
   1663         }
   1664 
   1665 	/* Check for fan failure */
   1666 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
   1667 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
   1668                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1669 		    "REPLACE IMMEDIATELY!!\n");
   1670 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
   1671 	}
   1672 
   1673 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   1674 	return;
   1675 }
   1676 #endif
   1677 
   1678 /*********************************************************************
   1679  *
   1680  *  Media Ioctl callback
   1681  *
   1682  *  This routine is called whenever the user queries the status of
   1683  *  the interface using ifconfig.
   1684  *
   1685  **********************************************************************/
   1686 static void
   1687 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1688 {
   1689 	struct adapter *adapter = ifp->if_softc;
   1690 
   1691 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   1692 	IXGBE_CORE_LOCK(adapter);
   1693 	ixgbe_update_link_status(adapter);
   1694 
   1695 	ifmr->ifm_status = IFM_AVALID;
   1696 	ifmr->ifm_active = IFM_ETHER;
   1697 
   1698 	if (!adapter->link_active) {
   1699 		IXGBE_CORE_UNLOCK(adapter);
   1700 		return;
   1701 	}
   1702 
   1703 	ifmr->ifm_status |= IFM_ACTIVE;
   1704 
   1705 	switch (adapter->link_speed) {
   1706 		case IXGBE_LINK_SPEED_1GB_FULL:
   1707 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1708 			break;
   1709 		case IXGBE_LINK_SPEED_10GB_FULL:
   1710 			ifmr->ifm_active |= adapter->optics | IFM_FDX;
   1711 			break;
   1712 	}
   1713 
   1714 	IXGBE_CORE_UNLOCK(adapter);
   1715 
   1716 	return;
   1717 }
   1718 
   1719 /*********************************************************************
   1720  *
   1721  *  Media Ioctl callback
   1722  *
   1723  *  This routine is called when the user changes speed/duplex using
    1724  *  media/mediaopt option with ifconfig.
   1725  *
   1726  **********************************************************************/
   1727 static int
   1728 ixgbe_media_change(struct ifnet * ifp)
   1729 {
   1730 	struct adapter *adapter = ifp->if_softc;
   1731 	struct ifmedia *ifm = &adapter->media;
   1732 
   1733 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   1734 
   1735 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1736 		return (EINVAL);
   1737 
   1738         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1739         case IFM_AUTO:
   1740                 adapter->hw.phy.autoneg_advertised =
   1741 		    IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
   1742                 break;
   1743         default:
   1744                 device_printf(adapter->dev, "Only auto media type\n");
   1745 		return (EINVAL);
   1746         }
   1747 
   1748 	return (0);
   1749 }
   1750 
   1751 /*********************************************************************
   1752  *
   1753  *  This routine maps the mbufs to tx descriptors, allowing the
   1754  *  TX engine to transmit the packets.
   1755  *  	- return 0 on success, positive on failure
   1756  *
   1757  **********************************************************************/
   1758 
   1759 static int
   1760 ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1761 {
   1762 	struct m_tag *mtag;
   1763 	struct adapter  *adapter = txr->adapter;
   1764 	struct ethercom *ec = &adapter->osdep.ec;
   1765 	u32		olinfo_status = 0, cmd_type_len;
   1766 	u32		paylen = 0;
   1767 	int             i, j, error;
   1768 	int		first, last = 0;
   1769 	bus_dmamap_t	map;
   1770 	struct ixgbe_tx_buf *txbuf;
   1771 	union ixgbe_adv_tx_desc *txd = NULL;
   1772 
   1773 	/* Basic descriptor defines */
   1774         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1775 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1776 
   1777 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1778         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1779 
   1780         /*
   1781          * Important to capture the first descriptor
   1782          * used because it will contain the index of
   1783          * the one we tell the hardware to report back
   1784          */
   1785         first = txr->next_avail_desc;
   1786 	txbuf = &txr->tx_buffers[first];
   1787 	map = txbuf->map;
   1788 
   1789 	/*
   1790 	 * Map the packet for DMA.
   1791 	 */
   1792 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1793 	    m_head, BUS_DMA_NOWAIT);
   1794 
   1795 	switch (error) {
   1796 	case EAGAIN:
   1797 		adapter->eagain_tx_dma_setup.ev_count++;
   1798 		return EAGAIN;
   1799 	case ENOMEM:
   1800 		adapter->enomem_tx_dma_setup.ev_count++;
   1801 		return EAGAIN;
   1802 	case EFBIG:
   1803 		adapter->efbig_tx_dma_setup.ev_count++;
   1804 		return error;
   1805 	case EINVAL:
   1806 		adapter->einval_tx_dma_setup.ev_count++;
   1807 		return error;
   1808 	default:
   1809 		adapter->other_tx_dma_setup.ev_count++;
   1810 		return error;
   1811 	case 0:
   1812 		break;
   1813 	}
   1814 
   1815 	/* Make certain there are enough descriptors */
   1816 	if (map->dm_nsegs > txr->tx_avail - 2) {
   1817 		txr->no_desc_avail.ev_count++;
   1818 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1819 		return EAGAIN;
   1820 	}
   1821 
   1822 	/*
   1823 	** Set up the appropriate offload context
   1824 	** this becomes the first descriptor of
   1825 	** a packet.
   1826 	*/
   1827 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1828 		if (ixgbe_tso_setup(txr, m_head, &paylen)) {
   1829 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1830 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1831 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1832 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1833 			++adapter->tso_tx.ev_count;
    1834 		} else {
    1835 			++adapter->tso_err.ev_count;
    1836 			/* Unload the map loaded above before bailing */
         			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
    1837 			return ENXIO;
   1838 		}
   1839 	} else
   1840 		olinfo_status |= ixgbe_tx_ctx_setup(txr, m_head);
   1841 
   1842 #ifdef IXGBE_IEEE1588
   1843         /* This is changing soon to an mtag detection */
   1844         if (we detect this mbuf has a TSTAMP mtag)
   1845                 cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
   1846 #endif
   1847 
   1848 #ifdef IXGBE_FDIR
   1849 	/* Do the flow director magic */
   1850 	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
   1851 		++txr->atr_count;
   1852 		if (txr->atr_count >= atr_sample_rate) {
   1853 			ixgbe_atr(txr, m_head);
   1854 			txr->atr_count = 0;
   1855 		}
   1856 	}
   1857 #endif
   1858         /* Record payload length */
   1859 	if (paylen == 0)
   1860         	olinfo_status |= m_head->m_pkthdr.len <<
   1861 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1862 
   1863 	i = txr->next_avail_desc;
   1864 	for (j = 0; j < map->dm_nsegs; j++) {
   1865 		bus_size_t seglen;
   1866 		bus_addr_t segaddr;
   1867 
   1868 		txbuf = &txr->tx_buffers[i];
   1869 		txd = &txr->tx_base[i];
   1870 		seglen = map->dm_segs[j].ds_len;
   1871 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1872 
   1873 		txd->read.buffer_addr = segaddr;
   1874 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1875 		    cmd_type_len |seglen);
   1876 		txd->read.olinfo_status = htole32(olinfo_status);
   1877 		last = i; /* descriptor that will get completion IRQ */
   1878 
   1879 		if (++i == adapter->num_tx_desc)
   1880 			i = 0;
   1881 
   1882 		txbuf->m_head = NULL;
   1883 		txbuf->eop_index = -1;
   1884 	}
   1885 
   1886 	txd->read.cmd_type_len |=
   1887 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1888 	txr->tx_avail -= map->dm_nsegs;
   1889 	txr->next_avail_desc = i;
   1890 
   1891 	txbuf->m_head = m_head;
   1892 	/* We exchange the maps instead of copying because otherwise
   1893 	 * we end up with many pointers to the same map and we free
   1894 	 * one map twice in ixgbe_free_transmit_structures().  Who
   1895 	 * knows what other problems this caused.  --dyoung
   1896 	 */
   1897 	txr->tx_buffers[first].map = txbuf->map;
   1898 	txbuf->map = map;
   1899 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1900 	    BUS_DMASYNC_PREWRITE);
   1901 
   1902         /* Set the index of the descriptor that will be marked done */
   1903         txbuf = &txr->tx_buffers[first];
   1904 	txbuf->eop_index = last;
   1905 
   1906         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1907 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1908 	/*
   1909 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
   1910 	 * hardware that this frame is available to transmit.
   1911 	 */
   1912 	++txr->total_packets.ev_count;
   1913 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
   1914 
   1915 	return 0;
   1916 }
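
         /*
         ** Descriptor-layout sketch for a hypothetical 3-segment mbuf chain
         ** mapped above starting at next_avail_desc = 10: descriptors 10 and
         ** 11 carry only the common cmd_type_len flags, descriptor 12 (the
         ** last segment) additionally gets EOP|RS, and
         ** tx_buffers[10].eop_index = 12, so completion processing knows the
         ** whole packet is done once descriptor 12 reports done status.
         */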
   1917 
   1918 static void
   1919 ixgbe_set_promisc(struct adapter *adapter)
   1920 {
   1921 	u_int32_t       reg_rctl;
   1922 	struct ifnet   *ifp = adapter->ifp;
   1923 
   1924 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1925 	reg_rctl &= (~IXGBE_FCTRL_UPE);
   1926 	reg_rctl &= (~IXGBE_FCTRL_MPE);
   1927 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1928 
   1929 	if (ifp->if_flags & IFF_PROMISC) {
   1930 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1931 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1932 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   1933 		reg_rctl |= IXGBE_FCTRL_MPE;
   1934 		reg_rctl &= ~IXGBE_FCTRL_UPE;
   1935 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1936 	}
   1937 	return;
   1938 }
   1939 
   1940 
   1941 /*********************************************************************
   1942  *  Multicast Update
   1943  *
   1944  *  This routine is called whenever multicast address list is updated.
   1945  *
   1946  **********************************************************************/
   1947 #define IXGBE_RAR_ENTRIES 16
   1948 
   1949 static void
   1950 ixgbe_set_multi(struct adapter *adapter)
   1951 {
   1952 	struct ether_multi *enm;
   1953 	struct ether_multistep step;
   1954 	u32	fctrl;
   1955 	u8	*mta;
   1956 	u8	*update_ptr;
   1957 	int	mcnt = 0;
   1958 	struct ethercom *ec = &adapter->osdep.ec;
   1959 	struct ifnet   *ifp = adapter->ifp;
   1960 
   1961 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   1962 
   1963 	mta = adapter->mta;
   1964 	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
   1965 	    MAX_NUM_MULTICAST_ADDRESSES);
   1966 
   1967 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1969 	if (ifp->if_flags & IFF_PROMISC)
   1970 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1971 	else if (ifp->if_flags & IFF_ALLMULTI) {
   1972 		fctrl |= IXGBE_FCTRL_MPE;
   1973 		fctrl &= ~IXGBE_FCTRL_UPE;
   1974 	} else
   1975 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1976 
   1977 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   1978 
   1979 	ETHER_FIRST_MULTI(step, ec, enm);
   1980 	while (enm != NULL) {
   1981 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1982 		           ETHER_ADDR_LEN) != 0) {
   1983 			fctrl |= IXGBE_FCTRL_MPE;
   1984 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   1985 			break;
   1986 		}
   1987 		bcopy(enm->enm_addrlo,
   1988 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1989 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1990 		mcnt++;
   1991 		ETHER_NEXT_MULTI(step, enm);
   1992 	}
   1993 
   1994 	update_ptr = mta;
   1995 	ixgbe_update_mc_addr_list(&adapter->hw,
   1996 	    update_ptr, mcnt, ixgbe_mc_array_itr);
   1997 
   1998 	return;
   1999 }
   2000 
    2001 /*
    2002  * This is an iterator function required by the multicast shared code.
    2003  * It simply feeds the shared-code routine the addresses gathered into
    2004  * the array by ixgbe_set_multi(), one at a time.
    2005  */
   2006 static u8 *
   2007 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   2008 {
   2009 	u8 *addr = *update_ptr;
   2010 	u8 *newptr;
   2011 	*vmdq = 0;
   2012 
   2013 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   2014 	*update_ptr = newptr;
   2015 	return addr;
   2016 }
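
         /*
         ** A minimal sketch of how the shared code consumes this iterator
         ** (hypothetical loop; the real consumer is
         ** ixgbe_update_mc_addr_list() in the shared code):
         **
         **	u8 *ptr = mta;
         **	u32 vmdq, i;
         **	for (i = 0; i < mcnt; i++) {
         **		u8 *addr = ixgbe_mc_array_itr(hw, &ptr, &vmdq);
         **		(hash addr into the multicast table registers)
         **	}
         */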
   2017 
   2018 
   2019 /*********************************************************************
   2020  *  Timer routine
   2021  *
    2022  *  This routine checks for link status, updates statistics,
   2023  *  and runs the watchdog check.
   2024  *
   2025  **********************************************************************/
   2026 
   2027 static void
   2028 ixgbe_local_timer1(void *arg)
   2029 {
   2030 	struct adapter *adapter = arg;
   2031 	device_t	dev = adapter->dev;
   2032 	struct tx_ring *txr = adapter->tx_rings;
   2033 
   2034 	KASSERT(mutex_owned(&adapter->core_mtx));
   2035 
   2036 	/* Check for pluggable optics */
   2037 	if (adapter->sfp_probe)
   2038 		if (!ixgbe_sfp_probe(adapter))
   2039 			goto out; /* Nothing to do */
   2040 
   2041 	ixgbe_update_link_status(adapter);
   2042 	ixgbe_update_stats_counters(adapter);
   2043 
   2044 	/*
   2045 	 * If the interface has been paused
   2046 	 * then don't do the watchdog check
   2047 	 */
   2048 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   2049 		goto out;
   2050 
   2051 	/*
   2052 	** Check status on the TX queues for a hang
   2053 	*/
   2054         for (int i = 0; i < adapter->num_queues; i++, txr++)
   2055 		if (txr->queue_status == IXGBE_QUEUE_HUNG)
   2056 			goto hung;
   2057 
   2058 out:
   2059 	ixgbe_rearm_queues(adapter, adapter->que_mask);
   2060 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   2061 	return;
   2062 
   2063 hung:
   2064 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
    2065 	device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
    2066 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
    2067 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
    2068 	device_printf(dev, "TX(%d) desc avail = %d, "
    2069 	    "Next TX to Clean = %d\n",
   2070 	    txr->me, txr->tx_avail, txr->next_to_clean);
   2071 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   2072 	adapter->watchdog_events.ev_count++;
   2073 	ixgbe_init_locked(adapter);
   2074 }
   2075 
   2076 static void
   2077 ixgbe_local_timer(void *arg)
   2078 {
   2079 	struct adapter *adapter = arg;
   2080 
   2081 	IXGBE_CORE_LOCK(adapter);
   2082 	ixgbe_local_timer1(adapter);
   2083 	IXGBE_CORE_UNLOCK(adapter);
   2084 }
   2085 
   2086 /*
   2087 ** Note: this routine updates the OS on the link state
   2088 **	the real check of the hardware only happens with
   2089 **	a link interrupt.
   2090 */
   2091 static void
   2092 ixgbe_update_link_status(struct adapter *adapter)
   2093 {
   2094 	struct ifnet	*ifp = adapter->ifp;
   2095 	struct tx_ring *txr = adapter->tx_rings;
   2096 	device_t dev = adapter->dev;
   2097 
   2098 
    2099 	if (adapter->link_up) {
    2100 		if (adapter->link_active == FALSE) {
    2101 			if (bootverbose)
    2102 				device_printf(dev, "Link is up %d Gbps %s\n",
    2103 				    ((adapter->link_speed ==
         				      IXGBE_LINK_SPEED_10GB_FULL) ? 10 : 1),
    2104 				    "Full Duplex");
   2105 			adapter->link_active = TRUE;
   2106 			if_link_state_change(ifp, LINK_STATE_UP);
   2107 		}
   2108 	} else { /* Link down */
   2109 		if (adapter->link_active == TRUE) {
   2110 			if (bootverbose)
    2111 				device_printf(dev, "Link is Down\n");
   2112 			if_link_state_change(ifp, LINK_STATE_DOWN);
   2113 			adapter->link_active = FALSE;
   2114 			for (int i = 0; i < adapter->num_queues;
   2115 			    i++, txr++)
   2116 				txr->queue_status = IXGBE_QUEUE_IDLE;
   2117 		}
   2118 	}
   2119 
   2120 	return;
   2121 }
   2122 
   2123 
   2124 static void
   2125 ixgbe_ifstop(struct ifnet *ifp, int disable)
   2126 {
   2127 	struct adapter *adapter = ifp->if_softc;
   2128 
   2129 	IXGBE_CORE_LOCK(adapter);
   2130 	ixgbe_stop(adapter);
   2131 	IXGBE_CORE_UNLOCK(adapter);
   2132 }
   2133 
   2134 /*********************************************************************
   2135  *
   2136  *  This routine disables all traffic on the adapter by issuing a
   2137  *  global reset on the MAC and deallocates TX/RX buffers.
   2138  *
   2139  **********************************************************************/
   2140 
   2141 static void
   2142 ixgbe_stop(void *arg)
   2143 {
   2144 	struct ifnet   *ifp;
   2145 	struct adapter *adapter = arg;
   2146 	struct ixgbe_hw *hw = &adapter->hw;
   2147 	ifp = adapter->ifp;
   2148 
   2149 	KASSERT(mutex_owned(&adapter->core_mtx));
   2150 
   2151 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   2152 	ixgbe_disable_intr(adapter);
   2153 
   2154 	/* Tell the stack that the interface is no longer active */
   2155 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2156 
   2157 	ixgbe_reset_hw(hw);
   2158 	hw->adapter_stopped = FALSE;
   2159 	ixgbe_stop_adapter(hw);
   2160 	/* Turn off the laser */
   2161 	if (hw->phy.multispeed_fiber)
   2162 		ixgbe_disable_tx_laser(hw);
   2163 	callout_stop(&adapter->timer);
   2164 
   2165 	/* reprogram the RAR[0] in case user changed it. */
   2166 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   2167 
   2168 	return;
   2169 }
   2170 
   2171 
   2172 /*********************************************************************
   2173  *
   2174  *  Determine hardware revision.
   2175  *
   2176  **********************************************************************/
   2177 static void
   2178 ixgbe_identify_hardware(struct adapter *adapter)
   2179 {
   2180 	pcitag_t tag;
   2181 	pci_chipset_tag_t pc;
   2182 	pcireg_t subid, id;
   2183 	struct ixgbe_hw *hw = &adapter->hw;
   2184 
   2185 	pc = adapter->osdep.pc;
   2186 	tag = adapter->osdep.tag;
   2187 
   2188 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   2189 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   2190 
   2191 	/* Save off the information about this board */
   2192 	hw->vendor_id = PCI_VENDOR(id);
   2193 	hw->device_id = PCI_PRODUCT(id);
   2194 	hw->revision_id =
   2195 	    PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   2196 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   2197 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   2198 
   2199 	/* We need this here to set the num_segs below */
   2200 	ixgbe_set_mac_type(hw);
   2201 
   2202 	/* Pick up the 82599 and VF settings */
   2203 	if (hw->mac.type != ixgbe_mac_82598EB) {
   2204 		hw->phy.smart_speed = ixgbe_smart_speed;
   2205 		adapter->num_segs = IXGBE_82599_SCATTER;
   2206 	} else
   2207 		adapter->num_segs = IXGBE_82598_SCATTER;
   2208 
   2209 	return;
   2210 }
   2211 
   2212 /*********************************************************************
   2213  *
   2214  *  Determine optic type
   2215  *
   2216  **********************************************************************/
   2217 static void
   2218 ixgbe_setup_optics(struct adapter *adapter)
   2219 {
   2220 	struct ixgbe_hw *hw = &adapter->hw;
   2221 	int		layer;
   2222 
   2223 	layer = ixgbe_get_supported_physical_layer(hw);
   2224 	switch (layer) {
   2225 		case IXGBE_PHYSICAL_LAYER_10GBASE_T:
   2226 			adapter->optics = IFM_10G_T;
   2227 			break;
   2228 		case IXGBE_PHYSICAL_LAYER_1000BASE_T:
   2229 			adapter->optics = IFM_1000_T;
   2230 			break;
   2231 		case IXGBE_PHYSICAL_LAYER_10GBASE_LR:
   2232 		case IXGBE_PHYSICAL_LAYER_10GBASE_LRM:
   2233 			adapter->optics = IFM_10G_LR;
   2234 			break;
   2235 		case IXGBE_PHYSICAL_LAYER_10GBASE_SR:
   2236 			adapter->optics = IFM_10G_SR;
   2237 			break;
   2238 		case IXGBE_PHYSICAL_LAYER_10GBASE_KX4:
   2239 		case IXGBE_PHYSICAL_LAYER_10GBASE_CX4:
   2240 			adapter->optics = IFM_10G_CX4;
   2241 			break;
   2242 		case IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU:
   2243 			adapter->optics = IFM_10G_TWINAX;
   2244 			break;
   2245 		case IXGBE_PHYSICAL_LAYER_1000BASE_KX:
   2246 		case IXGBE_PHYSICAL_LAYER_10GBASE_KR:
   2247 		case IXGBE_PHYSICAL_LAYER_10GBASE_XAUI:
   2248 		case IXGBE_PHYSICAL_LAYER_UNKNOWN:
   2249 		default:
   2250 			adapter->optics = IFM_ETHER | IFM_AUTO;
   2251 			break;
   2252 	}
   2253 	return;
   2254 }
   2255 
   2256 /*********************************************************************
   2257  *
   2258  *  Setup the Legacy or MSI Interrupt handler
   2259  *
   2260  **********************************************************************/
   2261 static int
   2262 ixgbe_allocate_legacy(struct adapter *adapter, const struct pci_attach_args *pa)
   2263 {
   2264 	device_t dev = adapter->dev;
   2265 	struct		ix_queue *que = adapter->queues;
   2266 	char intrbuf[PCI_INTRSTR_LEN];
   2267 #if 0
   2268 	int rid = 0;
   2269 
   2270 	/* MSI RID at 1 */
   2271 	if (adapter->msix == 1)
   2272 		rid = 1;
   2273 #endif
   2274 
   2275 	/* We allocate a single interrupt resource */
   2276  	if (pci_intr_map(pa, &adapter->osdep.ih) != 0) {
   2277 		aprint_error_dev(dev, "unable to map interrupt\n");
   2278 		return ENXIO;
   2279 	} else {
   2280 		aprint_normal_dev(dev, "interrupting at %s\n",
   2281 		    pci_intr_string(adapter->osdep.pc, adapter->osdep.ih,
   2282 			intrbuf, sizeof(intrbuf)));
   2283 	}
   2284 
   2285 	/*
   2286 	 * Try allocating a fast interrupt and the associated deferred
   2287 	 * processing contexts.
   2288 	 */
   2289 	que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que, que);
   2290 
   2291 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2292 	adapter->link_si =
   2293 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2294 	adapter->mod_si =
   2295 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2296 	adapter->msf_si =
   2297 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2298 
   2299 #ifdef IXGBE_FDIR
   2300 	adapter->fdir_si =
   2301 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2302 #endif
   2303 	if (que->que_si == NULL ||
   2304 	    adapter->link_si == NULL ||
   2305 	    adapter->mod_si == NULL ||
   2306 #ifdef IXGBE_FDIR
   2307 	    adapter->fdir_si == NULL ||
   2308 #endif
   2309 	    adapter->msf_si == NULL) {
   2310 		aprint_error_dev(dev,
   2311 		    "could not establish software interrupts\n");
   2312 		return ENXIO;
   2313 	}
   2314 
   2315 	adapter->osdep.intr = pci_intr_establish(adapter->osdep.pc,
   2316 	    adapter->osdep.ih, IPL_NET, ixgbe_legacy_irq, que);
   2317 	if (adapter->osdep.intr == NULL) {
   2318 		aprint_error_dev(dev, "failed to register interrupt handler\n");
   2319 		softint_disestablish(que->que_si);
   2320 		softint_disestablish(adapter->link_si);
   2321 		softint_disestablish(adapter->mod_si);
   2322 		softint_disestablish(adapter->msf_si);
   2323 #ifdef IXGBE_FDIR
   2324 		softint_disestablish(adapter->fdir_si);
   2325 #endif
   2326 		return ENXIO;
   2327 	}
   2328 	/* For simplicity in the handlers */
   2329 	adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
   2330 
   2331 	return (0);
   2332 }
   2333 
   2334 
   2335 /*********************************************************************
   2336  *
   2337  *  Setup MSIX Interrupt resources and handlers
   2338  *
   2339  **********************************************************************/
   2340 static int
   2341 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2342 {
   2343 #if !defined(NETBSD_MSI_OR_MSIX)
   2344 	return 0;
   2345 #else
   2346 	device_t        dev = adapter->dev;
   2347 	struct 		ix_queue *que = adapter->queues;
   2348 	int 		error, rid, vector = 0;
   2349 
   2350 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   2351 		rid = vector + 1;
   2352 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
   2353 		    RF_SHAREABLE | RF_ACTIVE);
   2354 		if (que->res == NULL) {
   2355 			aprint_error_dev(dev,"Unable to allocate"
   2356 		    	    " bus resource: que interrupt [%d]\n", vector);
   2357 			return (ENXIO);
   2358 		}
   2359 		/* Set the handler function */
   2360 		error = bus_setup_intr(dev, que->res,
   2361 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   2362 		    ixgbe_msix_que, que, &que->tag);
   2363 		if (error) {
   2364 			que->res = NULL;
   2365 			aprint_error_dev(dev,
   2366 			    "Failed to register QUE handler\n");
   2367 			return error;
   2368 		}
   2369 #if __FreeBSD_version >= 800504
   2370 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
   2371 #endif
   2372 		que->msix = vector;
    2373         	adapter->que_mask |= (u64)(1ULL << que->msix);
   2374 		/*
   2375 		** Bind the msix vector, and thus the
   2376 		** ring to the corresponding cpu.
   2377 		*/
   2378 		if (adapter->num_queues > 1)
   2379 			bus_bind_intr(dev, que->res, i);
   2380 
    2381 		que->que_si = softint_establish(SOFTINT_NET,
         		    ixgbe_handle_que, que);
   2382 		if (que->que_si == NULL) {
   2383 			aprint_error_dev(dev,
   2384 			    "could not establish software interrupt\n");
   2385 		}
   2386 	}
   2387 
   2388 	/* and Link */
   2389 	rid = vector + 1;
   2390 	adapter->res = bus_alloc_resource_any(dev,
   2391     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
   2392 	if (!adapter->res) {
   2393 		aprint_error_dev(dev,"Unable to allocate bus resource: "
   2394 		    "Link interrupt [%d]\n", rid);
   2395 		return (ENXIO);
   2396 	}
   2397 	/* Set the link handler function */
   2398 	error = bus_setup_intr(dev, adapter->res,
   2399 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   2400 	    ixgbe_msix_link, adapter, &adapter->tag);
   2401 	if (error) {
   2402 		adapter->res = NULL;
   2403 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2404 		return (error);
   2405 	}
   2406 #if __FreeBSD_version >= 800504
   2407 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
   2408 #endif
   2409 	adapter->linkvec = vector;
   2410 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2411 	adapter->link_si =
   2412 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2413 	adapter->mod_si =
   2414 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2415 	adapter->msf_si =
   2416 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2417 #ifdef IXGBE_FDIR
   2418 	adapter->fdir_si =
   2419 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2420 #endif
   2421 
   2422 	return (0);
   2423 #endif
   2424 }
   2425 
   2426 /*
    2427  * Set up either MSI/X or MSI
   2428  */
   2429 static int
   2430 ixgbe_setup_msix(struct adapter *adapter)
   2431 {
   2432 #if !defined(NETBSD_MSI_OR_MSIX)
   2433 	return 0;
   2434 #else
   2435 	device_t dev = adapter->dev;
   2436 	int rid, want, queues, msgs;
   2437 
   2438 	/* Override by tuneable */
   2439 	if (ixgbe_enable_msix == 0)
   2440 		goto msi;
   2441 
   2442 	/* First try MSI/X */
   2443 	rid = PCI_BAR(MSIX_82598_BAR);
   2444 	adapter->msix_mem = bus_alloc_resource_any(dev,
   2445 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   2446        	if (!adapter->msix_mem) {
   2447 		rid += 4;	/* 82599 maps in higher BAR */
   2448 		adapter->msix_mem = bus_alloc_resource_any(dev,
   2449 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   2450 	}
   2451        	if (!adapter->msix_mem) {
   2452 		/* May not be enabled */
   2453 		device_printf(adapter->dev,
    2454 		    "Unable to map MSIX table\n");
   2455 		goto msi;
   2456 	}
   2457 
   2458 	msgs = pci_msix_count(dev);
   2459 	if (msgs == 0) { /* system has msix disabled */
   2460 		bus_release_resource(dev, SYS_RES_MEMORY,
   2461 		    rid, adapter->msix_mem);
   2462 		adapter->msix_mem = NULL;
   2463 		goto msi;
   2464 	}
   2465 
   2466 	/* Figure out a reasonable auto config value */
   2467 	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
   2468 
   2469 	if (ixgbe_num_queues != 0)
   2470 		queues = ixgbe_num_queues;
   2471 	/* Set max queues to 8 when autoconfiguring */
    2472 	else if (queues > 8)
   2473 		queues = 8;
   2474 
   2475 	/*
   2476 	** Want one vector (RX/TX pair) per queue
   2477 	** plus an additional for Link.
   2478 	*/
   2479 	want = queues + 1;
   2480 	if (msgs >= want)
   2481 		msgs = want;
   2482 	else {
   2483                	device_printf(adapter->dev,
   2484 		    "MSIX Configuration Problem, "
   2485 		    "%d vectors but %d queues wanted!\n",
   2486 		    msgs, want);
   2487 		return (0); /* Will go to Legacy setup */
   2488 	}
   2489 	if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
   2490                	device_printf(adapter->dev,
   2491 		    "Using MSIX interrupts with %d vectors\n", msgs);
   2492 		adapter->num_queues = queues;
   2493 		return (msgs);
   2494 	}
   2495 msi:
   2496        	msgs = pci_msi_count(dev);
   2497        	if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
   2498                	device_printf(adapter->dev,"Using MSI interrupt\n");
   2499 	return (msgs);
   2500 #endif
   2501 }
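
         /*
         ** Worked example with hypothetical counts: on an 8-CPU machine
         ** advertising 16 MSI-X messages, queues = min(mp_ncpus, msgs - 1)
         ** = 8, the autoconfig cap leaves that alone, and want = 9: one
         ** RX/TX pair vector per queue plus the link vector wired up in
         ** ixgbe_allocate_msix().  The "Configuration Problem" fallback can
         ** only trigger when the ixgbe_num_queues tuneable forces more
         ** queues than there are messages, e.g. 16 queues with 8 messages.
         */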
   2502 
   2503 
   2504 static int
   2505 ixgbe_allocate_pci_resources(struct adapter *adapter, const struct pci_attach_args *pa)
   2506 {
   2507 	pcireg_t	memtype;
   2508 	device_t        dev = adapter->dev;
   2509 	bus_addr_t addr;
   2510 	int flags;
   2511 
   2512 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   2513 	switch (memtype) {
   2514 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2515 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2516 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   2517 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   2518 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   2519 			goto map_err;
   2520 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   2521 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   2522 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   2523 		}
   2524 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   2525 		     adapter->osdep.mem_size, flags,
   2526 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   2527 map_err:
   2528 			adapter->osdep.mem_size = 0;
   2529 			aprint_error_dev(dev, "unable to map BAR0\n");
   2530 			return ENXIO;
   2531 		}
   2532 		break;
   2533 	default:
   2534 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   2535 		return ENXIO;
   2536 	}
   2537 
   2538 	/* Legacy defaults */
   2539 	adapter->num_queues = 1;
   2540 	adapter->hw.back = &adapter->osdep;
   2541 
   2542 	/*
   2543 	** Now setup MSI or MSI/X, should
   2544 	** return us the number of supported
   2545 	** vectors. (Will be 1 for MSI)
   2546 	*/
   2547 	adapter->msix = ixgbe_setup_msix(adapter);
   2548 	return (0);
   2549 }
   2550 
   2551 static void
   2552 ixgbe_free_pci_resources(struct adapter * adapter)
   2553 {
   2554 #if defined(NETBSD_MSI_OR_MSIX)
   2555 	struct 		ix_queue *que = adapter->queues;
   2556 	device_t	dev = adapter->dev;
   2557 #endif
   2558 	int		rid;
   2559 
   2560 #if defined(NETBSD_MSI_OR_MSIX)
   2561 	int		 memrid;
   2562 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   2563 		memrid = PCI_BAR(MSIX_82598_BAR);
   2564 	else
   2565 		memrid = PCI_BAR(MSIX_82599_BAR);
   2566 
   2567 	/*
   2568 	** There is a slight possibility of a failure mode
   2569 	** in attach that will result in entering this function
   2570 	** before interrupt resources have been initialized, and
    2571 	** in that case we do not want to execute the loops below.
    2572 	** We can detect this reliably by the state of the adapter's
    2573 	** res pointer.
   2574 	*/
   2575 	if (adapter->res == NULL)
   2576 		goto mem;
   2577 
   2578 	/*
   2579 	**  Release all msix queue resources:
   2580 	*/
   2581 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2582 		rid = que->msix + 1;
   2583 		if (que->tag != NULL) {
   2584 			bus_teardown_intr(dev, que->res, que->tag);
   2585 			que->tag = NULL;
   2586 		}
   2587 		if (que->res != NULL)
   2588 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
   2589 	}
   2590 #endif
   2591 
   2592 	/* Clean the Legacy or Link interrupt last */
   2593 	if (adapter->linkvec) /* we are doing MSIX */
   2594 		rid = adapter->linkvec + 1;
   2595 	else
    2596 		rid = (adapter->msix != 0) ? 1 : 0;
   2597 
   2598 	pci_intr_disestablish(adapter->osdep.pc, adapter->osdep.intr);
   2599 	adapter->osdep.intr = NULL;
   2600 
   2601 #if defined(NETBSD_MSI_OR_MSIX)
   2602 mem:
   2603 	if (adapter->msix)
   2604 		pci_release_msi(dev);
   2605 
   2606 	if (adapter->msix_mem != NULL)
   2607 		bus_release_resource(dev, SYS_RES_MEMORY,
   2608 		    memrid, adapter->msix_mem);
   2609 #endif
   2610 
   2611 	if (adapter->osdep.mem_size != 0) {
   2612 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   2613 		    adapter->osdep.mem_bus_space_handle,
   2614 		    adapter->osdep.mem_size);
   2615 	}
   2616 
   2617 	return;
   2618 }
   2619 
   2620 /*********************************************************************
   2621  *
   2622  *  Setup networking device structure and register an interface.
   2623  *
   2624  **********************************************************************/
   2625 static int
   2626 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   2627 {
   2628 	struct ethercom *ec = &adapter->osdep.ec;
   2629 	struct ixgbe_hw *hw = &adapter->hw;
   2630 	struct ifnet   *ifp;
   2631 
   2632 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   2633 
   2634 	ifp = adapter->ifp = &ec->ec_if;
   2635 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   2636 	ifp->if_mtu = ETHERMTU;
   2637 	ifp->if_baudrate = 1000000000;
   2638 	ifp->if_init = ixgbe_init;
   2639 	ifp->if_stop = ixgbe_ifstop;
   2640 	ifp->if_softc = adapter;
   2641 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2642 	ifp->if_ioctl = ixgbe_ioctl;
   2643 	ifp->if_start = ixgbe_start;
   2644 #if __FreeBSD_version >= 800000
   2645 	ifp->if_transmit = ixgbe_mq_start;
   2646 	ifp->if_qflush = ixgbe_qflush;
   2647 #endif
   2648 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   2649 
   2650 	if_attach(ifp);
   2651 	ether_ifattach(ifp, adapter->hw.mac.addr);
   2652 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   2653 
   2654 	adapter->max_frame_size =
   2655 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   2656 
   2657 	/*
   2658 	 * Tell the upper layer(s) we support long frames.
   2659 	 */
   2660 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   2661 
   2662 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   2663 	ifp->if_capenable = 0;
   2664 
   2665 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   2666 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
   2667 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2668 	ec->ec_capenable = ec->ec_capabilities;
   2669 
   2670 	/* Don't enable LRO by default */
   2671 	ifp->if_capabilities |= IFCAP_LRO;
   2672 
    2673 	/*
    2674 	** Don't turn this on by default: if vlans are
    2675 	** created on another pseudo device (e.g. lagg),
    2676 	** vlan events are not passed through, breaking
    2677 	** operation, but with HW FILTER off it works. If
    2678 	** you use vlans directly on this driver you can
    2679 	** enable this and get full hardware tag filtering.
    2680 	*/
   2681 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   2682 
   2683 	/*
   2684 	 * Specify the media types supported by this adapter and register
   2685 	 * callbacks to update media and link information
   2686 	 */
   2687 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   2688 		     ixgbe_media_status);
   2689 	ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
   2690 	ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
   2691 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
   2692 		ifmedia_add(&adapter->media,
   2693 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2694 		ifmedia_add(&adapter->media,
   2695 		    IFM_ETHER | IFM_1000_T, 0, NULL);
   2696 	}
   2697 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   2698 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   2699 
   2700 	return (0);
   2701 }
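
         /*
         ** From userland (hypothetical interface name) the registration
         ** above means that
         **	ifconfig ix0 media autoselect
         ** is the only media change ixgbe_media_change() will accept, while
         ** a plain `ifconfig ix0' reports the active media resolved by
         ** ixgbe_media_status().
         */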
   2702 
   2703 static void
   2704 ixgbe_config_link(struct adapter *adapter)
   2705 {
   2706 	struct ixgbe_hw *hw = &adapter->hw;
   2707 	u32	autoneg, err = 0;
   2708 	bool	sfp, negotiate;
   2709 
   2710 	sfp = ixgbe_is_sfp(hw);
   2711 
   2712 	if (sfp) {
   2713 		if (hw->phy.multispeed_fiber) {
   2714 			hw->mac.ops.setup_sfp(hw);
   2715 			ixgbe_enable_tx_laser(hw);
   2716 			softint_schedule(adapter->msf_si);
   2717 		} else {
   2718 			softint_schedule(adapter->mod_si);
   2719 		}
   2720 	} else {
   2721 		if (hw->mac.ops.check_link)
   2722 			err = ixgbe_check_link(hw, &autoneg,
   2723 			    &adapter->link_up, FALSE);
   2724 		if (err)
   2725 			goto out;
   2726 		autoneg = hw->phy.autoneg_advertised;
   2727 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   2728                 	err  = hw->mac.ops.get_link_capabilities(hw,
   2729 			    &autoneg, &negotiate);
   2730 		else
   2731 			negotiate = 0;
   2732 		if (err)
   2733 			goto out;
   2734 		if (hw->mac.ops.setup_link)
   2735                 	err = hw->mac.ops.setup_link(hw, autoneg,
   2736 			    negotiate, adapter->link_up);
   2737 	}
   2738 out:
   2739 	return;
   2740 }
   2741 
   2742 /********************************************************************
   2743  * Manage DMA'able memory.
   2744  *******************************************************************/
   2745 
   2746 static int
   2747 ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
   2748 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2749 {
   2750 	device_t dev = adapter->dev;
   2751 	int             r, rsegs;
   2752 
   2753 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2754 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2755 			       size,	/* maxsize */
   2756 			       1,	/* nsegments */
   2757 			       size,	/* maxsegsize */
   2758 			       BUS_DMA_ALLOCNOW,	/* flags */
   2759 			       &dma->dma_tag);
   2760 	if (r != 0) {
   2761 		aprint_error_dev(dev,
   2762 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
   2763 		goto fail_0;
   2764 	}
   2765 
   2766 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2767 		size,
   2768 		dma->dma_tag->dt_alignment,
   2769 		dma->dma_tag->dt_boundary,
   2770 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2771 	if (r != 0) {
   2772 		aprint_error_dev(dev,
   2773 		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
   2774 		goto fail_1;
   2775 	}
   2776 
   2777 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2778 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2779 	if (r != 0) {
   2780 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2781 		    __func__, r);
   2782 		goto fail_2;
   2783 	}
   2784 
   2785 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2786 	if (r != 0) {
    2787 		aprint_error_dev(dev,
         		    "%s: ixgbe_dmamap_create failed; error %d\n",
   2788 		    __func__, r);
   2789 		goto fail_3;
   2790 	}
   2791 
   2792 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2793 			    size,
   2794 			    NULL,
   2795 			    mapflags | BUS_DMA_NOWAIT);
   2796 	if (r != 0) {
   2797 		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
   2798 		    __func__, r);
   2799 		goto fail_4;
   2800 	}
   2801 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2802 	dma->dma_size = size;
   2803 	return 0;
   2804 fail_4:
   2805 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2806 fail_3:
   2807 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2808 fail_2:
   2809 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2810 fail_1:
   2811 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2812 fail_0:
   2813 	return r;
   2814 }
   2815 
   2816 static void
   2817 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
   2818 {
   2819 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2820 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2821 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2822 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2823 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2824 }
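
         /*
         ** Usage sketch for the pair above (hypothetical function and size;
         ** the real callers are the ring setups in ixgbe_allocate_queues()):
         ** allocate, hand dma_paddr to the hardware while the CPU uses
         ** dma_vaddr, then release with ixgbe_dma_free(), which unwinds in
         ** the same order as the fail_* labels in ixgbe_dma_malloc().
         */
         __unused static int
         ixgbe_dma_example(struct adapter *adapter)
         {
         	struct ixgbe_dma_alloc dma;
         	int error;
         
         	error = ixgbe_dma_malloc(adapter, 4096, &dma, BUS_DMA_NOWAIT);
         	if (error != 0)
         		return error;
         	/* ... program dma.dma_paddr into a ring base register ... */
         	ixgbe_dma_free(adapter, &dma);
         	return 0;
         }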
   2825 
   2826 
   2827 /*********************************************************************
   2828  *
   2829  *  Allocate memory for the transmit and receive rings, and then
   2830  *  the descriptors associated with each, called only once at attach.
   2831  *
   2832  **********************************************************************/
   2833 static int
   2834 ixgbe_allocate_queues(struct adapter *adapter)
   2835 {
   2836 	device_t	dev = adapter->dev;
   2837 	struct ix_queue	*que;
   2838 	struct tx_ring	*txr;
   2839 	struct rx_ring	*rxr;
   2840 	int rsize, tsize, error = IXGBE_SUCCESS;
   2841 	int txconf = 0, rxconf = 0;
   2842 
   2843         /* First allocate the top level queue structs */
   2844         if (!(adapter->queues =
   2845             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2846             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2847                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2848                 error = ENOMEM;
   2849                 goto fail;
   2850         }
   2851 
    2852 	/* Next allocate the TX ring struct memory */
   2853 	if (!(adapter->tx_rings =
   2854 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2855 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2856 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2857 		error = ENOMEM;
   2858 		goto tx_fail;
   2859 	}
   2860 
   2861 	/* Next allocate the RX */
   2862 	if (!(adapter->rx_rings =
   2863 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2864 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2865 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2866 		error = ENOMEM;
   2867 		goto rx_fail;
   2868 	}
   2869 
   2870 	/* For the ring itself */
   2871 	tsize = roundup2(adapter->num_tx_desc *
   2872 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2873 
   2874 	/*
   2875 	 * Now set up the TX queues, txconf is needed to handle the
   2876 	 * possibility that things fail midcourse and we need to
   2877 	 * undo memory gracefully
   2878 	 */
   2879 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2880 		/* Set up some basics */
   2881 		txr = &adapter->tx_rings[i];
   2882 		txr->adapter = adapter;
   2883 		txr->me = i;
   2884 
   2885 		/* Initialize the TX side lock */
   2886 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2887 		    device_xname(dev), txr->me);
   2888 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2889 
   2890 		if (ixgbe_dma_malloc(adapter, tsize,
   2891 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2892 			aprint_error_dev(dev,
   2893 			    "Unable to allocate TX Descriptor memory\n");
   2894 			error = ENOMEM;
   2895 			goto err_tx_desc;
   2896 		}
   2897 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2898 		bzero((void *)txr->tx_base, tsize);
   2899 
   2900         	/* Now allocate transmit buffers for the ring */
   2901         	if (ixgbe_allocate_transmit_buffers(txr)) {
   2902 			aprint_error_dev(dev,
   2903 			    "Critical Failure setting up transmit buffers\n");
   2904 			error = ENOMEM;
   2905 			goto err_tx_desc;
   2906         	}
   2907 #if __FreeBSD_version >= 800000
   2908 		/* Allocate a buf ring */
   2909 		txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
   2910 		    M_WAITOK, &txr->tx_mtx);
   2911 		if (txr->br == NULL) {
   2912 			aprint_error_dev(dev,
   2913 			    "Critical Failure setting up buf ring\n");
   2914 			error = ENOMEM;
   2915 			goto err_tx_desc;
   2916         	}
   2917 #endif
   2918 	}
   2919 
   2920 	/*
   2921 	 * Next the RX queues...
   2922 	 */
   2923 	rsize = roundup2(adapter->num_rx_desc *
   2924 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2925 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2926 		rxr = &adapter->rx_rings[i];
   2927 		/* Set up some basics */
   2928 		rxr->adapter = adapter;
   2929 		rxr->me = i;
   2930 
   2931 		/* Initialize the RX side lock */
   2932 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2933 		    device_xname(dev), rxr->me);
   2934 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2935 
   2936 		if (ixgbe_dma_malloc(adapter, rsize,
   2937 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2938 			aprint_error_dev(dev,
   2939 			    "Unable to allocate RxDescriptor memory\n");
   2940 			error = ENOMEM;
   2941 			goto err_rx_desc;
   2942 		}
   2943 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2944 		bzero((void *)rxr->rx_base, rsize);
   2945 
    2946         	/* Allocate receive buffers for the ring */
   2947 		if (ixgbe_allocate_receive_buffers(rxr)) {
   2948 			aprint_error_dev(dev,
   2949 			    "Critical Failure setting up receive buffers\n");
   2950 			error = ENOMEM;
   2951 			goto err_rx_desc;
   2952 		}
   2953 	}
   2954 
   2955 	/*
   2956 	** Finally set up the queue holding structs
   2957 	*/
   2958 	for (int i = 0; i < adapter->num_queues; i++) {
   2959 		que = &adapter->queues[i];
   2960 		que->adapter = adapter;
   2961 		que->txr = &adapter->tx_rings[i];
   2962 		que->rxr = &adapter->rx_rings[i];
   2963 	}
   2964 
   2965 	return (0);
   2966 
   2967 err_rx_desc:
   2968 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2969 		ixgbe_dma_free(adapter, &rxr->rxdma);
   2970 err_tx_desc:
   2971 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2972 		ixgbe_dma_free(adapter, &txr->txdma);
   2973 	free(adapter->rx_rings, M_DEVBUF);
   2974 rx_fail:
   2975 	free(adapter->tx_rings, M_DEVBUF);
   2976 tx_fail:
   2977 	free(adapter->queues, M_DEVBUF);
   2978 fail:
   2979 	return (error);
   2980 }
   2981 
   2982 /*********************************************************************
   2983  *
   2984  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2985  *  the information needed to transmit a packet on the wire. This is
    2986  *  called only once at attach; setup is done on every reset.
   2987  *
   2988  **********************************************************************/
   2989 static int
   2990 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
   2991 {
   2992 	struct adapter *adapter = txr->adapter;
   2993 	device_t dev = adapter->dev;
   2994 	struct ixgbe_tx_buf *txbuf;
   2995 	int error, i;
   2996 
   2997 	/*
   2998 	 * Setup DMA descriptor areas.
   2999 	 */
   3000 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3001 			       1, 0,		/* alignment, bounds */
   3002 			       IXGBE_TSO_SIZE,		/* maxsize */
   3003 			       adapter->num_segs,	/* nsegments */
   3004 			       PAGE_SIZE,		/* maxsegsize */
   3005 			       0,			/* flags */
   3006 			       &txr->txtag))) {
   3007 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   3008 		goto fail;
   3009 	}
   3010 
   3011 	if (!(txr->tx_buffers =
   3012 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
   3013 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   3014 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   3015 		error = ENOMEM;
   3016 		goto fail;
   3017 	}
   3018 
   3019         /* Create the descriptor buffer dma maps */
   3020 	txbuf = txr->tx_buffers;
   3021 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   3022 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   3023 		if (error != 0) {
   3024 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   3025 			goto fail;
   3026 		}
   3027 	}
   3028 
   3029 	return 0;
   3030 fail:
    3031 	/* We free everything; this handles failure partway through setup */
   3032 	ixgbe_free_transmit_structures(adapter);
   3033 	return (error);
   3034 }
   3035 
   3036 /*********************************************************************
   3037  *
   3038  *  Initialize a transmit ring.
   3039  *
   3040  **********************************************************************/
   3041 static void
   3042 ixgbe_setup_transmit_ring(struct tx_ring *txr)
   3043 {
   3044 	struct adapter *adapter = txr->adapter;
   3045 	struct ixgbe_tx_buf *txbuf;
   3046 	int i;
   3047 
   3048 	/* Clear the old ring contents */
   3049 	IXGBE_TX_LOCK(txr);
   3050 	bzero((void *)txr->tx_base,
   3051 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   3052 	/* Reset indices */
   3053 	txr->next_avail_desc = 0;
   3054 	txr->next_to_clean = 0;
   3055 
   3056 	/* Free any existing tx buffers. */
   3057         txbuf = txr->tx_buffers;
   3058 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   3059 		if (txbuf->m_head != NULL) {
   3060 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   3061 			    0, txbuf->m_head->m_pkthdr.len,
   3062 			    BUS_DMASYNC_POSTWRITE);
   3063 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   3064 			m_freem(txbuf->m_head);
   3065 			txbuf->m_head = NULL;
   3066 		}
   3067 		/* Clear the EOP index */
   3068 		txbuf->eop_index = -1;
   3069         }
   3070 
   3071 #ifdef IXGBE_FDIR
   3072 	/* Set the rate at which we sample packets */
   3073 	if (adapter->hw.mac.type != ixgbe_mac_82598EB)
   3074 		txr->atr_sample = atr_sample_rate;
   3075 #endif
   3076 
   3077 	/* Set number of descriptors available */
   3078 	txr->tx_avail = adapter->num_tx_desc;
   3079 
   3080 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3081 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3082 	IXGBE_TX_UNLOCK(txr);
   3083 }
   3084 
   3085 /*********************************************************************
   3086  *
   3087  *  Initialize all transmit rings.
   3088  *
   3089  **********************************************************************/
   3090 static int
   3091 ixgbe_setup_transmit_structures(struct adapter *adapter)
   3092 {
   3093 	struct tx_ring *txr = adapter->tx_rings;
   3094 
   3095 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   3096 		ixgbe_setup_transmit_ring(txr);
   3097 
   3098 	return (0);
   3099 }
   3100 
   3101 /*********************************************************************
   3102  *
   3103  *  Enable transmit unit.
   3104  *
   3105  **********************************************************************/
   3106 static void
   3107 ixgbe_initialize_transmit_units(struct adapter *adapter)
   3108 {
   3109 	struct tx_ring	*txr = adapter->tx_rings;
   3110 	struct ixgbe_hw	*hw = &adapter->hw;
   3111 
   3112 	/* Setup the Base and Length of the Tx Descriptor Ring */
   3113 
   3114 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   3115 		u64	tdba = txr->txdma.dma_paddr;
   3116 		u32	txctrl;
   3117 
   3118 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
   3119 		       (tdba & 0x00000000ffffffffULL));
   3120 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
   3121 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
   3122 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   3123 
   3124 		/* Setup the HW Tx Head and Tail descriptor pointers */
   3125 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
   3126 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
   3127 
   3128 		/* Setup Transmit Descriptor Cmd Settings */
   3129 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   3130 		txr->queue_status = IXGBE_QUEUE_IDLE;
   3131 
   3132 		/* Disable Head Writeback */
   3133 		switch (hw->mac.type) {
   3134 		case ixgbe_mac_82598EB:
   3135 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
   3136 			break;
   3137 		case ixgbe_mac_82599EB:
   3138 		default:
   3139 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
   3140 			break;
    3141 		}
   3142 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
   3143 		switch (hw->mac.type) {
   3144 		case ixgbe_mac_82598EB:
   3145 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
   3146 			break;
   3147 		case ixgbe_mac_82599EB:
   3148 		default:
   3149 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
   3150 			break;
   3151 		}
   3152 
   3153 	}
   3154 
   3155 	if (hw->mac.type != ixgbe_mac_82598EB) {
   3156 		u32 dmatxctl, rttdcs;
   3157 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
   3158 		dmatxctl |= IXGBE_DMATXCTL_TE;
   3159 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
   3160 		/* Disable arbiter to set MTQC */
   3161 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
   3162 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
   3163 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
   3164 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
   3165 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
   3166 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
   3167 	}
   3168 
   3169 	return;
   3170 }
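
/*
 * Illustrative sketch, not part of the driver: how the 64-bit descriptor
 * ring address programmed above is split across the 32-bit TDBAL/TDBAH
 * register pair, and how TDLEN is derived.  The address and ring size
 * below are hypothetical; each Tx descriptor, legacy or advanced, is
 * 16 bytes.
 */
#if 0
static const u64 ex_tdba  = 0x0000000123456000ULL;	/* hypothetical DMA addr */
static const u32 ex_tdbal = (u32)(0x0000000123456000ULL & 0xffffffffULL);
						/* low half:  0x23456000 */
static const u32 ex_tdbah = (u32)(0x0000000123456000ULL >> 32);
						/* high half: 0x00000001 */
static const u32 ex_tdlen = 1024 * sizeof(struct ixgbe_legacy_tx_desc);
						/* 1024 descs -> 16384 bytes */
#endif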
   3171 
   3172 /*********************************************************************
   3173  *
   3174  *  Free all transmit rings.
   3175  *
   3176  **********************************************************************/
   3177 static void
   3178 ixgbe_free_transmit_structures(struct adapter *adapter)
   3179 {
   3180 	struct tx_ring *txr = adapter->tx_rings;
   3181 
   3182 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   3183 		IXGBE_TX_LOCK(txr);
   3184 		ixgbe_free_transmit_buffers(txr);
   3185 		ixgbe_dma_free(adapter, &txr->txdma);
   3186 		IXGBE_TX_UNLOCK(txr);
   3187 		IXGBE_TX_LOCK_DESTROY(txr);
   3188 	}
   3189 	free(adapter->tx_rings, M_DEVBUF);
   3190 }
   3191 
   3192 /*********************************************************************
   3193  *
   3194  *  Free transmit ring related data structures.
   3195  *
   3196  **********************************************************************/
   3197 static void
   3198 ixgbe_free_transmit_buffers(struct tx_ring *txr)
   3199 {
   3200 	struct adapter *adapter = txr->adapter;
   3201 	struct ixgbe_tx_buf *tx_buffer;
   3202 	int             i;
   3203 
   3204 	INIT_DEBUGOUT("free_transmit_ring: begin");
   3205 
   3206 	if (txr->tx_buffers == NULL)
   3207 		return;
   3208 
   3209 	tx_buffer = txr->tx_buffers;
   3210 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   3211 		if (tx_buffer->m_head != NULL) {
   3212 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   3213 			    0, tx_buffer->m_head->m_pkthdr.len,
   3214 			    BUS_DMASYNC_POSTWRITE);
   3215 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3216 			m_freem(tx_buffer->m_head);
   3217 			tx_buffer->m_head = NULL;
   3218 			if (tx_buffer->map != NULL) {
   3219 				ixgbe_dmamap_destroy(txr->txtag,
   3220 				    tx_buffer->map);
   3221 				tx_buffer->map = NULL;
   3222 			}
   3223 		} else if (tx_buffer->map != NULL) {
   3224 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3225 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   3226 			tx_buffer->map = NULL;
   3227 		}
   3228 	}
   3229 #if __FreeBSD_version >= 800000
   3230 	if (txr->br != NULL)
   3231 		buf_ring_free(txr->br, M_DEVBUF);
   3232 #endif
   3233 	if (txr->tx_buffers != NULL) {
   3234 		free(txr->tx_buffers, M_DEVBUF);
   3235 		txr->tx_buffers = NULL;
   3236 	}
   3237 	if (txr->txtag != NULL) {
   3238 		ixgbe_dma_tag_destroy(txr->txtag);
   3239 		txr->txtag = NULL;
   3240 	}
   3241 	return;
   3242 }
   3243 
   3244 /*********************************************************************
   3245  *
   3246  *  Advanced Context Descriptor setup for VLAN or L4 CSUM
   3247  *
   3248  **********************************************************************/
   3249 
   3250 static u32
   3251 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   3252 {
   3253 	struct m_tag *mtag;
   3254 	struct adapter *adapter = txr->adapter;
   3255 	struct ethercom *ec = &adapter->osdep.ec;
   3256 	struct ixgbe_adv_tx_context_desc *TXD;
   3257 	struct ixgbe_tx_buf        *tx_buffer;
   3258 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   3259 	struct ether_vlan_header *eh;
   3260 	struct ip ip;
   3261 	struct ip6_hdr ip6;
   3262 	int  ehdrlen, ip_hlen = 0;
   3263 	u16	etype;
   3264 	u8	ipproto __diagused = 0;
   3265 	bool	offload;
   3266 	int ctxd = txr->next_avail_desc;
   3267 	u16 vtag = 0;
   3268 
   3269 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   3270 
   3271 	tx_buffer = &txr->tx_buffers[ctxd];
   3272 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   3273 
   3274 	/*
   3275 	** In advanced descriptors the vlan tag must
   3276 	** be placed into the descriptor itself.
   3277 	*/
   3278 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   3279 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   3280 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   3281 	} else if (!offload)
   3282 		return 0;
   3283 
   3284 	/*
   3285 	 * Determine where frame payload starts.
   3286 	 * Jump over vlan headers if already present,
   3287 	 * helpful for QinQ too.
   3288 	 */
   3289 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   3290 	eh = mtod(mp, struct ether_vlan_header *);
   3291 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   3292 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   3293 		etype = ntohs(eh->evl_proto);
   3294 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3295 	} else {
   3296 		etype = ntohs(eh->evl_encap_proto);
   3297 		ehdrlen = ETHER_HDR_LEN;
   3298 	}
   3299 
   3300 	/* Set the ether header length */
   3301 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   3302 
   3303 	switch (etype) {
   3304 	case ETHERTYPE_IP:
   3305 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   3306 		ip_hlen = ip.ip_hl << 2;
   3307 		ipproto = ip.ip_p;
   3308 #if 0
   3309 		ip.ip_sum = 0;
   3310 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   3311 #else
   3312 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   3313 		    ip.ip_sum == 0);
   3314 #endif
   3315 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   3316 		break;
   3317 	case ETHERTYPE_IPV6:
   3318 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   3319 		ip_hlen = sizeof(ip6);
   3320 		ipproto = ip6.ip6_nxt;
   3321 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   3322 		break;
   3323 	default:
   3324 		break;
   3325 	}
   3326 
   3327 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   3328 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   3329 
   3330 	vlan_macip_lens |= ip_hlen;
   3331 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   3332 
   3333 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   3334 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   3335 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   3336 		KASSERT(ipproto == IPPROTO_TCP);
   3337 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   3338 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   3339 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   3340 		KASSERT(ipproto == IPPROTO_UDP);
   3341 	}
   3342 
   3343 	/* Now copy bits into descriptor */
   3344 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   3345 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   3346 	TXD->seqnum_seed = htole32(0);
   3347 	TXD->mss_l4len_idx = htole32(0);
   3348 
   3349 	tx_buffer->m_head = NULL;
   3350 	tx_buffer->eop_index = -1;
   3351 
   3352 	/* We've consumed the first desc, adjust counters */
   3353 	if (++ctxd == adapter->num_tx_desc)
   3354 		ctxd = 0;
   3355 	txr->next_avail_desc = ctxd;
   3356 	--txr->tx_avail;
   3357 
    3358 	return olinfo;
   3359 }
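
/*
 * Worked example, not part of the driver: the vlan_macip_lens word built
 * by ixgbe_tx_ctx_setup() above.  For an untagged frame with a 14-byte
 * ethernet header and a 20-byte IPv4 header (no options), the context
 * descriptor carries (values hypothetical):
 */
#if 0
static const u32 ex_vlan_macip_lens =
    (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20;	/* (14 << 9) | 20 == 0x1c14 */
						/* a VLAN tag, when present,
						 * occupies the top 16 bits */
#endif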
   3360 
   3361 /**********************************************************************
   3362  *
   3363  *  Setup work for hardware segmentation offload (TSO) on
   3364  *  adapters using advanced tx descriptors
   3365  *
   3366  **********************************************************************/
   3367 static bool
   3368 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   3369 {
   3370 	struct m_tag *mtag;
   3371 	struct adapter *adapter = txr->adapter;
   3372 	struct ethercom *ec = &adapter->osdep.ec;
   3373 	struct ixgbe_adv_tx_context_desc *TXD;
   3374 	struct ixgbe_tx_buf        *tx_buffer;
   3375 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   3376 	u32 mss_l4len_idx = 0;
   3377 	u16 vtag = 0;
   3378 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
   3379 	struct ether_vlan_header *eh;
   3380 	struct ip *ip;
   3381 	struct tcphdr *th;
    3382
   3384 	/*
   3385 	 * Determine where frame payload starts.
   3386 	 * Jump over vlan headers if already present
   3387 	 */
   3388 	eh = mtod(mp, struct ether_vlan_header *);
   3389 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   3390 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3391 	else
   3392 		ehdrlen = ETHER_HDR_LEN;
   3393 
    3394 	/* Ensure we have at least the IP+TCP header in the first mbuf. */
    3395 	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   3396 		return FALSE;
   3397 
   3398 	ctxd = txr->next_avail_desc;
   3399 	tx_buffer = &txr->tx_buffers[ctxd];
   3400 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   3401 
   3402 	ip = (struct ip *)(mp->m_data + ehdrlen);
   3403 	if (ip->ip_p != IPPROTO_TCP)
   3404 		return FALSE;   /* 0 */
   3405 	ip->ip_sum = 0;
   3406 	ip_hlen = ip->ip_hl << 2;
   3407 	th = (struct tcphdr *)((char *)ip + ip_hlen);
   3408 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   3409 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   3410 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   3411 	tcp_hlen = th->th_off << 2;
   3412 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   3413 
   3414 	/* This is used in the transmit desc in encap */
   3415 	*paylen = mp->m_pkthdr.len - hdrlen;
   3416 
   3417 	/* VLAN MACLEN IPLEN */
   3418 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   3419 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
    3420 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   3421 	}
   3422 
   3423 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   3424 	vlan_macip_lens |= ip_hlen;
   3425 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   3426 
   3427 	/* ADV DTYPE TUCMD */
   3428 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   3429 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   3430 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   3431 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
    3432
   3434 	/* MSS L4LEN IDX */
   3435 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   3436 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   3437 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   3438 
   3439 	TXD->seqnum_seed = htole32(0);
   3440 	tx_buffer->m_head = NULL;
   3441 	tx_buffer->eop_index = -1;
   3442 
   3443 	if (++ctxd == adapter->num_tx_desc)
   3444 		ctxd = 0;
   3445 
   3446 	txr->tx_avail--;
   3447 	txr->next_avail_desc = ctxd;
   3448 	return TRUE;
   3449 }
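
/*
 * Worked example, not part of the driver: the mss_l4len_idx word built by
 * ixgbe_tso_setup() above for a hypothetical segment size of 1460 and a
 * 20-byte TCP header (no options):
 */
#if 0
static const u32 ex_mss_l4len_idx =
    (1460 << IXGBE_ADVTXD_MSS_SHIFT) |		/* MSS in bits 31:16 */
    (20 << IXGBE_ADVTXD_L4LEN_SHIFT);		/* L4LEN in bits 15:8 */
						/* == 0x05b41400 */
#endif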
   3450 
   3451 #ifdef IXGBE_FDIR
    3452 /*
    3453 ** This routine parses packet headers so that Flow
    3454 ** Director can make a hashed filter table entry
    3455 ** allowing traffic flows to be identified and kept
    3456 ** on the same cpu.  Doing this for every packet
    3457 ** would be a performance hit, so we only sample
    3458 ** one packet in every IXGBE_FDIR_RATE.
    3459 */
   3460 static void
   3461 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
   3462 {
   3463 	struct adapter			*adapter = txr->adapter;
   3464 	struct ix_queue			*que;
   3465 	struct ip			*ip;
   3466 	struct tcphdr			*th;
   3467 	struct udphdr			*uh;
   3468 	struct ether_vlan_header	*eh;
   3469 	union ixgbe_atr_hash_dword	input = {.dword = 0};
   3470 	union ixgbe_atr_hash_dword	common = {.dword = 0};
   3471 	int  				ehdrlen, ip_hlen;
   3472 	u16				etype;
   3473 
   3474 	eh = mtod(mp, struct ether_vlan_header *);
   3475 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   3476 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3477 		etype = eh->evl_proto;
   3478 	} else {
   3479 		ehdrlen = ETHER_HDR_LEN;
   3480 		etype = eh->evl_encap_proto;
   3481 	}
   3482 
   3483 	/* Only handling IPv4 */
   3484 	if (etype != htons(ETHERTYPE_IP))
   3485 		return;
   3486 
   3487 	ip = (struct ip *)(mp->m_data + ehdrlen);
   3488 	ip_hlen = ip->ip_hl << 2;
   3489 
   3490 	/* check if we're UDP or TCP */
   3491 	switch (ip->ip_p) {
   3492 	case IPPROTO_TCP:
   3493 		th = (struct tcphdr *)((char *)ip + ip_hlen);
   3494 		/* src and dst are inverted */
   3495 		common.port.dst ^= th->th_sport;
   3496 		common.port.src ^= th->th_dport;
   3497 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
   3498 		break;
   3499 	case IPPROTO_UDP:
   3500 		uh = (struct udphdr *)((char *)ip + ip_hlen);
   3501 		/* src and dst are inverted */
   3502 		common.port.dst ^= uh->uh_sport;
   3503 		common.port.src ^= uh->uh_dport;
   3504 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
   3505 		break;
   3506 	default:
   3507 		return;
   3508 	}
   3509 
   3510 	input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
   3511 	if (mp->m_pkthdr.ether_vtag)
   3512 		common.flex_bytes ^= htons(ETHERTYPE_VLAN);
   3513 	else
   3514 		common.flex_bytes ^= etype;
   3515 	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
   3516 
   3517 	que = &adapter->queues[txr->me];
   3518 	/*
   3519 	** This assumes the Rx queue and Tx
   3520 	** queue are bound to the same CPU
   3521 	*/
   3522 	ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
   3523 	    input, common, que->msix);
   3524 }
   3525 #endif /* IXGBE_FDIR */
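
/*
 * Sketch, not part of the driver: ixgbe_atr() above XORs the ports into
 * swapped positions (dst ^= sport, src ^= dport).  The idea, as we
 * understand it, is that the signature computed on transmit then matches
 * the receive direction of the same flow, whose ports appear reversed,
 * so both directions land on the same queue/cpu.  Hypothetical values:
 *
 *	Tx packet:  sport 12345, dport 80  ->  hash input (80, 12345)
 *	Rx packet:  sport 80, dport 12345  ->  same (80, 12345) pair
 */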
   3526 
   3527 /**********************************************************************
   3528  *
   3529  *  Examine each tx_buffer in the used queue. If the hardware is done
   3530  *  processing the packet then free associated resources. The
   3531  *  tx_buffer is put back on the free queue.
   3532  *
   3533  **********************************************************************/
   3534 static bool
   3535 ixgbe_txeof(struct tx_ring *txr)
   3536 {
   3537 	struct adapter	*adapter = txr->adapter;
   3538 	struct ifnet	*ifp = adapter->ifp;
   3539 	u32	first, last, done, processed;
   3540 	struct ixgbe_tx_buf *tx_buffer;
   3541 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   3542 	struct timeval now, elapsed;
   3543 
   3544 	KASSERT(mutex_owned(&txr->tx_mtx));
   3545 
   3546 	if (txr->tx_avail == adapter->num_tx_desc) {
   3547 		txr->queue_status = IXGBE_QUEUE_IDLE;
   3548 		return false;
   3549 	}
   3550 
   3551 	processed = 0;
   3552 	first = txr->next_to_clean;
   3553 	tx_buffer = &txr->tx_buffers[first];
   3554 	/* For cleanup we just use legacy struct */
   3555 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   3556 	last = tx_buffer->eop_index;
   3557 	if (last == -1)
   3558 		return false;
   3559 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   3560 
   3561 	/*
   3562 	** Get the index of the first descriptor
   3563 	** BEYOND the EOP and call that 'done'.
   3564 	** I do this so the comparison in the
   3565 	** inner while loop below can be simple
   3566 	*/
   3567 	if (++last == adapter->num_tx_desc) last = 0;
   3568 	done = last;
   3569 
    3570 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3571 	    BUS_DMASYNC_POSTREAD);
   3572 	/*
   3573 	** Only the EOP descriptor of a packet now has the DD
   3574 	** bit set, this is what we look for...
   3575 	*/
   3576 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   3577 		/* We clean the range of the packet */
   3578 		while (first != done) {
   3579 			tx_desc->upper.data = 0;
   3580 			tx_desc->lower.data = 0;
   3581 			tx_desc->buffer_addr = 0;
   3582 			++txr->tx_avail;
   3583 			++processed;
   3584 
   3585 			if (tx_buffer->m_head) {
   3586 				txr->bytes +=
   3587 				    tx_buffer->m_head->m_pkthdr.len;
   3588 				bus_dmamap_sync(txr->txtag->dt_dmat,
   3589 				    tx_buffer->map,
   3590 				    0, tx_buffer->m_head->m_pkthdr.len,
   3591 				    BUS_DMASYNC_POSTWRITE);
   3592 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3593 				m_freem(tx_buffer->m_head);
   3594 				tx_buffer->m_head = NULL;
   3595 			}
   3596 			tx_buffer->eop_index = -1;
   3597 			getmicrotime(&txr->watchdog_time);
   3598 
   3599 			if (++first == adapter->num_tx_desc)
   3600 				first = 0;
   3601 
   3602 			tx_buffer = &txr->tx_buffers[first];
   3603 			tx_desc =
   3604 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   3605 		}
   3606 		++txr->packets;
   3607 		++ifp->if_opackets;
   3608 		/* See if there is more work now */
   3609 		last = tx_buffer->eop_index;
   3610 		if (last != -1) {
   3611 			eop_desc =
   3612 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   3613 			/* Get next done point */
   3614 			if (++last == adapter->num_tx_desc) last = 0;
   3615 			done = last;
   3616 		} else
   3617 			break;
   3618 	}
   3619 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3620 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3621 
   3622 	txr->next_to_clean = first;
   3623 
    3624 	/*
    3625 	** Watchdog calculation: we know there's work
    3626 	** outstanding, or the first return would have
    3627 	** been taken; so if nothing was processed for
    3628 	** too long, the queue is likely hung.
    3629 	*/
   3630 	getmicrotime(&now);
   3631 	timersub(&now, &txr->watchdog_time, &elapsed);
   3632 	if (!processed && tvtohz(&elapsed) > IXGBE_WATCHDOG)
   3633 		txr->queue_status = IXGBE_QUEUE_HUNG;
   3634 
   3635 	/*
   3636 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   3637 	 * it is OK to send packets. If there are no pending descriptors,
   3638 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   3639 	 * restart the timeout.
   3640 	 */
   3641 	if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
   3642 		ifp->if_flags &= ~IFF_OACTIVE;
   3643 		if (txr->tx_avail == adapter->num_tx_desc) {
   3644 			txr->queue_status = IXGBE_QUEUE_IDLE;
   3645 			return false;
   3646 		}
   3647 	}
   3648 
   3649 	return true;
   3650 }
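
/*
 * Sketch, not part of the driver: the wrap-around index arithmetic that
 * ixgbe_txeof() above uses to compute 'done', one slot beyond the EOP
 * descriptor.  A hypothetical helper expressing the same step:
 */
#if 0
static inline u32
ex_ring_next(u32 idx, u32 ring_size)
{
	return (idx + 1 == ring_size) ? 0 : idx + 1;
}
#endif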
   3651 
   3652 /*********************************************************************
   3653  *
    3654  *  Refresh mbuf buffers for RX descriptor rings.
    3655  *   - Keeps its own state, so discards due to resource
    3656  *     exhaustion are unnecessary: if an mbuf cannot be
    3657  *     obtained it just returns, keeping its placeholder,
    3658  *     so it can simply be called again to retry.
   3659  *
   3660  **********************************************************************/
   3661 static void
   3662 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   3663 {
   3664 	struct adapter		*adapter = rxr->adapter;
   3665 	struct ixgbe_rx_buf	*rxbuf;
   3666 	struct mbuf		*mh, *mp;
   3667 	int			i, j, error;
   3668 	bool			refreshed = false;
   3669 
   3670 	i = j = rxr->next_to_refresh;
   3671 	/* Control the loop with one beyond */
   3672 	if (++j == adapter->num_rx_desc)
   3673 		j = 0;
   3674 
   3675 	while (j != limit) {
   3676 		rxbuf = &rxr->rx_buffers[i];
   3677 		if (rxr->hdr_split == FALSE)
   3678 			goto no_split;
   3679 
   3680 		if (rxbuf->m_head == NULL) {
   3681 			mh = m_gethdr(M_DONTWAIT, MT_DATA);
   3682 			if (mh == NULL)
   3683 				goto update;
   3684 		} else
   3685 			mh = rxbuf->m_head;
   3686 
    3687 	mh->m_pkthdr.len = mh->m_len = MHLEN;
   3689 		mh->m_flags |= M_PKTHDR;
   3690 		/* Get the memory mapping */
   3691 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3692 		    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   3693 		if (error != 0) {
   3694 			printf("Refresh mbufs: hdr dmamap load"
   3695 			    " failure - %d\n", error);
   3696 			m_free(mh);
   3697 			rxbuf->m_head = NULL;
   3698 			goto update;
   3699 		}
   3700 		rxbuf->m_head = mh;
   3701 		ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_PREREAD);
   3702 		rxr->rx_base[i].read.hdr_addr =
   3703 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3704 
   3705 no_split:
   3706 		if (rxbuf->m_pack == NULL) {
   3707 			mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3708 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3709 			if (mp == NULL) {
   3710 				rxr->no_jmbuf.ev_count++;
   3711 				goto update;
   3712 			}
   3713 		} else
   3714 			mp = rxbuf->m_pack;
   3715 
   3716 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3717 		/* Get the memory mapping */
   3718 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3719 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3720 		if (error != 0) {
   3721 			printf("Refresh mbufs: payload dmamap load"
   3722 			    " failure - %d\n", error);
   3723 			m_free(mp);
   3724 			rxbuf->m_pack = NULL;
   3725 			goto update;
   3726 		}
   3727 		rxbuf->m_pack = mp;
   3728 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3729 		    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3730 		rxr->rx_base[i].read.pkt_addr =
   3731 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3732 
   3733 		refreshed = true;
   3734 		/* Next is precalculated */
   3735 		i = j;
   3736 		rxr->next_to_refresh = i;
   3737 		if (++j == adapter->num_rx_desc)
   3738 			j = 0;
   3739 	}
   3740 update:
   3741 	if (refreshed) /* Update hardware tail index */
   3742 		IXGBE_WRITE_REG(&adapter->hw,
   3743 		    IXGBE_RDT(rxr->me), rxr->next_to_refresh);
   3744 	return;
   3745 }
   3746 
   3747 /*********************************************************************
   3748  *
   3749  *  Allocate memory for rx_buffer structures. Since we use one
   3750  *  rx_buffer per received packet, the maximum number of rx_buffer's
   3751  *  that we'll need is equal to the number of receive descriptors
   3752  *  that we've allocated.
   3753  *
   3754  **********************************************************************/
   3755 static int
   3756 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   3757 {
   3758 	struct	adapter 	*adapter = rxr->adapter;
   3759 	device_t 		dev = adapter->dev;
   3760 	struct ixgbe_rx_buf 	*rxbuf;
   3761 	int             	i, bsize, error;
   3762 
   3763 	bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
   3764 	if (!(rxr->rx_buffers =
   3765 	    (struct ixgbe_rx_buf *) malloc(bsize,
   3766 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   3767 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   3768 		error = ENOMEM;
   3769 		goto fail;
   3770 	}
   3771 
   3772 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3773 				   1, 0,	/* alignment, bounds */
   3774 				   MSIZE,		/* maxsize */
   3775 				   1,			/* nsegments */
   3776 				   MSIZE,		/* maxsegsize */
   3777 				   0,			/* flags */
   3778 				   &rxr->htag))) {
   3779 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   3780 		goto fail;
   3781 	}
   3782 
   3783 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3784 				   1, 0,	/* alignment, bounds */
   3785 				   MJUM16BYTES,		/* maxsize */
   3786 				   1,			/* nsegments */
   3787 				   MJUM16BYTES,		/* maxsegsize */
   3788 				   0,			/* flags */
   3789 				   &rxr->ptag))) {
   3790 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   3791 		goto fail;
   3792 	}
   3793 
    3794 	for (i = 0; i < adapter->num_rx_desc; i++) {
   3795 		rxbuf = &rxr->rx_buffers[i];
   3796 		error = ixgbe_dmamap_create(rxr->htag,
   3797 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   3798 		if (error) {
   3799 			aprint_error_dev(dev, "Unable to create RX head map\n");
   3800 			goto fail;
   3801 		}
   3802 		error = ixgbe_dmamap_create(rxr->ptag,
   3803 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   3804 		if (error) {
   3805 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   3806 			goto fail;
   3807 		}
   3808 	}
   3809 
   3810 	return (0);
   3811 
   3812 fail:
   3813 	/* Frees all, but can handle partial completion */
   3814 	ixgbe_free_receive_structures(adapter);
   3815 	return (error);
   3816 }
   3817 
   3818 /*
   3819 ** Used to detect a descriptor that has
   3820 ** been merged by Hardware RSC.
   3821 */
   3822 static inline u32
   3823 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   3824 {
   3825 	return (le32toh(rx->wb.lower.lo_dword.data) &
   3826 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   3827 }
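
/*
 * Worked example, not part of the driver: RSCCNT occupies bits 20:17 of
 * the descriptor's lower dword.  For a hypothetical writeback value of
 * 0x00060000:
 */
#if 0
static const u32 ex_rsc =
    (0x00060000 & IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
						/* == 3 merged descriptors */
#endif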
   3828 
   3829 /*********************************************************************
   3830  *
    3831  *  Initialize the Hardware RSC (LRO) feature on 82599
    3832  *  for an RX ring; this is toggled by the LRO capability
   3833  *  even though it is transparent to the stack.
   3834  *
   3835  **********************************************************************/
   3836 static void
   3837 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   3838 {
   3839 	struct	adapter 	*adapter = rxr->adapter;
   3840 	struct	ixgbe_hw	*hw = &adapter->hw;
   3841 	u32			rscctrl, rdrxctl;
   3842 
   3843 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   3844 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   3845 	rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   3846 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   3847 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   3848 
   3849 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   3850 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   3851 	/*
   3852 	** Limit the total number of descriptors that
   3853 	** can be combined, so it does not exceed 64K
   3854 	*/
   3855 	if (adapter->rx_mbuf_sz == MCLBYTES)
   3856 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   3857 	else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
   3858 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   3859 	else if (adapter->rx_mbuf_sz == MJUM9BYTES)
   3860 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   3861 	else  /* Using 16K cluster */
   3862 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
   3863 
   3864 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   3865 
   3866 	/* Enable TCP header recognition */
   3867 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   3868 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
   3869 	    IXGBE_PSRTYPE_TCPHDR));
   3870 
   3871 	/* Disable RSC for ACK packets */
   3872 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   3873 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   3874 
   3875 	rxr->hw_rsc = TRUE;
   3876 }
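
/*
 * Worked arithmetic, not part of the driver, behind the MAXDESC choices
 * above: the merged RSC length must stay under 64KB.
 *
 *	16 x MCLBYTES     (16 x 2048) = 32KB
 *	 8 x MJUMPAGESIZE ( 8 x 4096) = 32KB	(assuming 4KB pages)
 *	 4 x MJUM9BYTES   ( 4 x 9216) = 36KB
 *	 1 x 16KB cluster             = 16KB
 */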
    3877
   3879 static void
   3880 ixgbe_free_receive_ring(struct rx_ring *rxr)
   3881 {
   3882 	struct  adapter         *adapter;
   3883 	struct ixgbe_rx_buf       *rxbuf;
   3884 	int i;
   3885 
   3886 	adapter = rxr->adapter;
   3887 	for (i = 0; i < adapter->num_rx_desc; i++) {
   3888 		rxbuf = &rxr->rx_buffers[i];
   3889 		if (rxbuf->m_head != NULL) {
   3890 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3891 			    BUS_DMASYNC_POSTREAD);
   3892 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3893 			rxbuf->m_head->m_flags |= M_PKTHDR;
   3894 			m_freem(rxbuf->m_head);
   3895 		}
   3896 		if (rxbuf->m_pack != NULL) {
   3897 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3898 			    0, rxbuf->m_pack->m_pkthdr.len,
   3899 			    BUS_DMASYNC_POSTREAD);
   3900 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3901 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   3902 			m_freem(rxbuf->m_pack);
   3903 		}
   3904 		rxbuf->m_head = NULL;
   3905 		rxbuf->m_pack = NULL;
   3906 	}
   3907 }
    3908
   3910 /*********************************************************************
   3911  *
   3912  *  Initialize a receive ring and its buffers.
   3913  *
   3914  **********************************************************************/
   3915 static int
   3916 ixgbe_setup_receive_ring(struct rx_ring *rxr)
   3917 {
   3918 	struct	adapter 	*adapter;
   3919 	struct ifnet		*ifp;
   3920 	struct ixgbe_rx_buf	*rxbuf;
   3921 #ifdef LRO
   3922 	struct lro_ctrl		*lro = &rxr->lro;
   3923 #endif /* LRO */
   3924 	int			rsize, error = 0;
   3925 
   3926 	adapter = rxr->adapter;
   3927 	ifp = adapter->ifp;
   3928 
   3929 	/* Clear the ring contents */
   3930 	IXGBE_RX_LOCK(rxr);
   3931 	rsize = roundup2(adapter->num_rx_desc *
   3932 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3933 	bzero((void *)rxr->rx_base, rsize);
   3934 
   3935 	/* Free current RX buffer structs and their mbufs */
   3936 	ixgbe_free_receive_ring(rxr);
   3937 
   3938 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3939 	 * or size of jumbo mbufs may have changed.
   3940 	 */
   3941 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3942 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   3943 
   3944 	/* Configure header split? */
   3945 	if (ixgbe_header_split)
   3946 		rxr->hdr_split = TRUE;
   3947 
   3948 	/* Now replenish the mbufs */
   3949 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3950 		struct mbuf	*mh, *mp;
   3951 
   3952 		rxbuf = &rxr->rx_buffers[j];
   3953 		/*
    3954 		** Don't allocate mbufs if we're not
    3955 		** doing header split; it's wasteful
   3956 		*/
   3957 		if (rxr->hdr_split == FALSE)
   3958 			goto skip_head;
   3959 
   3960 		/* First the header */
   3961 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3962 		if (rxbuf->m_head == NULL) {
   3963 			error = ENOBUFS;
   3964 			goto fail;
   3965 		}
   3966 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3967 		mh = rxbuf->m_head;
   3968 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3969 		mh->m_flags |= M_PKTHDR;
   3970 		/* Get the memory mapping */
   3971 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3972 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3973 		if (error != 0) /* Nothing elegant to do here */
   3974 			goto fail;
   3975 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3976 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3977 		/* Update descriptor */
   3978 		rxr->rx_base[j].read.hdr_addr =
   3979 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3980 
   3981 skip_head:
   3982 		/* Now the payload cluster */
   3983 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3984 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3985 		if (rxbuf->m_pack == NULL) {
   3986 			error = ENOBUFS;
    3987 			goto fail;
   3988 		}
   3989 		mp = rxbuf->m_pack;
   3990 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3991 		/* Get the memory mapping */
   3992 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3993 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3994 		if (error != 0)
    3995 			goto fail;
   3996 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3997 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3998 		/* Update descriptor */
   3999 		rxr->rx_base[j].read.pkt_addr =
   4000 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   4001 	}
    4002
   4004 	/* Setup our descriptor indices */
   4005 	rxr->next_to_check = 0;
   4006 	rxr->next_to_refresh = 0;
   4007 	rxr->lro_enabled = FALSE;
   4008 	rxr->rx_split_packets.ev_count = 0;
   4009 	rxr->rx_bytes.ev_count = 0;
   4010 	rxr->discard = FALSE;
   4011 
   4012 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4013 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   4014 
   4015 	/*
   4016 	** Now set up the LRO interface:
   4017 	** 82598 uses software LRO, the
   4018 	** 82599 uses a hardware assist.
   4019 	*/
   4020 	if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
   4021 	    (ifp->if_capenable & IFCAP_RXCSUM) &&
   4022 	    (ifp->if_capenable & IFCAP_LRO))
   4023 		ixgbe_setup_hw_rsc(rxr);
   4024 #ifdef LRO
   4025 	else if (ifp->if_capenable & IFCAP_LRO) {
   4026 		device_t dev = adapter->dev;
   4027 		int err = tcp_lro_init(lro);
   4028 		if (err) {
   4029 			device_printf(dev, "LRO Initialization failed!\n");
   4030 			goto fail;
   4031 		}
   4032 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   4033 		rxr->lro_enabled = TRUE;
   4034 		lro->ifp = adapter->ifp;
   4035 	}
   4036 #endif /* LRO */
   4037 
   4038 	IXGBE_RX_UNLOCK(rxr);
   4039 	return (0);
   4040 
   4041 fail:
   4042 	ixgbe_free_receive_ring(rxr);
   4043 	IXGBE_RX_UNLOCK(rxr);
   4044 	return (error);
   4045 }
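
/*
 * Sketch, not part of the driver: in read (host-to-card) format each
 * advanced RX descriptor carries the two addresses that the header-split
 * setup above programs.  The addresses below are hypothetical:
 */
#if 0
static void
ex_fill_rx_desc(union ixgbe_adv_rx_desc *d, u64 hdr_dma, u64 pkt_dma)
{
	d->read.hdr_addr = htole64(hdr_dma);	/* MHLEN-sized header mbuf */
	d->read.pkt_addr = htole64(pkt_dma);	/* rx_mbuf_sz payload cluster */
}
#endif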
   4046 
   4047 /*********************************************************************
   4048  *
   4049  *  Initialize all receive rings.
   4050  *
   4051  **********************************************************************/
   4052 static int
   4053 ixgbe_setup_receive_structures(struct adapter *adapter)
   4054 {
   4055 	struct rx_ring *rxr = adapter->rx_rings;
   4056 	int j;
   4057 
   4058 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   4059 		if (ixgbe_setup_receive_ring(rxr))
   4060 			goto fail;
   4061 
   4062 	return (0);
   4063 fail:
    4064 	/*
    4065 	 * Free RX buffers allocated so far; we only handle
    4066 	 * the rings that completed, since the failing ring
    4067 	 * cleaned up after itself. 'j' failed, so it's the terminus.
    4068 	 */
   4069 	for (int i = 0; i < j; ++i) {
   4070 		rxr = &adapter->rx_rings[i];
   4071 		ixgbe_free_receive_ring(rxr);
   4072 	}
   4073 
   4074 	return (ENOBUFS);
   4075 }
   4076 
   4077 /*********************************************************************
   4078  *
   4079  *  Setup receive registers and features.
   4080  *
   4081  **********************************************************************/
   4082 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   4083 
   4084 static void
   4085 ixgbe_initialize_receive_units(struct adapter *adapter)
   4086 {
   4087 	int i;
   4088 	struct	rx_ring	*rxr = adapter->rx_rings;
   4089 	struct ixgbe_hw	*hw = &adapter->hw;
   4090 	struct ifnet   *ifp = adapter->ifp;
   4091 	u32		bufsz, rxctrl, fctrl, srrctl, rxcsum;
   4092 	u32		reta, mrqc = 0, hlreg, r[10];
    4093
   4095 	/*
   4096 	 * Make sure receives are disabled while
   4097 	 * setting up the descriptor ring
   4098 	 */
   4099 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4100 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
   4101 	    rxctrl & ~IXGBE_RXCTRL_RXEN);
   4102 
   4103 	/* Enable broadcasts */
   4104 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   4105 	fctrl |= IXGBE_FCTRL_BAM;
   4106 	fctrl |= IXGBE_FCTRL_DPF;
   4107 	fctrl |= IXGBE_FCTRL_PMCF;
   4108 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   4109 
   4110 	/* Set for Jumbo Frames? */
   4111 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   4112 	if (ifp->if_mtu > ETHERMTU)
   4113 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   4114 	else
   4115 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   4116 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   4117 
    4118 	bufsz = adapter->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   4119 
   4120 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   4121 		u64 rdba = rxr->rxdma.dma_paddr;
   4122 
   4123 		/* Setup the Base and Length of the Rx Descriptor Ring */
   4124 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
   4125 			       (rdba & 0x00000000ffffffffULL));
   4126 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
   4127 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
   4128 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   4129 
   4130 		/* Set up the SRRCTL register */
   4131 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
   4132 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   4133 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   4134 		srrctl |= bufsz;
   4135 		if (rxr->hdr_split) {
   4136 			/* Use a standard mbuf for the header */
   4137 			srrctl |= ((IXGBE_RX_HDR <<
   4138 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   4139 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   4140 			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   4141 		} else
   4142 			srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   4143 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
   4144 
   4145 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   4146 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
   4147 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
   4148 	}
   4149 
   4150 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   4151 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
   4152 			      IXGBE_PSRTYPE_UDPHDR |
   4153 			      IXGBE_PSRTYPE_IPV4HDR |
   4154 			      IXGBE_PSRTYPE_IPV6HDR;
   4155 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
   4156 	}
   4157 
   4158 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   4159 
   4160 	/* Setup RSS */
   4161 	if (adapter->num_queues > 1) {
   4162 		int j;
   4163 		reta = 0;
   4164 
   4165 		/* set up random bits */
   4166 		cprng_fast(&r, sizeof(r));
   4167 
   4168 		/* Set up the redirection table */
   4169 		for (i = 0, j = 0; i < 128; i++, j++) {
   4170 			if (j == adapter->num_queues) j = 0;
   4171 			reta = (reta << 8) | (j * 0x11);
   4172 			if ((i & 3) == 3)
   4173 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
   4174 		}
   4175 
   4176 		/* Now fill our hash function seeds */
   4177 		for (i = 0; i < 10; i++)
   4178 			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), r[i]);
   4179 
   4180 		/* Perform hash on these packet types */
   4181 		mrqc = IXGBE_MRQC_RSSEN
   4182 		     | IXGBE_MRQC_RSS_FIELD_IPV4
   4183 		     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
   4184 		     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
   4185 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
   4186 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
   4187 		     | IXGBE_MRQC_RSS_FIELD_IPV6
   4188 		     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
   4189 		     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
   4190 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
   4191 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
   4192 
   4193 		/* RSS and RX IPP Checksum are mutually exclusive */
   4194 		rxcsum |= IXGBE_RXCSUM_PCSD;
   4195 	}
   4196 
   4197 	if (ifp->if_capenable & IFCAP_RXCSUM)
   4198 		rxcsum |= IXGBE_RXCSUM_PCSD;
   4199 
   4200 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   4201 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   4202 
   4203 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   4204 
   4205 	return;
   4206 }
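
/*
 * Worked example, not part of the driver: how the RETA loop above packs
 * one 32-bit redirection register.  With 4 queues the entries cycle
 * 0,1,2,3, and multiplying by 0x11 mirrors the queue number into both
 * nibbles of each byte:
 */
#if 0
static u32
ex_reta_word(void)
{
	u32 reta = 0;

	for (int j = 0; j < 4; j++)
		reta = (reta << 8) | (j * 0x11);
	return reta;	/* == 0x00112233, written to IXGBE_RETA(i >> 2) */
}
#endif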
   4207 
   4208 /*********************************************************************
   4209  *
   4210  *  Free all receive rings.
   4211  *
   4212  **********************************************************************/
   4213 static void
   4214 ixgbe_free_receive_structures(struct adapter *adapter)
   4215 {
   4216 	struct rx_ring *rxr = adapter->rx_rings;
   4217 
   4218 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   4219 #ifdef LRO
   4220 		struct lro_ctrl		*lro = &rxr->lro;
   4221 #endif /* LRO */
   4222 		ixgbe_free_receive_buffers(rxr);
   4223 #ifdef LRO
   4224 		/* Free LRO memory */
   4225 		tcp_lro_free(lro);
   4226 #endif /* LRO */
   4227 		/* Free the ring memory as well */
   4228 		ixgbe_dma_free(adapter, &rxr->rxdma);
   4229 	}
   4230 
   4231 	free(adapter->rx_rings, M_DEVBUF);
   4232 }
    4233
   4235 /*********************************************************************
   4236  *
   4237  *  Free receive ring data structures
   4238  *
   4239  **********************************************************************/
   4240 static void
   4241 ixgbe_free_receive_buffers(struct rx_ring *rxr)
   4242 {
   4243 	struct adapter		*adapter = rxr->adapter;
   4244 	struct ixgbe_rx_buf	*rxbuf;
   4245 
   4246 	INIT_DEBUGOUT("free_receive_structures: begin");
   4247 
   4248 	/* Cleanup any existing buffers */
   4249 	if (rxr->rx_buffers != NULL) {
   4250 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   4251 			rxbuf = &rxr->rx_buffers[i];
   4252 			if (rxbuf->m_head != NULL) {
   4253 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   4254 				    BUS_DMASYNC_POSTREAD);
   4255 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   4256 				rxbuf->m_head->m_flags |= M_PKTHDR;
   4257 				m_freem(rxbuf->m_head);
   4258 			}
   4259 			if (rxbuf->m_pack != NULL) {
   4260 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   4261 				    0, rxbuf->m_pack->m_pkthdr.len,
   4262 				    BUS_DMASYNC_POSTREAD);
   4263 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   4264 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   4265 				m_freem(rxbuf->m_pack);
   4266 			}
   4267 			rxbuf->m_head = NULL;
   4268 			rxbuf->m_pack = NULL;
   4269 			if (rxbuf->hmap != NULL) {
   4270 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   4271 				rxbuf->hmap = NULL;
   4272 			}
   4273 			if (rxbuf->pmap != NULL) {
   4274 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   4275 				rxbuf->pmap = NULL;
   4276 			}
   4277 		}
   4278 		if (rxr->rx_buffers != NULL) {
   4279 			free(rxr->rx_buffers, M_DEVBUF);
   4280 			rxr->rx_buffers = NULL;
   4281 		}
   4282 	}
   4283 
   4284 	if (rxr->htag != NULL) {
   4285 		ixgbe_dma_tag_destroy(rxr->htag);
   4286 		rxr->htag = NULL;
   4287 	}
   4288 	if (rxr->ptag != NULL) {
   4289 		ixgbe_dma_tag_destroy(rxr->ptag);
   4290 		rxr->ptag = NULL;
   4291 	}
   4292 
   4293 	return;
   4294 }
   4295 
   4296 static __inline void
   4297 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   4298 {
   4299 	int s;
   4300 
   4301 #ifdef LRO
   4302 	struct adapter	*adapter = ifp->if_softc;
   4303 	struct ethercom *ec = &adapter->osdep.ec;
   4304 
    4305 	/*
    4306 	 * At the moment LRO is only for IPv4/TCP packets whose TCP
    4307 	 * checksum has been computed by hardware. Also the packet
    4308 	 * should not have a VLAN tag in its ethernet header.
    4309 	 */
    4310 	if (rxr->lro_enabled &&
    4311 	    (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
    4312 	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
    4313 	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
    4314 	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
    4315 	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
    4316 	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
    4317 		/*
    4318 		 * Send to the stack if:
    4319 		 *  - LRO not enabled, or
    4320 		 *  - no LRO resources, or
    4321 		 *  - lro enqueue fails
    4322 		 */
    4323 		if (rxr->lro.lro_cnt != 0)
    4324 			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
    4325 				return;
    4326 	}
   4327 #endif /* LRO */
   4328 
   4329 	IXGBE_RX_UNLOCK(rxr);
   4330 
   4331 	s = splnet();
   4332 	/* Pass this up to any BPF listeners. */
   4333 	bpf_mtap(ifp, m);
   4334 	(*ifp->if_input)(ifp, m);
   4335 	splx(s);
   4336 
   4337 	IXGBE_RX_LOCK(rxr);
   4338 }
   4339 
   4340 static __inline void
   4341 ixgbe_rx_discard(struct rx_ring *rxr, int i)
   4342 {
   4343 	struct ixgbe_rx_buf	*rbuf;
   4344 
   4345 	rbuf = &rxr->rx_buffers[i];
   4346 
    4347 	if (rbuf->fmp != NULL) {	/* Partial chain? */
    4348 		rbuf->fmp->m_flags |= M_PKTHDR;
    4349 		m_freem(rbuf->fmp);
    4350 		rbuf->fmp = NULL;
   4351 	}
   4352 
   4353 	/*
   4354 	** With advanced descriptors the writeback
    4355 	** clobbers the buffer addrs, so it's easier
   4356 	** to just free the existing mbufs and take
   4357 	** the normal refresh path to get new buffers
    4358 	** and mappings.
   4359 	*/
   4360 	if (rbuf->m_head) {
   4361 		m_free(rbuf->m_head);
   4362 		rbuf->m_head = NULL;
   4363 	}
   4364 
   4365 	if (rbuf->m_pack) {
   4366 		m_free(rbuf->m_pack);
   4367 		rbuf->m_pack = NULL;
   4368 	}
   4369 
   4370 	return;
   4371 }
    4372
   4374 /*********************************************************************
   4375  *
   4376  *  This routine executes in interrupt context. It replenishes
   4377  *  the mbufs in the descriptor and sends data which has been
   4378  *  dma'ed into host memory to upper layer.
   4379  *
   4380  *  We loop at most count times if count is > 0, or until done if
   4381  *  count < 0.
   4382  *
   4383  *  Return TRUE for more work, FALSE for all clean.
   4384  *********************************************************************/
   4385 static bool
   4386 ixgbe_rxeof(struct ix_queue *que, int count)
   4387 {
   4388 	struct adapter		*adapter = que->adapter;
   4389 	struct rx_ring		*rxr = que->rxr;
   4390 	struct ifnet		*ifp = adapter->ifp;
   4391 #ifdef LRO
   4392 	struct lro_ctrl		*lro = &rxr->lro;
   4393 	struct lro_entry	*queued;
   4394 #endif /* LRO */
   4395 	int			i, nextp, processed = 0;
   4396 	u32			staterr = 0;
   4397 	union ixgbe_adv_rx_desc	*cur;
   4398 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   4399 
   4400 	IXGBE_RX_LOCK(rxr);
   4401 
   4402 	for (i = rxr->next_to_check; count != 0;) {
   4403 		struct mbuf	*sendmp, *mh, *mp;
   4404 		u32		rsc, ptype;
   4405 		u16		hlen, plen, hdr, vtag;
   4406 		bool		eop;
   4407 
   4408 		/* Sync the ring. */
   4409 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4410 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   4411 
   4412 		cur = &rxr->rx_base[i];
   4413 		staterr = le32toh(cur->wb.upper.status_error);
   4414 
   4415 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   4416 			break;
   4417 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   4418 			break;
   4419 
   4420 		count--;
   4421 		sendmp = NULL;
   4422 		nbuf = NULL;
   4423 		rsc = 0;
   4424 		cur->wb.upper.status_error = 0;
   4425 		rbuf = &rxr->rx_buffers[i];
   4426 		mh = rbuf->m_head;
   4427 		mp = rbuf->m_pack;
   4428 
   4429 		plen = le16toh(cur->wb.upper.length);
   4430 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   4431 		    IXGBE_RXDADV_PKTTYPE_MASK;
   4432 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   4433 		vtag = le16toh(cur->wb.upper.vlan);
   4434 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   4435 
   4436 		/* Make sure bad packets are discarded */
   4437 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   4438 		    (rxr->discard)) {
   4439 			ifp->if_ierrors++;
   4440 			rxr->rx_discarded.ev_count++;
   4441 			if (eop)
   4442 				rxr->discard = FALSE;
   4443 			else
   4444 				rxr->discard = TRUE;
   4445 			ixgbe_rx_discard(rxr, i);
   4446 			goto next_desc;
   4447 		}
   4448 
    4449 		/*
    4450 		** On the 82599, which supports a hardware
    4451 		** LRO (called HW RSC), packets need not
    4452 		** be fragmented across sequential
    4453 		** descriptors; rather, the next descriptor
    4454 		** is indicated in bits of this descriptor.
    4455 		** This also means that we might process
    4456 		** more than one packet at a time, something
    4457 		** that has never been true before; it
    4458 		** required eliminating global chain pointers
    4459 		** in favor of what we are doing here.  -jfv
    4460 		*/
   4461 		if (!eop) {
   4462 			/*
   4463 			** Figure out the next descriptor
   4464 			** of this frame.
   4465 			*/
   4466 			if (rxr->hw_rsc == TRUE) {
   4467 				rsc = ixgbe_rsc_count(cur);
   4468 				rxr->rsc_num += (rsc - 1);
   4469 			}
   4470 			if (rsc) { /* Get hardware index */
   4471 				nextp = ((staterr &
   4472 				    IXGBE_RXDADV_NEXTP_MASK) >>
   4473 				    IXGBE_RXDADV_NEXTP_SHIFT);
   4474 			} else { /* Just sequential */
   4475 				nextp = i + 1;
   4476 				if (nextp == adapter->num_rx_desc)
   4477 					nextp = 0;
   4478 			}
   4479 			nbuf = &rxr->rx_buffers[nextp];
   4480 			prefetch(nbuf);
   4481 		}
   4482 		/*
   4483 		** The header mbuf is ONLY used when header
    4484 		** split is enabled; otherwise we get normal
    4485 		** behavior, i.e., both header and payload
   4486 		** are DMA'd into the payload buffer.
   4487 		**
   4488 		** Rather than using the fmp/lmp global pointers
   4489 		** we now keep the head of a packet chain in the
   4490 		** buffer struct and pass this along from one
   4491 		** descriptor to the next, until we get EOP.
   4492 		*/
   4493 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   4494 			/* This must be an initial descriptor */
   4495 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   4496 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   4497 			if (hlen > IXGBE_RX_HDR)
   4498 				hlen = IXGBE_RX_HDR;
   4499 			mh->m_len = hlen;
   4500 			mh->m_flags |= M_PKTHDR;
   4501 			mh->m_next = NULL;
   4502 			mh->m_pkthdr.len = mh->m_len;
   4503 			/* Null buf pointer so it is refreshed */
   4504 			rbuf->m_head = NULL;
   4505 			/*
    4506 			** Check the payload length; this
    4507 			** could be zero if it's a small
   4508 			** packet.
   4509 			*/
   4510 			if (plen > 0) {
   4511 				mp->m_len = plen;
   4512 				mp->m_next = NULL;
   4513 				mp->m_flags &= ~M_PKTHDR;
   4514 				mh->m_next = mp;
   4515 				mh->m_pkthdr.len += mp->m_len;
   4516 				/* Null buf pointer so it is refreshed */
   4517 				rbuf->m_pack = NULL;
   4518 				rxr->rx_split_packets.ev_count++;
   4519 			}
   4520 			/*
   4521 			** Now create the forward
    4522 			** chain so that when complete
    4523 			** we won't have to.
   4524 			*/
    4525 			if (eop == 0) {
    4526 				/* stash the chain head */
    4527 				nbuf->fmp = mh;
    4528 				/* Make forward chain */
    4529 				if (plen)
    4530 					mp->m_next = nbuf->m_pack;
    4531 				else
    4532 					mh->m_next = nbuf->m_pack;
    4533 			} else {
    4534 				/* Singlet, prepare to send */
    4535 				sendmp = mh;
    4536 				if (VLAN_ATTACHED(&adapter->osdep.ec) &&
    4537 				    (staterr & IXGBE_RXD_STAT_VP)) {
    4538 					/* XXX Do something reasonable on
    4539 					 * error.
    4540 					 */
    4541 #if 0
    4542 					printf("%s.%d: VLAN_INPUT_TAG\n",
    4543 					    __func__, __LINE__);
    4544 					Debugger();
    4545 #endif
    4546 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
    4547 					    printf("%s: could not apply VLAN "
    4548 					        "tag", __func__));
    4549 				}
    4550 			}
   4551 		} else {
   4552 			/*
   4553 			** Either no header split, or a
   4554 			** secondary piece of a fragmented
   4555 			** split packet.
   4556 			*/
   4557 			mp->m_len = plen;
   4558 			/*
   4559 			** See if there is a stored head
    4560 			** that determines what this is
   4561 			*/
   4562 			sendmp = rbuf->fmp;
   4563 			rbuf->m_pack = rbuf->fmp = NULL;
   4564 
   4565 			if (sendmp != NULL) /* secondary frag */
   4566 				sendmp->m_pkthdr.len += mp->m_len;
   4567 			else {
   4568 				/* first desc of a non-ps chain */
   4569 				sendmp = mp;
   4570 				sendmp->m_flags |= M_PKTHDR;
   4571 				sendmp->m_pkthdr.len = mp->m_len;
   4572 				if (staterr & IXGBE_RXD_STAT_VP) {
   4573 					/* XXX Do something reasonable on
   4574 					 * error.
   4575 					 */
   4576 #if 0
   4577 					printf("%s.%d: VLAN_INPUT_TAG\n",
   4578 					    __func__, __LINE__);
   4579 					Debugger();
   4580 #endif
   4581 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   4582 					    printf("%s: could not apply VLAN "
   4583 					        "tag", __func__));
   4584 				}
    4585 			}
   4586 			/* Pass the head pointer on */
   4587 			if (eop == 0) {
   4588 				nbuf->fmp = sendmp;
   4589 				sendmp = NULL;
   4590 				mp->m_next = nbuf->m_pack;
   4591 			}
   4592 		}
   4593 		++processed;
   4594 		/* Sending this frame? */
   4595 		if (eop) {
   4596 			sendmp->m_pkthdr.rcvif = ifp;
   4597 			ifp->if_ipackets++;
   4598 			rxr->rx_packets.ev_count++;
   4599 			/* capture data for AIM */
   4600 			rxr->bytes += sendmp->m_pkthdr.len;
   4601 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   4602 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   4603 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   4604 				   &adapter->stats);
   4605 			}
   4606 #if __FreeBSD_version >= 800000
   4607 			sendmp->m_pkthdr.flowid = que->msix;
   4608 			sendmp->m_flags |= M_FLOWID;
   4609 #endif
   4610 		}
   4611 next_desc:
   4612 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4613 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   4614 
   4615 		/* Advance our pointers to the next descriptor. */
   4616 		if (++i == adapter->num_rx_desc)
   4617 			i = 0;
   4618 
   4619 		/* Now send to the stack or do LRO */
   4620 		if (sendmp != NULL) {
   4621 			rxr->next_to_check = i;
   4622 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   4623 			i = rxr->next_to_check;
   4624 		}
   4625 
    4626 		/* Every 8 descriptors we refresh the mbufs */
   4627 		if (processed == 8) {
   4628 			ixgbe_refresh_mbufs(rxr, i);
   4629 			processed = 0;
   4630 		}
   4631 	}
   4632 
   4633 	/* Refresh any remaining buf structs */
   4634 	if (ixgbe_rx_unrefreshed(rxr))
   4635 		ixgbe_refresh_mbufs(rxr, i);
   4636 
   4637 	rxr->next_to_check = i;
   4638 
   4639 #ifdef LRO
   4640 	/*
   4641 	 * Flush any outstanding LRO work
   4642 	 */
   4643 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   4644 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   4645 		tcp_lro_flush(lro, queued);
   4646 	}
   4647 #endif /* LRO */
   4648 
   4649 	IXGBE_RX_UNLOCK(rxr);
   4650 
   4651 	/*
   4652 	** We still have cleaning to do?
   4653 	** Schedule another interrupt if so.
   4654 	*/
   4655 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   4656 		ixgbe_rearm_queues(adapter, (u64)(1ULL << que->msix));
   4657 		return true;
   4658 	}
   4659 
   4660 	return false;
   4661 }
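
/*
 * Worked example, not part of the driver: for an RSC-merged descriptor,
 * ixgbe_rxeof() above extracts the ring index of the frame's next
 * descriptor from the status word.  For a hypothetical staterr of
 * 0x00000250:
 */
#if 0
static const int ex_nextp =
    (0x00000250 & IXGBE_RXDADV_NEXTP_MASK) >> IXGBE_RXDADV_NEXTP_SHIFT;
						/* == 0x25, descriptor 37 */
#endif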
    4662
   4664 /*********************************************************************
   4665  *
   4666  *  Verify that the hardware indicated that the checksum is valid.
    4667  *  Inform the stack about the status of the checksum so that the
    4668  *  stack doesn't spend time verifying it.
   4669  *
   4670  *********************************************************************/
   4671 static void
   4672 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   4673     struct ixgbe_hw_stats *stats)
   4674 {
   4675 	u16	status = (u16) staterr;
   4676 	u8	errors = (u8) (staterr >> 24);
   4677 #if 0
   4678 	bool	sctp = FALSE;
   4679 
   4680 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   4681 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   4682 		sctp = TRUE;
   4683 #endif
   4684 
   4685 	if (status & IXGBE_RXD_STAT_IPCS) {
   4686 		stats->ipcs.ev_count++;
   4687 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   4688 			/* IP Checksum Good */
   4689 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   4690 
   4691 		} else {
   4692 			stats->ipcs_bad.ev_count++;
   4693 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   4694 		}
   4695 	}
   4696 	if (status & IXGBE_RXD_STAT_L4CS) {
   4697 		stats->l4cs.ev_count++;
   4698 		u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   4699 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   4700 			mp->m_pkthdr.csum_flags |= type;
   4701 		} else {
   4702 			stats->l4cs_bad.ev_count++;
   4703 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   4704 		}
   4705 	}
   4706 	return;
   4707 }
   4708 
   4709 
   4710 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
/*
** This routine is run via a vlan config EVENT; it
** enables us to use the HW Filter table since we
** can get the vlan id. This just creates the entry
** in the soft version of the VFTA; init will
** repopulate the real table.
*/
   4718 static void
   4719 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   4720 {
   4721 	struct adapter	*adapter = ifp->if_softc;
   4722 	u16		index, bit;
   4723 
   4724 	if (ifp->if_softc !=  arg)   /* Not our event */
   4725 		return;
   4726 
   4727 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   4728 		return;
   4729 
   4730 	IXGBE_CORE_LOCK(adapter);
   4731 	index = (vtag >> 5) & 0x7F;
   4732 	bit = vtag & 0x1F;
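	/* e.g. vtag 100: index = (100 >> 5) & 0x7F = 3, bit = 100 & 0x1F = 4 */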
   4733 	adapter->shadow_vfta[index] |= (1 << bit);
   4734 	ixgbe_init_locked(adapter);
   4735 	IXGBE_CORE_UNLOCK(adapter);
   4736 }
   4737 
/*
** This routine is run via a vlan
** unconfig EVENT; it removes our
** entry from the soft VFTA.
*/
   4743 static void
   4744 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   4745 {
   4746 	struct adapter	*adapter = ifp->if_softc;
   4747 	u16		index, bit;
   4748 
   4749 	if (ifp->if_softc !=  arg)
   4750 		return;
   4751 
   4752 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   4753 		return;
   4754 
   4755 	IXGBE_CORE_LOCK(adapter);
   4756 	index = (vtag >> 5) & 0x7F;
   4757 	bit = vtag & 0x1F;
   4758 	adapter->shadow_vfta[index] &= ~(1 << bit);
   4759 	/* Re-init to load the changes */
   4760 	ixgbe_init_locked(adapter);
   4761 	IXGBE_CORE_UNLOCK(adapter);
   4762 }
   4763 #endif
   4764 
   4765 static void
   4766 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   4767 {
   4768 	struct ethercom *ec = &adapter->osdep.ec;
   4769 	struct ixgbe_hw *hw = &adapter->hw;
   4770 	u32		ctrl;
   4771 
	/*
	** We get here through init_locked, meaning
	** a soft reset; that has already cleared
	** the VFTA and other state, so if no vlans
	** have been registered there is nothing to do.
	*/
	if (!VLAN_ATTACHED(ec)) {
		return;
	}
   4781 
	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
   4786 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
   4787 		if (adapter->shadow_vfta[i] != 0)
   4788 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   4789 			    adapter->shadow_vfta[i]);
   4790 
   4791 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   4792 	/* Enable the Filter Table if enabled */
   4793 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   4794 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   4795 		ctrl |= IXGBE_VLNCTRL_VFE;
   4796 	}
   4797 	if (hw->mac.type == ixgbe_mac_82598EB)
   4798 		ctrl |= IXGBE_VLNCTRL_VME;
   4799 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   4800 
	/* On 82599 the VLAN enable is per-queue in RXDCTL */
   4802 	if (hw->mac.type != ixgbe_mac_82598EB)
   4803 		for (int i = 0; i < adapter->num_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
   4807 		}
   4808 }
   4809 
   4810 static void
   4811 ixgbe_enable_intr(struct adapter *adapter)
   4812 {
   4813 	struct ixgbe_hw *hw = &adapter->hw;
   4814 	struct ix_queue *que = adapter->queues;
   4815 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4816 
   4817 
   4818 	/* Enable Fan Failure detection */
   4819 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		mask |= IXGBE_EIMS_GPI_SDP1;
	else {
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
#ifdef IXGBE_FDIR
		mask |= IXGBE_EIMS_FLOW_DIR;
#endif
   4828 	}
   4829 
   4830 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4831 
   4832 	/* With RSS we use auto clear */
   4833 	if (adapter->msix_mem) {
   4834 		mask = IXGBE_EIMS_ENABLE_MASK;
   4835 		/* Don't autoclear Link */
   4836 		mask &= ~IXGBE_EIMS_OTHER;
   4837 		mask &= ~IXGBE_EIMS_LSC;
   4838 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4839 	}
   4840 
	/*
	** Now enable all queues; this is done separately to
	** allow for handling the extended (beyond 32) MSIX
	** vectors that can be used by the 82599.
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);
   4848 
   4849 	IXGBE_WRITE_FLUSH(hw);
   4850 
   4851 	return;
   4852 }
   4853 
   4854 static void
   4855 ixgbe_disable_intr(struct adapter *adapter)
   4856 {
   4857 	if (adapter->msix_mem)
   4858 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4859 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4860 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4861 	} else {
   4862 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4863 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4864 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4865 	}
   4866 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4867 	return;
   4868 }
   4869 
   4870 u16
   4871 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
   4872 {
   4873 	switch (reg % 4) {
   4874 	case 0:
   4875 		return pci_conf_read(hw->back->pc, hw->back->tag, reg) &
   4876 		    __BITS(15, 0);
   4877 	case 2:
   4878 		return __SHIFTOUT(pci_conf_read(hw->back->pc, hw->back->tag,
   4879 		    reg - 2), __BITS(31, 16));
   4880 	default:
		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
   4882 		break;
   4883 	}
   4884 }
   4885 
   4886 void
   4887 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
   4888 {
   4889 	pcireg_t old;
   4890 
   4891 	switch (reg % 4) {
   4892 	case 0:
   4893 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg) &
   4894 		    __BITS(31, 16);
   4895 		pci_conf_write(hw->back->pc, hw->back->tag, reg, value | old);
   4896 		break;
   4897 	case 2:
   4898 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg - 2) &
   4899 		    __BITS(15, 0);
   4900 		pci_conf_write(hw->back->pc, hw->back->tag, reg - 2,
   4901 		    __SHIFTIN(value, __BITS(31, 16)) | old);
   4902 		break;
   4903 	default:
		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
   4905 		break;
   4906 	}
   4907 
   4908 	return;
   4909 }
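
/*
 * A minimal sketch of how the 16-bit accessors above map onto the
 * 32-bit pci_conf_read()/pci_conf_write() primitives: a word at
 * offset 2 within a dword is fetched by reading the containing
 * dword and shifting out bits 31:16, so e.g.
 *
 *	u16 devid = ixgbe_read_pci_cfg(hw, PCI_ID_REG + 2);
 *
 * returns the device ID half of the vendor/device register.
 */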
   4910 
   4911 /*
   4912 ** Setup the correct IVAR register for a particular MSIX interrupt
   4913 **   (yes this is all very magic and confusing :)
   4914 **  - entry is the register array entry
   4915 **  - vector is the MSIX vector for this queue
   4916 **  - type is RX/TX/MISC
   4917 */
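/*
** For example, on 82598 an RX entry for queue 5 (type 0) stays at
** entry 5: index = (5 >> 2) & 0x1F = 1 selects IVAR(1), and the
** vector lands in byte lane (5 & 0x3) = 1, i.e. bits 15:8.
*/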
   4918 static void
   4919 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   4920 {
   4921 	struct ixgbe_hw *hw = &adapter->hw;
   4922 	u32 ivar, index;
   4923 
   4924 	vector |= IXGBE_IVAR_ALLOC_VAL;
   4925 
   4926 	switch (hw->mac.type) {
   4927 
   4928 	case ixgbe_mac_82598EB:
   4929 		if (type == -1)
   4930 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   4931 		else
   4932 			entry += (type * 64);
   4933 		index = (entry >> 2) & 0x1F;
   4934 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   4935 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   4936 		ivar |= (vector << (8 * (entry & 0x3)));
   4937 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   4938 		break;
   4939 
   4940 	case ixgbe_mac_82599EB:
   4941 		if (type == -1) { /* MISC IVAR */
   4942 			index = (entry & 1) * 8;
   4943 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   4944 			ivar &= ~(0xFF << index);
   4945 			ivar |= (vector << index);
   4946 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   4947 		} else {	/* RX/TX IVARS */
   4948 			index = (16 * (entry & 1)) + (8 * type);
   4949 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   4950 			ivar &= ~(0xFF << index);
   4951 			ivar |= (vector << index);
   4952 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;

   4955 	default:
   4956 		break;
   4957 	}
   4958 }
   4959 
   4960 static void
   4961 ixgbe_configure_ivars(struct adapter *adapter)
   4962 {
   4963 	struct  ix_queue *que = adapter->queues;
   4964 	u32 newitr;
   4965 
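	/*
	 * The EITR interval field lives in bits 11:3, hence the
	 * 0x0FF8 mask below; e.g. a cap of 31250 interrupts/sec
	 * gives 8000000 / 31250 = 256 = 0x100, which the mask
	 * leaves intact.
	 */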
   4966 	if (ixgbe_max_interrupt_rate > 0)
   4967 		newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4968 	else
   4969 		newitr = 0;
   4970 
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixgbe_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(adapter, i, que->msix, 1);
		/* Set an Initial EITR value */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_EITR(que->msix), newitr);
	}

	/* For the Link interrupt */
	ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
   4983 }
   4984 
   4985 /*
   4986 ** ixgbe_sfp_probe - called in the local timer to
   4987 ** determine if a port had optics inserted.
   4988 */
static bool
ixgbe_sfp_probe(struct adapter *adapter)
   4990 {
   4991 	struct ixgbe_hw	*hw = &adapter->hw;
   4992 	device_t	dev = adapter->dev;
   4993 	bool		result = FALSE;
   4994 
   4995 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4996 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4997 		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module detected!\n");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			adapter->sfp_probe = FALSE;
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
   5008 		/* We now have supported optics */
   5009 		adapter->sfp_probe = FALSE;
   5010 		/* Set the optics type so system reports correctly */
   5011 		ixgbe_setup_optics(adapter);
   5012 		result = TRUE;
   5013 	}
   5014 out:
   5015 	return (result);
   5016 }
   5017 
   5018 /*
** Tasklet handler for MSIX Link interrupts
**  - run outside the interrupt handler since it might sleep
   5021 */
   5022 static void
   5023 ixgbe_handle_link(void *context)
   5024 {
   5025 	struct adapter  *adapter = context;
   5026 
   5027 	if (ixgbe_check_link(&adapter->hw,
   5028 	    &adapter->link_speed, &adapter->link_up, 0) == 0)
		ixgbe_update_link_status(adapter);
   5030 }
   5031 
   5032 /*
   5033 ** Tasklet for handling SFP module interrupts
   5034 */
   5035 static void
   5036 ixgbe_handle_mod(void *context)
   5037 {
   5038 	struct adapter  *adapter = context;
   5039 	struct ixgbe_hw *hw = &adapter->hw;
   5040 	device_t	dev = adapter->dev;
   5041 	u32 err;
   5042 
   5043 	err = hw->phy.ops.identify_sfp(hw);
   5044 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   5045 		device_printf(dev,
   5046 		    "Unsupported SFP+ module type was detected.\n");
   5047 		return;
   5048 	}
   5049 	err = hw->mac.ops.setup_sfp(hw);
   5050 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   5051 		device_printf(dev,
   5052 		    "Setup failure - unsupported SFP+ module type.\n");
   5053 		return;
   5054 	}
   5055 	softint_schedule(adapter->msf_si);
   5056 	return;
   5057 }
   5058 
   5059 
   5060 /*
   5061 ** Tasklet for handling MSF (multispeed fiber) interrupts
   5062 */
   5063 static void
   5064 ixgbe_handle_msf(void *context)
   5065 {
   5066 	struct adapter  *adapter = context;
   5067 	struct ixgbe_hw *hw = &adapter->hw;
   5068 	u32 autoneg;
   5069 	bool negotiate;
   5070 
   5071 	autoneg = hw->phy.autoneg_advertised;
   5072 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   5073 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   5074 	else
   5075 		negotiate = 0;
   5076 	if (hw->mac.ops.setup_link)
   5077 		hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
   5078 	return;
   5079 }
   5080 
   5081 #ifdef IXGBE_FDIR
   5082 /*
   5083 ** Tasklet for reinitializing the Flow Director filter table
   5084 */
   5085 static void
   5086 ixgbe_reinit_fdir(void *context)
   5087 {
   5088 	struct adapter  *adapter = context;
   5089 	struct ifnet   *ifp = adapter->ifp;
   5090 
   5091 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
   5092 		return;
   5093 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
   5094 	adapter->fdir_reinit = 0;
   5095 	/* Restart the interface */
   5096 	ifp->if_flags |= IFF_RUNNING;
   5097 	return;
   5098 }
   5099 #endif
   5100 
   5101 /**********************************************************************
   5102  *
   5103  *  Update the board statistics counters.
   5104  *
   5105  **********************************************************************/
   5106 static void
   5107 ixgbe_update_stats_counters(struct adapter *adapter)
   5108 {
   5109 	struct ifnet   *ifp = adapter->ifp;
   5110 	struct ixgbe_hw *hw = &adapter->hw;
   5111 	u32  missed_rx = 0, bprc, lxon, lxoff, total;
   5112 	u64  total_missed_rx = 0;
   5113 
   5114 	adapter->stats.crcerrs.ev_count += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   5115 	adapter->stats.illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   5116 	adapter->stats.errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   5117 	adapter->stats.mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   5118 
   5119 	for (int i = 0; i < __arraycount(adapter->stats.mpc); i++) {
   5120 		int j = i % adapter->num_queues;
   5121 		u32 mp;
   5122 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   5123 		/* missed_rx tallies misses for the gprc workaround */
   5124 		missed_rx += mp;
   5125 		/* global total per queue */
		adapter->stats.mpc[j].ev_count += mp;
   5127 		/* Running comprehensive total for stats display */
   5128 		total_missed_rx += adapter->stats.mpc[j].ev_count;
   5129 		if (hw->mac.type == ixgbe_mac_82598EB)
   5130 			adapter->stats.rnbc[j] +=
   5131 			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   5132 		adapter->stats.pxontxc[j].ev_count +=
   5133 		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   5134 		adapter->stats.pxonrxc[j].ev_count +=
   5135 		    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   5136 		adapter->stats.pxofftxc[j].ev_count +=
   5137 		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   5138 		adapter->stats.pxoffrxc[j].ev_count +=
   5139 		    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   5140 		adapter->stats.pxon2offc[j].ev_count +=
   5141 		    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   5142 	}
   5143 	for (int i = 0; i < __arraycount(adapter->stats.qprc); i++) {
   5144 		int j = i % adapter->num_queues;
   5145 		adapter->stats.qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   5146 		adapter->stats.qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   5147 		adapter->stats.qbrc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
   5148 		adapter->stats.qbrc[j].ev_count +=
   5149 		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
   5150 		adapter->stats.qbtc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
   5151 		adapter->stats.qbtc[j].ev_count +=
   5152 		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
   5153 		adapter->stats.qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   5154 	}
   5155 	adapter->stats.mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   5156 	adapter->stats.mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   5157 	adapter->stats.rlec.ev_count += IXGBE_READ_REG(hw, IXGBE_RLEC);
   5158 
   5159 	/* Hardware workaround, gprc counts missed packets */
   5160 	adapter->stats.gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   5161 
   5162 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   5163 	adapter->stats.lxontxc.ev_count += lxon;
   5164 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   5165 	adapter->stats.lxofftxc.ev_count += lxoff;
   5166 	total = lxon + lxoff;
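	/*
	 * lxon + lxoff counts transmitted pause frames, which several
	 * of the TX counters below include; "total" is used to back
	 * them out.
	 */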
   5167 
   5168 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5169 		adapter->stats.gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   5170 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   5171 		adapter->stats.gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   5172 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   5173 		adapter->stats.tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   5174 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   5175 		adapter->stats.lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   5176 		adapter->stats.lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   5177 	} else {
   5178 		adapter->stats.lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   5179 		adapter->stats.lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   5180 		/* 82598 only has a counter in the high register */
   5181 		adapter->stats.gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   5182 		adapter->stats.gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   5183 		adapter->stats.tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   5184 	}
   5185 
	/*
	 * Workaround: the mprc hardware counter incorrectly includes
	 * broadcasts, so for now we subtract those on the 82598.
	 */
   5190 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   5191 	adapter->stats.bprc.ev_count += bprc;
	adapter->stats.mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) -
	    ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   5193 
   5194 	adapter->stats.prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   5195 	adapter->stats.prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   5196 	adapter->stats.prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   5197 	adapter->stats.prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   5198 	adapter->stats.prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   5199 	adapter->stats.prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   5200 
   5201 	adapter->stats.gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   5202 	adapter->stats.mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   5203 	adapter->stats.ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   5204 
   5205 	adapter->stats.ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   5206 	adapter->stats.rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   5207 	adapter->stats.roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   5208 	adapter->stats.rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   5209 	adapter->stats.mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   5210 	adapter->stats.mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   5211 	adapter->stats.mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   5212 	adapter->stats.tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   5213 	adapter->stats.tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   5214 	adapter->stats.ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   5215 	adapter->stats.ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   5216 	adapter->stats.ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   5217 	adapter->stats.ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   5218 	adapter->stats.ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   5219 	adapter->stats.bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   5220 	adapter->stats.xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   5221 	adapter->stats.fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   5222 	adapter->stats.fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   5223 
   5224 	/* Only read FCOE on 82599 */
   5225 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5226 		adapter->stats.fcoerpdc.ev_count +=
   5227 		    IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   5228 		adapter->stats.fcoeprc.ev_count +=
   5229 		    IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   5230 		adapter->stats.fcoeptc.ev_count +=
   5231 		    IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   5232 		adapter->stats.fcoedwrc.ev_count +=
   5233 		    IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   5234 		adapter->stats.fcoedwtc.ev_count +=
   5235 		    IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   5236 	}
   5237 
   5238 	/* Fill out the OS statistics structure */
   5239 	ifp->if_ipackets = adapter->stats.gprc.ev_count;
   5240 	ifp->if_opackets = adapter->stats.gptc.ev_count;
   5241 	ifp->if_ibytes = adapter->stats.gorc.ev_count;
   5242 	ifp->if_obytes = adapter->stats.gotc.ev_count;
   5243 	ifp->if_imcasts = adapter->stats.mprc.ev_count;
   5244 	ifp->if_collisions = 0;
   5245 
   5246 	/* Rx Errors */
   5247 	ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs.ev_count +
   5248 		adapter->stats.rlec.ev_count;
   5249 }
   5250 
   5251 /** ixgbe_sysctl_tdh_handler - Handler function
   5252  *  Retrieves the TDH value from the hardware
   5253  */
   5254 static int
   5255 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   5256 {
   5257 	struct sysctlnode node;
   5258 	uint32_t val;
   5259 	struct tx_ring *txr;
   5260 
   5261 	node = *rnode;
   5262 	txr = (struct tx_ring *)node.sysctl_data;
   5263 	if (txr == NULL)
   5264 		return 0;
   5265 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   5266 	node.sysctl_data = &val;
   5267 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5268 }
   5269 
   5270 /** ixgbe_sysctl_tdt_handler - Handler function
   5271  *  Retrieves the TDT value from the hardware
   5272  */
   5273 static int
   5274 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   5275 {
   5276 	struct sysctlnode node;
   5277 	uint32_t val;
   5278 	struct tx_ring *txr;
   5279 
   5280 	node = *rnode;
   5281 	txr = (struct tx_ring *)node.sysctl_data;
   5282 	if (txr == NULL)
   5283 		return 0;
   5284 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   5285 	node.sysctl_data = &val;
   5286 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5287 }
   5288 
   5289 /** ixgbe_sysctl_rdh_handler - Handler function
   5290  *  Retrieves the RDH value from the hardware
   5291  */
   5292 static int
   5293 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   5294 {
   5295 	struct sysctlnode node;
   5296 	uint32_t val;
   5297 	struct rx_ring *rxr;
   5298 
   5299 	node = *rnode;
   5300 	rxr = (struct rx_ring *)node.sysctl_data;
   5301 	if (rxr == NULL)
   5302 		return 0;
   5303 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   5304 	node.sysctl_data = &val;
   5305 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5306 }
   5307 
   5308 /** ixgbe_sysctl_rdt_handler - Handler function
   5309  *  Retrieves the RDT value from the hardware
   5310  */
   5311 static int
   5312 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   5313 {
   5314 	struct sysctlnode node;
   5315 	uint32_t val;
   5316 	struct rx_ring *rxr;
   5317 
   5318 	node = *rnode;
   5319 	rxr = (struct rx_ring *)node.sysctl_data;
   5320 	if (rxr == NULL)
   5321 		return 0;
   5322 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   5323 	node.sysctl_data = &val;
   5324 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5325 }
   5326 
   5327 static int
   5328 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   5329 {
   5330 	struct sysctlnode node;
   5331 	struct ix_queue *que;
   5332 	uint32_t reg, usec, rate;
   5333 
   5334 	node = *rnode;
   5335 	que = (struct ix_queue *)node.sysctl_data;
   5336 	if (que == NULL)
   5337 		return 0;
   5338 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
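	/*
	 * Bits 11:3 of EITR hold the interval field; the handler
	 * reports it as interrupts/sec, treating the decoded value
	 * as microseconds.
	 */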
   5339 	usec = ((reg & 0x0FF8) >> 3);
   5340 	if (usec > 0)
   5341 		rate = 1000000 / usec;
   5342 	else
   5343 		rate = 0;
   5344 	node.sysctl_data = &rate;
   5345 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5346 }
   5347 
   5348 const struct sysctlnode *
   5349 ixgbe_sysctl_instance(struct adapter *adapter)
   5350 {
   5351 	const char *dvname;
   5352 	struct sysctllog **log;
   5353 	int rc;
   5354 	const struct sysctlnode *rnode;
   5355 
   5356 	log = &adapter->sysctllog;
   5357 	dvname = device_xname(adapter->dev);
   5358 
   5359 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   5360 	    0, CTLTYPE_NODE, dvname,
   5361 	    SYSCTL_DESCR("ixgbe information and settings"),
   5362 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   5363 		goto err;
   5364 
   5365 	return rnode;
   5366 err:
   5367 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   5368 	return NULL;
   5369 }
   5370 
   5371 /*
   5372  * Add sysctl variables, one per statistic, to the system.
   5373  */
   5374 static void
   5375 ixgbe_add_hw_stats(struct adapter *adapter)
   5376 {
   5377 	device_t dev = adapter->dev;
   5378 	const struct sysctlnode *rnode, *cnode;
   5379 	struct sysctllog **log = &adapter->sysctllog;
   5380 	struct tx_ring *txr = adapter->tx_rings;
   5381 	struct rx_ring *rxr = adapter->rx_rings;
   5382 	struct ixgbe_hw	 *hw = &adapter->hw;
   5383 
   5384 	struct ixgbe_hw_stats *stats = &adapter->stats;
   5385 
   5386 	/* Driver Statistics */
   5387 #if 0
   5388 	/* These counters are not updated by the software */
   5389 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
   5390 			CTLFLAG_RD, &adapter->dropped_pkts,
   5391 			"Driver dropped packets");
   5392 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_header_failed",
   5393 			CTLFLAG_RD, &adapter->mbuf_header_failed,
   5394 			"???");
   5395 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_packet_failed",
   5396 			CTLFLAG_RD, &adapter->mbuf_packet_failed,
   5397 			"???");
   5398 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_map_avail",
   5399 			CTLFLAG_RD, &adapter->no_tx_map_avail,
   5400 			"???");
   5401 #endif
   5402 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   5403 	    NULL, device_xname(dev), "Handled queue in softint");
   5404 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   5405 	    NULL, device_xname(dev), "Requeued in softint");
   5406 	evcnt_attach_dynamic(&adapter->morerx, EVCNT_TYPE_MISC,
   5407 	    NULL, device_xname(dev), "Interrupt handler more rx");
   5408 	evcnt_attach_dynamic(&adapter->moretx, EVCNT_TYPE_MISC,
   5409 	    NULL, device_xname(dev), "Interrupt handler more tx");
   5410 	evcnt_attach_dynamic(&adapter->txloops, EVCNT_TYPE_MISC,
   5411 	    NULL, device_xname(dev), "Interrupt handler tx loops");
   5412 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   5413 	    NULL, device_xname(dev), "Driver tx dma soft fail EFBIG");
   5414 	evcnt_attach_dynamic(&adapter->m_defrag_failed, EVCNT_TYPE_MISC,
   5415 	    NULL, device_xname(dev), "m_defrag() failed");
   5416 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   5417 	    NULL, device_xname(dev), "Driver tx dma hard fail EFBIG");
   5418 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   5419 	    NULL, device_xname(dev), "Driver tx dma hard fail EINVAL");
   5420 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   5421 	    NULL, device_xname(dev), "Driver tx dma hard fail other");
   5422 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   5423 	    NULL, device_xname(dev), "Driver tx dma soft fail EAGAIN");
   5424 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   5425 	    NULL, device_xname(dev), "Driver tx dma soft fail ENOMEM");
   5426 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   5427 	    NULL, device_xname(dev), "Watchdog timeouts");
   5428 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   5429 	    NULL, device_xname(dev), "TSO errors");
   5430 	evcnt_attach_dynamic(&adapter->tso_tx, EVCNT_TYPE_MISC,
   5431 	    NULL, device_xname(dev), "TSO");
   5432 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_MISC,
   5433 	    NULL, device_xname(dev), "Link MSIX IRQ Handled");
   5434 
   5435 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   5436 		snprintf(adapter->queues[i].evnamebuf,
   5437 		    sizeof(adapter->queues[i].evnamebuf), "%s queue%d",
   5438 		    device_xname(dev), i);
   5439 		snprintf(adapter->queues[i].namebuf,
   5440 		    sizeof(adapter->queues[i].namebuf), "queue%d", i);
   5441 
   5442 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   5443 			aprint_error_dev(dev, "could not create sysctl root\n");
   5444 			break;
   5445 		}
   5446 
   5447 		if (sysctl_createv(log, 0, &rnode, &rnode,
   5448 		    0, CTLTYPE_NODE,
   5449 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   5450 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5451 			break;
   5452 
   5453 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5454 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5455 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   5456 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   5457 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   5458 			break;
   5459 
   5460 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5461 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5462 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   5463 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   5464 		    0, CTL_CREATE, CTL_EOL) != 0)
   5465 			break;
   5466 
   5467 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5468 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5469 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   5470 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   5471 		    0, CTL_CREATE, CTL_EOL) != 0)
   5472 			break;
   5473 
   5474 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   5475 		    NULL, adapter->queues[i].evnamebuf,
   5476 		    "Queue No Descriptor Available");
   5477 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   5478 		    NULL, adapter->queues[i].evnamebuf,
   5479 		    "Queue Packets Transmitted");
   5480 
   5481 #ifdef LRO
   5482 		struct lro_ctrl *lro = &rxr->lro;
   5483 #endif /* LRO */
   5484 
   5485 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5486 		    CTLFLAG_READONLY,
   5487 		    CTLTYPE_INT,
   5488 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   5489 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   5490 		    CTL_CREATE, CTL_EOL) != 0)
   5491 			break;
   5492 
   5493 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5494 		    CTLFLAG_READONLY,
   5495 		    CTLTYPE_INT,
   5496 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   5497 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   5498 		    CTL_CREATE, CTL_EOL) != 0)
   5499 			break;
   5500 
   5501 		if (i < __arraycount(adapter->stats.mpc)) {
   5502 			evcnt_attach_dynamic(&adapter->stats.mpc[i],
   5503 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5504 			    "Missed Packet Count");
   5505 		}
   5506 		if (i < __arraycount(adapter->stats.pxontxc)) {
   5507 			evcnt_attach_dynamic(&adapter->stats.pxontxc[i],
   5508 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5509 			    "pxontxc");
   5510 			evcnt_attach_dynamic(&adapter->stats.pxonrxc[i],
   5511 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5512 			    "pxonrxc");
   5513 			evcnt_attach_dynamic(&adapter->stats.pxofftxc[i],
   5514 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5515 			    "pxofftxc");
   5516 			evcnt_attach_dynamic(&adapter->stats.pxoffrxc[i],
   5517 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5518 			    "pxoffrxc");
   5519 			evcnt_attach_dynamic(&adapter->stats.pxon2offc[i],
   5520 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5521 			    "pxon2offc");
   5522 		}
   5523 		if (i < __arraycount(adapter->stats.qprc)) {
   5524 			evcnt_attach_dynamic(&adapter->stats.qprc[i],
   5525 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5526 			    "qprc");
   5527 			evcnt_attach_dynamic(&adapter->stats.qptc[i],
   5528 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5529 			    "qptc");
   5530 			evcnt_attach_dynamic(&adapter->stats.qbrc[i],
   5531 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5532 			    "qbrc");
   5533 			evcnt_attach_dynamic(&adapter->stats.qbtc[i],
   5534 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5535 			    "qbtc");
   5536 			evcnt_attach_dynamic(&adapter->stats.qprdc[i],
   5537 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5538 			    "qprdc");
   5539 		}
   5540 
   5541 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   5542 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   5543 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   5544 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   5545 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   5546 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   5547 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   5548 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   5549 		evcnt_attach_dynamic(&rxr->rx_split_packets, EVCNT_TYPE_MISC,
   5550 		    NULL, adapter->queues[i].evnamebuf, "Rx split packets");
   5551 		evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_MISC,
   5552 		    NULL, adapter->queues[i].evnamebuf, "Rx interrupts");
   5553 #ifdef LRO
   5554 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   5555 				CTLFLAG_RD, &lro->lro_queued, 0,
   5556 				"LRO Queued");
   5557 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   5558 				CTLFLAG_RD, &lro->lro_flushed, 0,
   5559 				"LRO Flushed");
   5560 #endif /* LRO */
   5561 	}
   5562 
	/* MAC stats get their own sub node */
   5565 
   5566 	snprintf(stats->namebuf,
   5567 	    sizeof(stats->namebuf), "%s MAC Statistics", device_xname(dev));
   5568 
   5569 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   5570 	    stats->namebuf, "rx csum offload - IP");
   5571 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   5572 	    stats->namebuf, "rx csum offload - L4");
   5573 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   5574 	    stats->namebuf, "rx csum offload - IP bad");
   5575 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   5576 	    stats->namebuf, "rx csum offload - L4 bad");
   5577 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   5578 	    stats->namebuf, "Interrupt conditions zero");
   5579 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   5580 	    stats->namebuf, "Legacy interrupts");
   5581 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   5582 	    stats->namebuf, "CRC Errors");
   5583 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   5584 	    stats->namebuf, "Illegal Byte Errors");
   5585 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   5586 	    stats->namebuf, "Byte Errors");
   5587 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   5588 	    stats->namebuf, "MAC Short Packets Discarded");
   5589 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   5590 	    stats->namebuf, "MAC Local Faults");
   5591 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   5592 	    stats->namebuf, "MAC Remote Faults");
   5593 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   5594 	    stats->namebuf, "Receive Length Errors");
   5595 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   5596 	    stats->namebuf, "Link XON Transmitted");
   5597 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   5598 	    stats->namebuf, "Link XON Received");
   5599 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   5600 	    stats->namebuf, "Link XOFF Transmitted");
   5601 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   5602 	    stats->namebuf, "Link XOFF Received");
   5603 
   5604 	/* Packet Reception Stats */
   5605 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   5606 	    stats->namebuf, "Total Octets Received");
   5607 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   5608 	    stats->namebuf, "Good Octets Received");
   5609 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   5610 	    stats->namebuf, "Total Packets Received");
   5611 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   5612 	    stats->namebuf, "Good Packets Received");
   5613 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   5614 	    stats->namebuf, "Multicast Packets Received");
   5615 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   5616 	    stats->namebuf, "Broadcast Packets Received");
   5617 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "64 byte frames received");
   5619 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   5620 	    stats->namebuf, "65-127 byte frames received");
   5621 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   5622 	    stats->namebuf, "128-255 byte frames received");
   5623 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   5624 	    stats->namebuf, "256-511 byte frames received");
   5625 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   5626 	    stats->namebuf, "512-1023 byte frames received");
   5627 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "1024-1522 byte frames received");
   5629 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   5630 	    stats->namebuf, "Receive Undersized");
   5631 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Fragmented Packets Received");
   5633 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   5634 	    stats->namebuf, "Oversized Packets Received");
   5635 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   5636 	    stats->namebuf, "Received Jabber");
   5637 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   5638 	    stats->namebuf, "Management Packets Received");
   5639 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   5640 	    stats->namebuf, "Checksum Errors");
   5641 
   5642 	/* Packet Transmission Stats */
   5643 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   5644 	    stats->namebuf, "Good Octets Transmitted");
   5645 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   5646 	    stats->namebuf, "Total Packets Transmitted");
   5647 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   5648 	    stats->namebuf, "Good Packets Transmitted");
   5649 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   5650 	    stats->namebuf, "Broadcast Packets Transmitted");
   5651 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   5652 	    stats->namebuf, "Multicast Packets Transmitted");
   5653 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   5654 	    stats->namebuf, "Management Packets Transmitted");
   5655 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "64 byte frames transmitted");
   5657 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   5658 	    stats->namebuf, "65-127 byte frames transmitted");
   5659 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   5660 	    stats->namebuf, "128-255 byte frames transmitted");
   5661 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   5662 	    stats->namebuf, "256-511 byte frames transmitted");
   5663 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   5664 	    stats->namebuf, "512-1023 byte frames transmitted");
   5665 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   5666 	    stats->namebuf, "1024-1522 byte frames transmitted");
   5667 
   5668 	/* FC Stats */
   5669 	evcnt_attach_dynamic(&stats->fccrc, EVCNT_TYPE_MISC, NULL,
   5670 	    stats->namebuf, "FC CRC Errors");
   5671 	evcnt_attach_dynamic(&stats->fclast, EVCNT_TYPE_MISC, NULL,
   5672 	    stats->namebuf, "FC Last Error");
   5673 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5674 		evcnt_attach_dynamic(&stats->fcoerpdc, EVCNT_TYPE_MISC, NULL,
   5675 		    stats->namebuf, "FCoE Packets Dropped");
   5676 		evcnt_attach_dynamic(&stats->fcoeprc, EVCNT_TYPE_MISC, NULL,
   5677 		    stats->namebuf, "FCoE Packets Received");
   5678 		evcnt_attach_dynamic(&stats->fcoeptc, EVCNT_TYPE_MISC, NULL,
   5679 		    stats->namebuf, "FCoE Packets Transmitted");
   5680 		evcnt_attach_dynamic(&stats->fcoedwrc, EVCNT_TYPE_MISC, NULL,
   5681 		    stats->namebuf, "FCoE DWords Received");
   5682 		evcnt_attach_dynamic(&stats->fcoedwtc, EVCNT_TYPE_MISC, NULL,
   5683 		    stats->namebuf, "FCoE DWords Transmitted");
   5684 	}
   5685 }
   5686 
   5687 /*
   5688 ** Set flow control using sysctl:
   5689 ** Flow control values:
   5690 ** 	0 - off
   5691 **	1 - rx pause
   5692 **	2 - tx pause
   5693 **	3 - full
   5694 */
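/*
** Illustrative usage (the exact node path depends on where this
** handler is attached; "hw.ixg0.flow_control" is only a guess based
** on the per-device node created by ixgbe_sysctl_instance()):
**
**	sysctl -w hw.ixg0.flow_control=3
**
** would request full (rx + tx) pause.
*/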
   5695 static int
   5696 ixgbe_set_flowcntl(SYSCTLFN_ARGS)
   5697 {
   5698 	struct sysctlnode node;
   5699 	int error;
   5700 	int last = ixgbe_flow_control;
   5701 	struct adapter *adapter;
   5702 
   5703 	node = *rnode;
   5704 	adapter = (struct adapter *)node.sysctl_data;
   5705 	node.sysctl_data = &ixgbe_flow_control;
   5706 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5707 	if (error != 0 || newp == NULL)
   5708 		return error;
   5709 
   5710 	/* Don't bother if it's not changed */
   5711 	if (ixgbe_flow_control == last)
   5712 		return (0);
   5713 
   5714 	switch (ixgbe_flow_control) {
   5715 		case ixgbe_fc_rx_pause:
   5716 		case ixgbe_fc_tx_pause:
   5717 		case ixgbe_fc_full:
   5718 			adapter->hw.fc.requested_mode = ixgbe_flow_control;
   5719 			break;
   5720 		case ixgbe_fc_none:
   5721 		default:
   5722 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5723 	}
   5724 
   5725 	ixgbe_fc_enable(&adapter->hw, 0);
   5726 	return 0;
   5727 }
   5728 
   5729 static void
   5730 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
   5731         const char *description, int *limit, int value)
   5732 {
   5733 	const struct sysctlnode *rnode, *cnode;
   5734 	struct sysctllog **log = &adapter->sysctllog;
   5735 
	*limit = value;
   5737 
   5738 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL)
   5739 		aprint_error_dev(adapter->dev,
   5740 		    "could not create sysctl root\n");
   5741 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   5742 	    CTLFLAG_READWRITE,
   5743 	    CTLTYPE_INT,
   5744 	    name, SYSCTL_DESCR(description),
   5745 	    NULL, 0, limit, 0,
   5746 	    CTL_CREATE, CTL_EOL) != 0) {
		aprint_error_dev(adapter->dev, "%s: could not create sysctl\n",
		    __func__);
   5749 	}
   5750 }
   5751 
   5752 /*
   5753 ** Control link advertise speed:
   5754 ** 	0 - normal
   5755 **	1 - advertise only 1G
   5756 */
   5757 static int
   5758 ixgbe_set_advertise(SYSCTLFN_ARGS)
   5759 {
   5760 	struct sysctlnode	node;
   5761 	int			t, error;
   5762 	struct adapter		*adapter;
   5763 	struct ixgbe_hw		*hw;
   5764 	ixgbe_link_speed	speed, last;
   5765 
   5766 	node = *rnode;
   5767 	adapter = (struct adapter *)node.sysctl_data;
   5768 	t = adapter->advertise;
   5769 	node.sysctl_data = &t;
   5770 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5771 	if (error != 0 || newp == NULL)
   5772 		return error;
   5773 
   5774 	if (t == -1)
   5775 		return 0;
   5776 
   5777 	adapter->advertise = t;
   5778 
   5779 	hw = &adapter->hw;
   5780 	last = hw->phy.autoneg_advertised;
   5781 
	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)))
		return 0;

	if (adapter->advertise == 1)
		speed = IXGBE_LINK_SPEED_1GB_FULL;
	else
		speed = IXGBE_LINK_SPEED_1GB_FULL |
		    IXGBE_LINK_SPEED_10GB_FULL;
   5791 
   5792 	if (speed == last) /* no change */
   5793 		return 0;
   5794 
   5795 	hw->mac.autotry_restart = TRUE;
   5796 	hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
   5797 
   5798 	return 0;
   5799 }
   5800