ixgbe.c revision 1.16
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2011, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*
     34  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     35  * All rights reserved.
     36  *
     37  * This code is derived from software contributed to The NetBSD Foundation
     38  * by Coyote Point Systems, Inc.
     39  *
     40  * Redistribution and use in source and binary forms, with or without
     41  * modification, are permitted provided that the following conditions
     42  * are met:
     43  * 1. Redistributions of source code must retain the above copyright
     44  *    notice, this list of conditions and the following disclaimer.
     45  * 2. Redistributions in binary form must reproduce the above copyright
     46  *    notice, this list of conditions and the following disclaimer in the
     47  *    documentation and/or other materials provided with the distribution.
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     50  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     51  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     52  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     53  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     54  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     55  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     56  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     57  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     58  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     59  * POSSIBILITY OF SUCH DAMAGE.
     60  */
     61 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.51 2011/04/25 23:34:21 jfv Exp $*/
     62 /*$NetBSD: ixgbe.c,v 1.16 2015/02/04 03:17:29 msaitoh Exp $*/
     63 
     64 #include "opt_inet.h"
     65 
     66 #include "ixgbe.h"
     67 
     68 /*********************************************************************
     69  *  Set this to one to display debug statistics
     70  *********************************************************************/
     71 int             ixgbe_display_debug_stats = 0;
     72 
     73 /*********************************************************************
     74  *  Driver version
     75  *********************************************************************/
     76 char ixgbe_driver_version[] = "2.3.10";
     77 
     78 /*********************************************************************
     79  *  PCI Device ID Table
     80  *
     81  *  Used by probe to select devices to load on
     82  *  Last field stores an index into ixgbe_strings
     83  *  Last entry must be all 0s
     84  *
     85  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     86  *********************************************************************/
     87 
     88 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     89 {
     90 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     91 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     92 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     93 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
     94 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
     95 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_DELL, 0, 0, 0},
    111 	/* required last entry */
    112 	{0, 0, 0, 0, 0}
    113 };
    114 
    115 /*********************************************************************
    116  *  Table of branding strings
    117  *********************************************************************/
    118 
    119 static const char    *ixgbe_strings[] = {
    120 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    121 };
    122 
    123 /*********************************************************************
    124  *  Function prototypes
    125  *********************************************************************/
    126 static int      ixgbe_probe(device_t, cfdata_t, void *);
    127 static void     ixgbe_attach(device_t, device_t, void *);
    128 static int      ixgbe_detach(device_t, int);
    129 #if 0
    130 static int      ixgbe_shutdown(device_t);
    131 #endif
    132 static void     ixgbe_start(struct ifnet *);
    133 static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
    134 #if __FreeBSD_version >= 800000
    135 static int	ixgbe_mq_start(struct ifnet *, struct mbuf *);
    136 static int	ixgbe_mq_start_locked(struct ifnet *,
    137                     struct tx_ring *, struct mbuf *);
    138 static void	ixgbe_qflush(struct ifnet *);
    139 #endif
    140 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    141 static void	ixgbe_ifstop(struct ifnet *, int);
    142 static int	ixgbe_init(struct ifnet *);
    143 static void	ixgbe_init_locked(struct adapter *);
    144 static void     ixgbe_stop(void *);
    145 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    146 static int      ixgbe_media_change(struct ifnet *);
    147 static void     ixgbe_identify_hardware(struct adapter *);
    148 static int      ixgbe_allocate_pci_resources(struct adapter *,
    149 		    const struct pci_attach_args *);
    150 static int      ixgbe_allocate_msix(struct adapter *,
    151 		    const struct pci_attach_args *);
    152 static int      ixgbe_allocate_legacy(struct adapter *,
    153 		    const struct pci_attach_args *);
    154 static int	ixgbe_allocate_queues(struct adapter *);
    155 static int	ixgbe_setup_msix(struct adapter *);
    156 static void	ixgbe_free_pci_resources(struct adapter *);
    157 static void	ixgbe_local_timer(void *);
    158 static int	ixgbe_setup_interface(device_t, struct adapter *);
    159 static void	ixgbe_config_link(struct adapter *);
    160 
    161 static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
    162 static int	ixgbe_setup_transmit_structures(struct adapter *);
    163 static void	ixgbe_setup_transmit_ring(struct tx_ring *);
    164 static void     ixgbe_initialize_transmit_units(struct adapter *);
    165 static void     ixgbe_free_transmit_structures(struct adapter *);
    166 static void     ixgbe_free_transmit_buffers(struct tx_ring *);
    167 
    168 static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
    169 static int      ixgbe_setup_receive_structures(struct adapter *);
    170 static int	ixgbe_setup_receive_ring(struct rx_ring *);
    171 static void     ixgbe_initialize_receive_units(struct adapter *);
    172 static void     ixgbe_free_receive_structures(struct adapter *);
    173 static void     ixgbe_free_receive_buffers(struct rx_ring *);
    174 static void	ixgbe_setup_hw_rsc(struct rx_ring *);
    175 
    176 static void     ixgbe_enable_intr(struct adapter *);
    177 static void     ixgbe_disable_intr(struct adapter *);
    178 static void     ixgbe_update_stats_counters(struct adapter *);
    179 static bool	ixgbe_txeof(struct tx_ring *);
    180 static bool	ixgbe_rxeof(struct ix_queue *, int);
    181 static void	ixgbe_rx_checksum(u32, struct mbuf *, u32,
    182 		    struct ixgbe_hw_stats *);
    183 static void     ixgbe_set_promisc(struct adapter *);
    184 static void     ixgbe_set_multi(struct adapter *);
    185 static void     ixgbe_update_link_status(struct adapter *);
    186 static void	ixgbe_refresh_mbufs(struct rx_ring *, int);
    187 static int      ixgbe_xmit(struct tx_ring *, struct mbuf *);
    188 static int	ixgbe_set_flowcntl(SYSCTLFN_PROTO);
    189 static int	ixgbe_set_advertise(SYSCTLFN_PROTO);
    190 static int	ixgbe_dma_malloc(struct adapter *, bus_size_t,
    191 		    struct ixgbe_dma_alloc *, int);
    192 static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
    193 static void	ixgbe_add_rx_process_limit(struct adapter *, const char *,
    194 		    const char *, int *, int);
    195 static u32	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
    196 static bool	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
    197 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    198 static void	ixgbe_configure_ivars(struct adapter *);
    199 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    200 
    201 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    202 #if 0
    203 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    204 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    205 #endif
    206 
    207 static void     ixgbe_add_hw_stats(struct adapter *adapter);
    208 
    209 static __inline void ixgbe_rx_discard(struct rx_ring *, int);
    210 static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
    211 		    struct mbuf *, u32);
    212 
    213 /* Support for pluggable optic modules */
    214 static bool	ixgbe_sfp_probe(struct adapter *);
    215 static void	ixgbe_setup_optics(struct adapter *);
    216 
     217 /* Legacy (single vector) interrupt handler */
    218 static int	ixgbe_legacy_irq(void *);
    219 
    220 #if defined(NETBSD_MSI_OR_MSIX)
    221 /* The MSI/X Interrupt handlers */
    222 static void	ixgbe_msix_que(void *);
    223 static void	ixgbe_msix_link(void *);
    224 #endif
    225 
    226 /* Software interrupts for deferred work */
    227 static void	ixgbe_handle_que(void *);
    228 static void	ixgbe_handle_link(void *);
    229 static void	ixgbe_handle_msf(void *);
    230 static void	ixgbe_handle_mod(void *);
    231 
    232 const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
    233 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    234 
    235 #ifdef IXGBE_FDIR
    236 static void	ixgbe_atr(struct tx_ring *, struct mbuf *);
    237 static void	ixgbe_reinit_fdir(void *, int);
    238 #endif
    239 
    240 /*********************************************************************
    241  *  FreeBSD Device Interface Entry Points
    242  *********************************************************************/
    243 
    244 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    245     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    246     DVF_DETACH_SHUTDOWN);
    247 
    248 #if 0
    249 devclass_t ixgbe_devclass;
    250 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
    251 
    252 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
    253 MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
    254 #endif
    255 
    256 /*
    257 ** TUNEABLE PARAMETERS:
    258 */
    259 
    260 /*
    261 ** AIM: Adaptive Interrupt Moderation
    262 ** which means that the interrupt rate
    263 ** is varied over time based on the
    264 ** traffic for that interrupt vector
    265 */
    266 static int ixgbe_enable_aim = TRUE;
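/*
 * TUNABLE_INT() is a FreeBSD loader-tunable hook with no NetBSD
 * equivalent; it is defined away to a no-op below, so these remain
 * plain compile-time defaults.
 */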
    267 #define TUNABLE_INT(__x, __y)
    268 TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
    269 
    270 static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
    271 TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
    272 
    273 /* How many packets rxeof tries to clean at a time */
    274 static int ixgbe_rx_process_limit = 256;
    275 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
    276 
    277 /* Flow control setting, default to full */
    278 static int ixgbe_flow_control = ixgbe_fc_full;
    279 TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
    280 
    281 /*
     282 ** Smart speed setting, default to on.
     283 ** This only works as a compile-time option
     284 ** right now, since it is applied during attach;
     285 ** set this to 'ixgbe_smart_speed_off' to
     286 ** disable.
    287 */
    288 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    289 
    290 /*
    291  * MSIX should be the default for best performance,
    292  * but this allows it to be forced off for testing.
    293  */
    294 static int ixgbe_enable_msix = 1;
    295 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
    296 
    297 /*
    298  * Header split: this causes the hardware to DMA
     299  * the header into a separate mbuf from the payload.
     300  * It can be a performance win in some workloads, but
     301  * in others it actually hurts; it is off by default.
    302  */
    303 static bool ixgbe_header_split = FALSE;
    304 TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
    305 
    306 #if defined(NETBSD_MSI_OR_MSIX)
    307 /*
     308  * Number of queues: when set to 0 the driver
     309  * autoconfigures based on the number of CPUs,
     310  * with a maximum of 8. This can be overridden
     311  * manually here.
    312  */
    313 static int ixgbe_num_queues = 0;
    314 TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
    315 #endif
    316 
    317 /*
     318 ** Number of TX descriptors per ring;
     319 ** set higher than RX as this seems to be
     320 ** the better-performing choice.
    321 */
    322 static int ixgbe_txd = PERFORM_TXD;
    323 TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
    324 
    325 /* Number of RX descriptors per ring */
    326 static int ixgbe_rxd = PERFORM_RXD;
    327 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
    328 
     329 /* Keep a running tab of attached ports for sanity checking */
    330 static int ixgbe_total_ports;
    331 
    332 #ifdef IXGBE_FDIR
    333 /*
     334 ** For Flow Director: this is the
     335 ** sample rate of TX packets for the
     336 ** filter pool; a value of 20 means
     337 ** every 20th packet will be probed.
    338 **
    339 ** This feature can be disabled by
    340 ** setting this to 0.
    341 */
    342 static int atr_sample_rate = 20;
    343 /*
    344 ** Flow Director actually 'steals'
    345 ** part of the packet buffer as its
     346 ** filter pool; this variable controls
    347 ** how much it uses:
    348 **  0 = 64K, 1 = 128K, 2 = 256K
    349 */
    350 static int fdir_pballoc = 1;
    351 #endif
    352 
    353 /*********************************************************************
    354  *  Device identification routine
    355  *
     356  *  ixgbe_probe determines whether the driver should attach to
     357  *  the adapter, based on its PCI vendor/device ID.
    358  *
    359  *  return 1 on success, 0 on failure
    360  *********************************************************************/
    361 
    362 static int
    363 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
    364 {
    365 	const struct pci_attach_args *pa = aux;
    366 
    367 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
    368 }
    369 
    370 static ixgbe_vendor_info_t *
    371 ixgbe_lookup(const struct pci_attach_args *pa)
    372 {
    373 	pcireg_t subid;
    374 	ixgbe_vendor_info_t *ent;
    375 
    376 	INIT_DEBUGOUT("ixgbe_probe: begin");
    377 
    378 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    379 		return NULL;
    380 
    381 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    382 
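	/*
	 * A zero subvendor/subdevice ID in a table entry acts as a
	 * wildcard and matches any subsystem.
	 */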
    383 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
    384 		if (PCI_VENDOR(pa->pa_id) == ent->vendor_id &&
    385 		    PCI_PRODUCT(pa->pa_id) == ent->device_id &&
    386 
    387 		    (PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id ||
    388 		     ent->subvendor_id == 0) &&
    389 
    390 		    (PCI_SUBSYS_ID(subid) == ent->subdevice_id ||
    391 		     ent->subdevice_id == 0)) {
    392 			++ixgbe_total_ports;
    393 			return ent;
    394 		}
    395 	}
    396 	return NULL;
    397 }
    398 
    399 
    400 static void
    401 ixgbe_sysctl_attach(struct adapter *adapter)
    402 {
    403 	struct sysctllog **log;
    404 	const struct sysctlnode *rnode, *cnode;
    405 	device_t dev;
    406 
    407 	dev = adapter->dev;
    408 	log = &adapter->sysctllog;
    409 
    410 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
    411 		aprint_error_dev(dev, "could not create sysctl root\n");
    412 		return;
    413 	}
    414 
    415 	if (sysctl_createv(log, 0, &rnode, &cnode,
    416 	    CTLFLAG_READONLY, CTLTYPE_INT,
    417 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
    418 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
    419 		aprint_error_dev(dev, "could not create sysctl\n");
    420 
    421 	if (sysctl_createv(log, 0, &rnode, &cnode,
    422 	    CTLFLAG_READONLY, CTLTYPE_INT,
    423 	    "num_queues", SYSCTL_DESCR("Number of queues"),
    424 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
    425 		aprint_error_dev(dev, "could not create sysctl\n");
    426 
    427 	if (sysctl_createv(log, 0, &rnode, &cnode,
    428 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    429 	    "flow_control", SYSCTL_DESCR("Flow Control"),
    430 	    ixgbe_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    431 		aprint_error_dev(dev, "could not create sysctl\n");
    432 
    433 	if (sysctl_createv(log, 0, &rnode, &cnode,
    434 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    435 	    "advertise_gig", SYSCTL_DESCR("1G Link"),
    436 	    ixgbe_set_advertise, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
    437 		aprint_error_dev(dev, "could not create sysctl\n");
    438 
    439 	/* XXX This is an *instance* sysctl controlling a *global* variable.
    440 	 * XXX It's that way in the FreeBSD driver that this derives from.
    441 	 */
    442 	if (sysctl_createv(log, 0, &rnode, &cnode,
    443 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    444 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
    445 	    NULL, 0, &ixgbe_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
    446 		aprint_error_dev(dev, "could not create sysctl\n");
    447 }
    448 
    449 /*********************************************************************
    450  *  Device initialization routine
    451  *
    452  *  The attach entry point is called when the driver is being loaded.
    453  *  This routine identifies the type of hardware, allocates all resources
    454  *  and initializes the hardware.
    455  *
    456  *  return 0 on success, positive on failure
    457  *********************************************************************/
    458 
    459 static void
    460 ixgbe_attach(device_t parent, device_t dev, void *aux)
    461 {
    462 	struct adapter *adapter;
    463 	struct ixgbe_hw *hw;
    464 	int             error = 0;
    465 	u16		csum;
    466 	u32		ctrl_ext;
    467 	ixgbe_vendor_info_t *ent;
    468 	const struct pci_attach_args *pa = aux;
    469 
    470 	INIT_DEBUGOUT("ixgbe_attach: begin");
    471 
    472 	/* Allocate, clear, and link in our adapter structure */
    473 	adapter = device_private(dev);
    474 	adapter->dev = adapter->osdep.dev = dev;
    475 	hw = &adapter->hw;
    476 	adapter->osdep.pc = pa->pa_pc;
    477 	adapter->osdep.tag = pa->pa_tag;
    478 	adapter->osdep.dmat = pa->pa_dmat;
    479 
    480 	ent = ixgbe_lookup(pa);
    481 
    482 	KASSERT(ent != NULL);
    483 
    484 	aprint_normal(": %s, Version - %s\n",
    485 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    486 
    487 	/* Core Lock Init*/
    488 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    489 
    490 	/* SYSCTL APIs */
    491 
    492 	ixgbe_sysctl_attach(adapter);
    493 
    494 	/* Set up the timer callout */
    495 	callout_init(&adapter->timer, 0);
    496 
    497 	/* Determine hardware revision */
    498 	ixgbe_identify_hardware(adapter);
    499 
    500 	/* Do base PCI setup - map BAR0 */
    501 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    502 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    503 		error = ENXIO;
    504 		goto err_out;
    505 	}
    506 
    507 	/* Do descriptor calc and sanity checks */
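	/* The ring length in bytes must be a multiple of DBA_ALIGN. */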
    508 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    509 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    510 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    511 		adapter->num_tx_desc = DEFAULT_TXD;
    512 	} else
    513 		adapter->num_tx_desc = ixgbe_txd;
    514 
    515 	/*
    516 	** With many RX rings it is easy to exceed the
    517 	** system mbuf allocation. Tuning nmbclusters
    518 	** can alleviate this.
    519 	*/
    520 	if (nmbclusters > 0 ) {
    521 		int s;
    522 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    523 		if (s > nmbclusters) {
    524 			aprint_error_dev(dev, "RX Descriptors exceed "
    525 			    "system mbuf max, using default instead!\n");
    526 			ixgbe_rxd = DEFAULT_RXD;
    527 		}
    528 	}
    529 
    530 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
     531 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    532 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    533 		adapter->num_rx_desc = DEFAULT_RXD;
    534 	} else
    535 		adapter->num_rx_desc = ixgbe_rxd;
    536 
    537 	/* Allocate our TX/RX Queues */
    538 	if (ixgbe_allocate_queues(adapter)) {
    539 		error = ENOMEM;
    540 		goto err_out;
    541 	}
    542 
    543 	/* Allocate multicast array memory. */
    544 	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
    545 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    546 	if (adapter->mta == NULL) {
    547 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    548 		error = ENOMEM;
    549 		goto err_late;
    550 	}
    551 
    552 	/* Initialize the shared code */
    553 	error = ixgbe_init_shared_code(hw);
    554 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    555 		/*
    556 		** No optics in this port, set up
    557 		** so the timer routine will probe
    558 		** for later insertion.
    559 		*/
    560 		adapter->sfp_probe = TRUE;
    561 		error = 0;
    562 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    563 		aprint_error_dev(dev,"Unsupported SFP+ module detected!\n");
    564 		error = EIO;
    565 		goto err_late;
    566 	} else if (error) {
    567 		aprint_error_dev(dev,"Unable to initialize the shared code\n");
    568 		error = EIO;
    569 		goto err_late;
    570 	}
    571 
    572 	/* Make sure we have a good EEPROM before we read from it */
    573 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
    574 		aprint_error_dev(dev,"The EEPROM Checksum Is Not Valid\n");
    575 		error = EIO;
    576 		goto err_late;
    577 	}
    578 
    579 	/* Get Hardware Flow Control setting */
    580 	hw->fc.requested_mode = ixgbe_fc_full;
    581 	hw->fc.pause_time = IXGBE_FC_PAUSE;
    582 	hw->fc.low_water = IXGBE_FC_LO;
    583 	hw->fc.high_water = IXGBE_FC_HI;
    584 	hw->fc.send_xon = TRUE;
    585 
    586 	error = ixgbe_init_hw(hw);
    587 	if (error == IXGBE_ERR_EEPROM_VERSION) {
    588 		aprint_error_dev(dev, "This device is a pre-production adapter/"
    589 		    "LOM.  Please be aware there may be issues associated "
    590 		    "with your hardware.\n If you are experiencing problems "
    591 		    "please contact your Intel or hardware representative "
    592 		    "who provided you with this hardware.\n");
    593 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
    594 		aprint_error_dev(dev,"Unsupported SFP+ Module\n");
    595 
    596 	if (error) {
    597 		error = EIO;
    598 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    599 		goto err_late;
    600 	}
    601 
    602 	/* Detect and set physical type */
    603 	ixgbe_setup_optics(adapter);
    604 
    605 	if ((adapter->msix > 1) && (ixgbe_enable_msix))
    606 		error = ixgbe_allocate_msix(adapter, pa);
    607 	else
    608 		error = ixgbe_allocate_legacy(adapter, pa);
    609 	if (error)
    610 		goto err_late;
    611 
    612 	/* Setup OS specific network interface */
    613 	if (ixgbe_setup_interface(dev, adapter) != 0)
    614 		goto err_late;
    615 
    616 	/* Sysctl for limiting the amount of work done in software interrupts */
    617 	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
    618 	    "max number of rx packets to process", &adapter->rx_process_limit,
    619 	    ixgbe_rx_process_limit);
    620 
    621 	/* Initialize statistics */
    622 	ixgbe_update_stats_counters(adapter);
    623 
    624         /* Print PCIE bus type/speed/width info */
    625 	ixgbe_get_bus_info(hw);
    626 	aprint_normal_dev(dev,"PCI Express Bus: Speed %s %s\n",
    627 	    ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
    628 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
    629 	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
    630 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
    631 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
    632 	    ("Unknown"));
    633 
    634 	if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
    635 	    (hw->bus.speed == ixgbe_bus_speed_2500)) {
    636 		aprint_error_dev(dev, "PCI-Express bandwidth available"
    637 		    " for this card\n     is not sufficient for"
    638 		    " optimal performance.\n");
    639 		aprint_error_dev(dev, "For optimal performance a x8 "
    640 		    "PCIE, or x4 PCIE 2 slot is required.\n");
    641         }
    642 
    643 	/* let hardware know driver is loaded */
    644 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    645 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    646 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    647 
    648 	ixgbe_add_hw_stats(adapter);
    649 
    650 	INIT_DEBUGOUT("ixgbe_attach: end");
    651 	return;
    652 err_late:
    653 	ixgbe_free_transmit_structures(adapter);
    654 	ixgbe_free_receive_structures(adapter);
    655 err_out:
    656 	if (adapter->ifp != NULL)
    657 		if_free(adapter->ifp);
    658 	ixgbe_free_pci_resources(adapter);
    659 	if (adapter->mta != NULL)
    660 		free(adapter->mta, M_DEVBUF);
    661 	return;
    662 
    663 }
    664 
    665 /*********************************************************************
    666  *  Device removal routine
    667  *
    668  *  The detach entry point is called when the driver is being removed.
    669  *  This routine stops the adapter and deallocates all the resources
    670  *  that were allocated for driver operation.
    671  *
    672  *  return 0 on success, positive on failure
    673  *********************************************************************/
    674 
    675 static int
    676 ixgbe_detach(device_t dev, int flags)
    677 {
    678 	struct adapter *adapter = device_private(dev);
    679 	struct tx_ring *txr = adapter->tx_rings;
    680 	struct rx_ring *rxr = adapter->rx_rings;
    681 	struct ixgbe_hw_stats *stats = &adapter->stats;
    682 	struct ix_queue *que = adapter->queues;
    683 	u32	ctrl_ext;
    684 
    685 	INIT_DEBUGOUT("ixgbe_detach: begin");
    686 
    687 	/* Make sure VLANs are not using driver */
    688 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    689 		;	/* nothing to do: no VLANs */
    690 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    691 		vlan_ifdetach(adapter->ifp);
    692 	else {
    693 		aprint_error_dev(dev, "VLANs in use\n");
    694 		return EBUSY;
    695 	}
    696 
    697 	IXGBE_CORE_LOCK(adapter);
    698 	ixgbe_stop(adapter);
    699 	IXGBE_CORE_UNLOCK(adapter);
    700 
    701 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    702 		softint_disestablish(que->que_si);
    703 	}
    704 
    705 	/* Drain the Link queue */
    706 	softint_disestablish(adapter->link_si);
    707 	softint_disestablish(adapter->mod_si);
    708 	softint_disestablish(adapter->msf_si);
    709 #ifdef IXGBE_FDIR
    710 	softint_disestablish(adapter->fdir_si);
    711 #endif
    712 
    713 	/* let hardware know driver is unloading */
    714 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
    715 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    716 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
    717 
    718 	ether_ifdetach(adapter->ifp);
    719 	callout_halt(&adapter->timer, NULL);
    720 	ixgbe_free_pci_resources(adapter);
    721 #if 0	/* XXX the NetBSD port is probably missing something here */
    722 	bus_generic_detach(dev);
    723 #endif
    724 	if_detach(adapter->ifp);
    725 
    726 	sysctl_teardown(&adapter->sysctllog);
    727 	evcnt_detach(&adapter->handleq);
    728 	evcnt_detach(&adapter->req);
    729 	evcnt_detach(&adapter->morerx);
    730 	evcnt_detach(&adapter->moretx);
    731 	evcnt_detach(&adapter->txloops);
    732 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    733 	evcnt_detach(&adapter->m_defrag_failed);
    734 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    735 	evcnt_detach(&adapter->einval_tx_dma_setup);
    736 	evcnt_detach(&adapter->other_tx_dma_setup);
    737 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    738 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    739 	evcnt_detach(&adapter->watchdog_events);
    740 	evcnt_detach(&adapter->tso_err);
    741 	evcnt_detach(&adapter->tso_tx);
    742 	evcnt_detach(&adapter->link_irq);
    743 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    744 		evcnt_detach(&txr->no_desc_avail);
    745 		evcnt_detach(&txr->total_packets);
    746 
    747 		if (i < __arraycount(adapter->stats.mpc)) {
    748 			evcnt_detach(&adapter->stats.mpc[i]);
    749 		}
    750 		if (i < __arraycount(adapter->stats.pxontxc)) {
    751 			evcnt_detach(&adapter->stats.pxontxc[i]);
    752 			evcnt_detach(&adapter->stats.pxonrxc[i]);
    753 			evcnt_detach(&adapter->stats.pxofftxc[i]);
    754 			evcnt_detach(&adapter->stats.pxoffrxc[i]);
    755 			evcnt_detach(&adapter->stats.pxon2offc[i]);
    756 		}
    757 		if (i < __arraycount(adapter->stats.qprc)) {
    758 			evcnt_detach(&adapter->stats.qprc[i]);
    759 			evcnt_detach(&adapter->stats.qptc[i]);
    760 			evcnt_detach(&adapter->stats.qbrc[i]);
    761 			evcnt_detach(&adapter->stats.qbtc[i]);
    762 			evcnt_detach(&adapter->stats.qprdc[i]);
    763 		}
    764 
    765 		evcnt_detach(&rxr->rx_packets);
    766 		evcnt_detach(&rxr->rx_bytes);
    767 		evcnt_detach(&rxr->no_jmbuf);
    768 		evcnt_detach(&rxr->rx_discarded);
    769 		evcnt_detach(&rxr->rx_split_packets);
    770 		evcnt_detach(&rxr->rx_irq);
    771 	}
    772 	evcnt_detach(&stats->ipcs);
    773 	evcnt_detach(&stats->l4cs);
    774 	evcnt_detach(&stats->ipcs_bad);
    775 	evcnt_detach(&stats->l4cs_bad);
    776 	evcnt_detach(&stats->intzero);
    777 	evcnt_detach(&stats->legint);
    778 	evcnt_detach(&stats->crcerrs);
    779 	evcnt_detach(&stats->illerrc);
    780 	evcnt_detach(&stats->errbc);
    781 	evcnt_detach(&stats->mspdc);
    782 	evcnt_detach(&stats->mlfc);
    783 	evcnt_detach(&stats->mrfc);
    784 	evcnt_detach(&stats->rlec);
    785 	evcnt_detach(&stats->lxontxc);
    786 	evcnt_detach(&stats->lxonrxc);
    787 	evcnt_detach(&stats->lxofftxc);
    788 	evcnt_detach(&stats->lxoffrxc);
    789 
    790 	/* Packet Reception Stats */
    791 	evcnt_detach(&stats->tor);
    792 	evcnt_detach(&stats->gorc);
    793 	evcnt_detach(&stats->tpr);
    794 	evcnt_detach(&stats->gprc);
    795 	evcnt_detach(&stats->mprc);
    796 	evcnt_detach(&stats->bprc);
    797 	evcnt_detach(&stats->prc64);
    798 	evcnt_detach(&stats->prc127);
    799 	evcnt_detach(&stats->prc255);
    800 	evcnt_detach(&stats->prc511);
    801 	evcnt_detach(&stats->prc1023);
    802 	evcnt_detach(&stats->prc1522);
    803 	evcnt_detach(&stats->ruc);
    804 	evcnt_detach(&stats->rfc);
    805 	evcnt_detach(&stats->roc);
    806 	evcnt_detach(&stats->rjc);
    807 	evcnt_detach(&stats->mngprc);
    808 	evcnt_detach(&stats->xec);
    809 
    810 	/* Packet Transmission Stats */
    811 	evcnt_detach(&stats->gotc);
    812 	evcnt_detach(&stats->tpt);
    813 	evcnt_detach(&stats->gptc);
    814 	evcnt_detach(&stats->bptc);
    815 	evcnt_detach(&stats->mptc);
    816 	evcnt_detach(&stats->mngptc);
    817 	evcnt_detach(&stats->ptc64);
    818 	evcnt_detach(&stats->ptc127);
    819 	evcnt_detach(&stats->ptc255);
    820 	evcnt_detach(&stats->ptc511);
    821 	evcnt_detach(&stats->ptc1023);
    822 	evcnt_detach(&stats->ptc1522);
    823 
    824 	/* FC Stats */
    825 	evcnt_detach(&stats->fccrc);
    826 	evcnt_detach(&stats->fclast);
    827 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    828 		evcnt_detach(&stats->fcoerpdc);
    829 		evcnt_detach(&stats->fcoeprc);
    830 		evcnt_detach(&stats->fcoeptc);
    831 		evcnt_detach(&stats->fcoedwrc);
    832 		evcnt_detach(&stats->fcoedwtc);
    833 	}
    834 
    835 	ixgbe_free_transmit_structures(adapter);
    836 	ixgbe_free_receive_structures(adapter);
    837 	free(adapter->mta, M_DEVBUF);
    838 
    839 	IXGBE_CORE_LOCK_DESTROY(adapter);
    840 	return (0);
    841 }
    842 
    843 /*********************************************************************
    844  *
    845  *  Shutdown entry point
    846  *
    847  **********************************************************************/
    848 
    849 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    850 static int
    851 ixgbe_shutdown(device_t dev)
    852 {
    853 	struct adapter *adapter = device_private(dev);
    854 	IXGBE_CORE_LOCK(adapter);
    855 	ixgbe_stop(adapter);
    856 	IXGBE_CORE_UNLOCK(adapter);
    857 	return (0);
    858 }
    859 #endif
    860 
    861 
    862 /*********************************************************************
    863  *  Transmit entry point
    864  *
    865  *  ixgbe_start is called by the stack to initiate a transmit.
    866  *  The driver will remain in this routine as long as there are
    867  *  packets to transmit and transmit resources are available.
     868  *  If resources are not available, the stack is notified and
     869  *  the packet is requeued.
    870  **********************************************************************/
    871 
    872 static void
    873 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
    874 {
    875 	int rc;
    876 	struct mbuf    *m_head;
    877 	struct adapter *adapter = txr->adapter;
    878 
    879 	IXGBE_TX_LOCK_ASSERT(txr);
    880 
    881 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
    882 	    IFF_RUNNING)
    883 		return;
    884 	if (!adapter->link_active)
    885 		return;
    886 
    887 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    888 
    889 		IFQ_POLL(&ifp->if_snd, m_head);
    890 		if (m_head == NULL)
    891 			break;
    892 
    893 		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
    894 			ifp->if_flags |= IFF_OACTIVE;
    895 			break;
    896 		}
    897 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
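		/*
		 * EFBIG from ixgbe_xmit() means the mbuf chain had too
		 * many DMA segments; defragment once and retry before
		 * dropping the packet.
		 */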
    898 		if (rc == EFBIG) {
    899 			struct mbuf *mtmp;
    900 
    901 			if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) {
    902 				m_head = mtmp;
    903 				rc = ixgbe_xmit(txr, m_head);
    904 				if (rc != 0)
    905 					adapter->efbig2_tx_dma_setup.ev_count++;
    906 			} else
    907 				adapter->m_defrag_failed.ev_count++;
    908 		}
    909 		if (rc != 0) {
    910 			m_freem(m_head);
    911 			continue;
    912 		}
    913 
    914 		/* Send a copy of the frame to the BPF listener */
    915 		bpf_mtap(ifp, m_head);
    916 
    917 		/* Set watchdog on */
    918 		getmicrotime(&txr->watchdog_time);
    919 		txr->queue_status = IXGBE_QUEUE_WORKING;
    920 
    921 	}
    922 	return;
    923 }
    924 
    925 /*
     926  * Legacy TX start - called by the stack; this
     927  * always uses the first tx ring and should
     928  * not be used with multiqueue tx enabled.
    929  */
    930 static void
    931 ixgbe_start(struct ifnet *ifp)
    932 {
    933 	struct adapter *adapter = ifp->if_softc;
    934 	struct tx_ring	*txr = adapter->tx_rings;
    935 
    936 	if (ifp->if_flags & IFF_RUNNING) {
    937 		IXGBE_TX_LOCK(txr);
    938 		ixgbe_start_locked(txr, ifp);
    939 		IXGBE_TX_UNLOCK(txr);
    940 	}
    941 	return;
    942 }
    943 
    944 #if __FreeBSD_version >= 800000
    945 /*
    946 ** Multiqueue Transmit driver
    947 **
    948 */
    949 static int
    950 ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
    951 {
    952 	struct adapter	*adapter = ifp->if_softc;
    953 	struct ix_queue	*que;
    954 	struct tx_ring	*txr;
    955 	int 		i = 0, err = 0;
    956 
    957 	/* Which queue to use */
    958 	if ((m->m_flags & M_FLOWID) != 0)
    959 		i = m->m_pkthdr.flowid % adapter->num_queues;
    960 
    961 	txr = &adapter->tx_rings[i];
    962 	que = &adapter->queues[i];
    963 
    964 	if (IXGBE_TX_TRYLOCK(txr)) {
    965 		err = ixgbe_mq_start_locked(ifp, txr, m);
    966 		IXGBE_TX_UNLOCK(txr);
    967 	} else {
    968 		err = drbr_enqueue(ifp, txr->br, m);
    969 		softint_schedule(que->que_si);
    970 	}
    971 
    972 	return (err);
    973 }
    974 
    975 static int
    976 ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
    977 {
    978 	struct adapter  *adapter = txr->adapter;
    979         struct mbuf     *next;
    980         int             enqueued, err = 0;
    981 
    982 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
    983 	    IFF_RUNNING || adapter->link_active == 0) {
    984 		if (m != NULL)
    985 			err = drbr_enqueue(ifp, txr->br, m);
    986 		return (err);
    987 	}
    988 
    989 	enqueued = 0;
    990 	if (m == NULL) {
    991 		next = drbr_dequeue(ifp, txr->br);
    992 	} else if (drbr_needs_enqueue(ifp, txr->br)) {
    993 		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
    994 			return (err);
    995 		next = drbr_dequeue(ifp, txr->br);
    996 	} else
    997 		next = m;
    998 
    999 	/* Process the queue */
   1000 	while (next != NULL) {
   1001 		if ((err = ixgbe_xmit(txr, &next)) != 0) {
   1002 			if (next != NULL)
   1003 				err = drbr_enqueue(ifp, txr->br, next);
   1004 			break;
   1005 		}
   1006 		enqueued++;
   1007 		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
   1008 		/* Send a copy of the frame to the BPF listener */
   1009 		bpf_mtap(ifp, next);
   1010 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   1011 			break;
   1012 		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
   1013 			ixgbe_txeof(txr);
   1014 		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
   1015 			ifp->if_flags |= IFF_OACTIVE;
   1016 			break;
   1017 		}
   1018 		next = drbr_dequeue(ifp, txr->br);
   1019 	}
   1020 
   1021 	if (enqueued > 0) {
   1022 		/* Set watchdog on */
   1023 		txr->queue_status = IXGBE_QUEUE_WORKING;
   1024 		getmicrotime(&txr->watchdog_time);
   1025 	}
   1026 
   1027 	return (err);
   1028 }
   1029 
   1030 /*
   1031 ** Flush all ring buffers
   1032 */
   1033 static void
   1034 ixgbe_qflush(struct ifnet *ifp)
   1035 {
   1036 	struct adapter	*adapter = ifp->if_softc;
   1037 	struct tx_ring	*txr = adapter->tx_rings;
   1038 	struct mbuf	*m;
   1039 
   1040 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1041 		IXGBE_TX_LOCK(txr);
   1042 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
   1043 			m_freem(m);
   1044 		IXGBE_TX_UNLOCK(txr);
   1045 	}
   1046 	if_qflush(ifp);
   1047 }
   1048 #endif /* __FreeBSD_version >= 800000 */
   1049 
   1050 static int
   1051 ixgbe_ifflags_cb(struct ethercom *ec)
   1052 {
   1053 	struct ifnet *ifp = &ec->ec_if;
   1054 	struct adapter *adapter = ifp->if_softc;
   1055 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   1056 
   1057 	IXGBE_CORE_LOCK(adapter);
   1058 
   1059 	if (change != 0)
   1060 		adapter->if_flags = ifp->if_flags;
   1061 
   1062 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
   1063 		rc = ENETRESET;
   1064 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   1065 		ixgbe_set_promisc(adapter);
   1066 
   1067 	IXGBE_CORE_UNLOCK(adapter);
   1068 
   1069 	return rc;
   1070 }
   1071 
   1072 /*********************************************************************
   1073  *  Ioctl entry point
   1074  *
   1075  *  ixgbe_ioctl is called when the user wants to configure the
   1076  *  interface.
   1077  *
   1078  *  return 0 on success, positive on failure
   1079  **********************************************************************/
   1080 
   1081 static int
   1082 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   1083 {
   1084 	struct adapter	*adapter = ifp->if_softc;
   1085 	struct ifcapreq *ifcr = data;
   1086 	struct ifreq	*ifr = data;
   1087 	int             error = 0;
   1088 	int l4csum_en;
   1089 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   1090 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   1091 
   1092 	switch (command) {
   1093 	case SIOCSIFFLAGS:
   1094 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   1095 		break;
   1096 	case SIOCADDMULTI:
   1097 	case SIOCDELMULTI:
   1098 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   1099 		break;
   1100 	case SIOCSIFMEDIA:
   1101 	case SIOCGIFMEDIA:
   1102 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   1103 		break;
   1104 	case SIOCSIFCAP:
   1105 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   1106 		break;
   1107 	case SIOCSIFMTU:
   1108 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   1109 		break;
   1110 	default:
   1111 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
   1112 		break;
   1113 	}
   1114 
   1115 	switch (command) {
   1116 	case SIOCSIFMEDIA:
   1117 	case SIOCGIFMEDIA:
   1118 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   1119 	case SIOCSIFCAP:
   1120 		/* Layer-4 Rx checksum offload has to be turned on and
   1121 		 * off as a unit.
   1122 		 */
   1123 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   1124 		if (l4csum_en != l4csum && l4csum_en != 0)
   1125 			return EINVAL;
   1126 		/*FALLTHROUGH*/
   1127 	case SIOCADDMULTI:
   1128 	case SIOCDELMULTI:
   1129 	case SIOCSIFFLAGS:
   1130 	case SIOCSIFMTU:
   1131 	default:
   1132 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   1133 			return error;
   1134 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   1135 			;
   1136 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   1137 			IXGBE_CORE_LOCK(adapter);
   1138 			ixgbe_init_locked(adapter);
   1139 			IXGBE_CORE_UNLOCK(adapter);
   1140 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   1141 			/*
   1142 			 * Multicast list has changed; set the hardware filter
   1143 			 * accordingly.
   1144 			 */
   1145 			IXGBE_CORE_LOCK(adapter);
   1146 			ixgbe_disable_intr(adapter);
   1147 			ixgbe_set_multi(adapter);
   1148 			ixgbe_enable_intr(adapter);
   1149 			IXGBE_CORE_UNLOCK(adapter);
   1150 		}
   1151 		return 0;
   1152 	}
   1153 }
   1154 
   1155 /*********************************************************************
   1156  *  Init entry point
   1157  *
   1158  *  This routine is used in two ways. It is used by the stack as
   1159  *  init entry point in network interface structure. It is also used
   1160  *  by the driver as a hw/sw initialization routine to get to a
   1161  *  consistent state.
   1162  *
   1163  *  return 0 on success, positive on failure
   1164  **********************************************************************/
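/* MHADD carries the maximum frame size (MFS) in its upper 16 bits. */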
   1165 #define IXGBE_MHADD_MFS_SHIFT 16
   1166 
   1167 static void
   1168 ixgbe_init_locked(struct adapter *adapter)
   1169 {
   1170 	struct ifnet   *ifp = adapter->ifp;
   1171 	device_t 	dev = adapter->dev;
   1172 	struct ixgbe_hw *hw = &adapter->hw;
   1173 	u32		k, txdctl, mhadd, gpie;
   1174 	u32		rxdctl, rxctrl;
   1175 
   1176 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   1177 
   1178 	KASSERT(mutex_owned(&adapter->core_mtx));
   1179 	INIT_DEBUGOUT("ixgbe_init: begin");
   1180 	hw->adapter_stopped = FALSE;
   1181 	ixgbe_stop_adapter(hw);
   1182         callout_stop(&adapter->timer);
   1183 
   1184 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   1185 	adapter->max_frame_size =
   1186 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1187 
   1188         /* reprogram the RAR[0] in case user changed it. */
   1189         ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   1190 
   1191 	/* Get the latest mac address, User can use a LAA */
   1192 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
   1193 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1194 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
   1195 	hw->addr_ctrl.rar_used_count = 1;
   1196 
   1197 	/* Prepare transmit descriptors and buffers */
   1198 	if (ixgbe_setup_transmit_structures(adapter)) {
   1199 		device_printf(dev,"Could not setup transmit structures\n");
   1200 		ixgbe_stop(adapter);
   1201 		return;
   1202 	}
   1203 
   1204 	ixgbe_init_hw(hw);
   1205 	ixgbe_initialize_transmit_units(adapter);
   1206 
   1207 	/* Setup Multicast table */
   1208 	ixgbe_set_multi(adapter);
   1209 
   1210 	/*
   1211 	** Determine the correct mbuf pool
   1212 	** for doing jumbo/headersplit
   1213 	*/
   1214 	if (adapter->max_frame_size <= 2048)
   1215 		adapter->rx_mbuf_sz = MCLBYTES;
   1216 	else if (adapter->max_frame_size <= 4096)
   1217 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   1218 	else if (adapter->max_frame_size <= 9216)
   1219 		adapter->rx_mbuf_sz = MJUM9BYTES;
   1220 	else
   1221 		adapter->rx_mbuf_sz = MJUM16BYTES;
   1222 
   1223 	/* Prepare receive descriptors and buffers */
   1224 	if (ixgbe_setup_receive_structures(adapter)) {
   1225 		device_printf(dev,"Could not setup receive structures\n");
   1226 		ixgbe_stop(adapter);
   1227 		return;
   1228 	}
   1229 
   1230 	/* Configure RX settings */
   1231 	ixgbe_initialize_receive_units(adapter);
   1232 
   1233 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
   1234 
   1235 	/* Enable Fan Failure Interrupt */
   1236 	gpie |= IXGBE_SDP1_GPIEN;
   1237 
   1238 	/* Add for Thermal detection */
   1239 	if (hw->mac.type == ixgbe_mac_82599EB)
   1240 		gpie |= IXGBE_SDP2_GPIEN;
   1241 
   1242 	if (adapter->msix > 1) {
   1243 		/* Enable Enhanced MSIX mode */
   1244 		gpie |= IXGBE_GPIE_MSIX_MODE;
   1245 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
   1246 		    IXGBE_GPIE_OCD;
   1247 	}
   1248 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   1249 
   1250 	/* Set MTU size */
   1251 	if (ifp->if_mtu > ETHERMTU) {
   1252 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   1253 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   1254 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   1255 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   1256 	}
   1257 
   1258 	/* Now enable all the queues */
   1259 
   1260 	for (int i = 0; i < adapter->num_queues; i++) {
   1261 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
   1262 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1263 		/* Set WTHRESH to 8, burst writeback */
   1264 		txdctl |= (8 << 16);
   1265 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
   1266 	}
   1267 
   1268 	for (int i = 0; i < adapter->num_queues; i++) {
   1269 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
   1270 		if (hw->mac.type == ixgbe_mac_82598EB) {
   1271 			/*
   1272 			** PTHRESH = 21
   1273 			** HTHRESH = 4
   1274 			** WTHRESH = 8
   1275 			*/
   1276 			rxdctl &= ~0x3FFFFF;
   1277 			rxdctl |= 0x080420;
   1278 		}
   1279 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   1280 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
   1281 		/* XXX I don't trust this loop, and I don't trust the
   1282 		 * XXX memory barrier.  What is this meant to do? --dyoung
   1283 		 */
   1284 		for (k = 0; k < 10; k++) {
   1285 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
   1286 			    IXGBE_RXDCTL_ENABLE)
   1287 				break;
   1288 			else
   1289 				msec_delay(1);
   1290 		}
   1291 		wmb();
   1292 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
   1293 	}
   1294 
   1295 	/* Set up VLAN support and filter */
   1296 	ixgbe_setup_vlan_hw_support(adapter);
   1297 
   1298 	/* Enable Receive engine */
   1299 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   1300 	if (hw->mac.type == ixgbe_mac_82598EB)
   1301 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   1302 	rxctrl |= IXGBE_RXCTRL_RXEN;
   1303 	ixgbe_enable_rx_dma(hw, rxctrl);
   1304 
   1305 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   1306 
   1307 	/* Set up MSI/X routing */
   1308 	if (ixgbe_enable_msix)  {
   1309 		ixgbe_configure_ivars(adapter);
   1310 		/* Set up auto-mask */
   1311 		if (hw->mac.type == ixgbe_mac_82598EB)
   1312 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   1313 		else {
   1314 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   1315 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   1316 		}
   1317 	} else {  /* Simple settings for Legacy/MSI */
   1318                 ixgbe_set_ivar(adapter, 0, 0, 0);
   1319                 ixgbe_set_ivar(adapter, 0, 0, 1);
   1320 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   1321 	}
   1322 
   1323 #ifdef IXGBE_FDIR
   1324 	/* Init Flow director */
   1325 	if (hw->mac.type != ixgbe_mac_82598EB)
   1326 		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
   1327 #endif
   1328 
   1329 	/*
   1330 	** Check on any SFP devices that
   1331 	** need to be kick-started
   1332 	*/
   1333 	if (hw->phy.type == ixgbe_phy_none) {
   1334 		int err = hw->phy.ops.identify(hw);
   1335 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   1336                 	device_printf(dev,
   1337 			    "Unsupported SFP+ module type was detected.\n");
   1338 			return;
   1339         	}
   1340 	}
   1341 
   1342 	/* Set moderation on the Link interrupt */
   1343 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
   1344 
   1345 	/* Config/Enable Link */
   1346 	ixgbe_config_link(adapter);
   1347 
   1348 	/* And now turn on interrupts */
   1349 	ixgbe_enable_intr(adapter);
   1350 
   1351 	/* Now inform the stack we're ready */
   1352 	ifp->if_flags |= IFF_RUNNING;
   1353 	ifp->if_flags &= ~IFF_OACTIVE;
   1354 
   1355 	return;
   1356 }
   1357 
   1358 static int
   1359 ixgbe_init(struct ifnet *ifp)
   1360 {
   1361 	struct adapter *adapter = ifp->if_softc;
   1362 
   1363 	IXGBE_CORE_LOCK(adapter);
   1364 	ixgbe_init_locked(adapter);
   1365 	IXGBE_CORE_UNLOCK(adapter);
   1366 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   1367 }
   1368 
   1369 
   1370 /*
   1371 **
   1372 ** MSIX Interrupt Handlers and Tasklets
   1373 **
   1374 */
   1375 
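/*
** On 82598 the per-queue interrupt bits fit in the single EIMS/EIMC/EICS
** registers; on newer MACs the 64 possible queue vectors are split across
** the _EX(0) and _EX(1) register pairs, hence the 32-bit mask split below.
*/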
   1376 static inline void
   1377 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   1378 {
   1379 	struct ixgbe_hw *hw = &adapter->hw;
   1380 	u64	queue = (u64)(1ULL << vector);
   1381 	u32	mask;
   1382 
   1383 	if (hw->mac.type == ixgbe_mac_82598EB) {
   1384                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1385                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   1386 	} else {
   1387                 mask = (queue & 0xFFFFFFFF);
   1388                 if (mask)
   1389                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   1390                 mask = (queue >> 32);
   1391                 if (mask)
   1392                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   1393 	}
   1394 }
   1395 
   1396 __unused static inline void
   1397 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   1398 {
   1399 	struct ixgbe_hw *hw = &adapter->hw;
   1400 	u64	queue = (u64)(1ULL << vector);
   1401 	u32	mask;
   1402 
   1403 	if (hw->mac.type == ixgbe_mac_82598EB) {
   1404                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1405                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   1406 	} else {
   1407                 mask = (queue & 0xFFFFFFFF);
   1408                 if (mask)
   1409                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   1410                 mask = (queue >> 32);
   1411                 if (mask)
   1412                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   1413 	}
   1414 }
   1415 
   1416 static inline void
   1417 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   1418 {
   1419 	u32 mask;
   1420 
   1421 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   1422 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   1423 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   1424 	} else {
   1425 		mask = (queues & 0xFFFFFFFF);
   1426 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   1427 		mask = (queues >> 32);
   1428 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   1429 	}
   1430 }
   1431 
   1432 
   1433 static void
   1434 ixgbe_handle_que(void *context)
   1435 {
   1436 	struct ix_queue *que = context;
   1437 	struct adapter  *adapter = que->adapter;
   1438 	struct tx_ring  *txr = que->txr;
   1439 	struct ifnet    *ifp = adapter->ifp;
   1440 	bool		more;
   1441 
   1442 	adapter->handleq.ev_count++;
   1443 
   1444 	if (ifp->if_flags & IFF_RUNNING) {
   1445 		more = ixgbe_rxeof(que, adapter->rx_process_limit);
   1446 		IXGBE_TX_LOCK(txr);
   1447 		ixgbe_txeof(txr);
   1448 #if __FreeBSD_version >= 800000
   1449 		if (!drbr_empty(ifp, txr->br))
   1450 			ixgbe_mq_start_locked(ifp, txr, NULL);
   1451 #else
   1452 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1453 			ixgbe_start_locked(txr, ifp);
   1454 #endif
   1455 		IXGBE_TX_UNLOCK(txr);
   1456 		if (more) {
   1457 			adapter->req.ev_count++;
   1458 			softint_schedule(que->que_si);
   1459 			return;
   1460 		}
   1461 	}
   1462 
   1463 	/* Reenable this interrupt */
   1464 	ixgbe_enable_queue(adapter, que->msix);
   1465 
   1466 	return;
   1467 }
   1468 
   1469 
   1470 /*********************************************************************
   1471  *
   1472  *  Legacy Interrupt Service routine
   1473  *
   1474  **********************************************************************/
   1475 
   1476 static int
   1477 ixgbe_legacy_irq(void *arg)
   1478 {
   1479 	struct ix_queue *que = arg;
   1480 	struct adapter	*adapter = que->adapter;
   1481 	struct ifnet   *ifp = adapter->ifp;
   1482 	struct ixgbe_hw	*hw = &adapter->hw;
   1483 	struct 		tx_ring *txr = adapter->tx_rings;
   1484 	bool		more_tx = false, more_rx = false;
   1485 	u32       	reg_eicr, loop = MAX_LOOP;
   1486 
   1487 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   1488 
   1489 	adapter->stats.legint.ev_count++;
   1490 	++que->irqs;
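         	/* A zero cause register means no event bits are set; on a
         	 * shared line the interrupt was most likely not ours. */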
   1491 	if (reg_eicr == 0) {
   1492 		adapter->stats.intzero.ev_count++;
   1493 		if ((ifp->if_flags & IFF_UP) != 0)
   1494 			ixgbe_enable_intr(adapter);
   1495 		return 0;
   1496 	}
   1497 
   1498 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   1499 		more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
   1500 
   1501 		IXGBE_TX_LOCK(txr);
   1502 		do {
   1503 			adapter->txloops.ev_count++;
   1504 			more_tx = ixgbe_txeof(txr);
   1505 		} while (loop-- && more_tx);
   1506 		IXGBE_TX_UNLOCK(txr);
   1507 	}
   1508 
   1509 	if (more_rx || more_tx) {
   1510 		if (more_rx)
   1511 			adapter->morerx.ev_count++;
   1512 		if (more_tx)
   1513 			adapter->moretx.ev_count++;
   1514 		softint_schedule(que->que_si);
   1515 	}
   1516 
   1517 	/* Check for fan failure */
   1518 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
   1519 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
   1520                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1521 		    "REPLACE IMMEDIATELY!!\n");
   1522 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
   1523 	}
   1524 
   1525 	/* Link status change */
   1526 	if (reg_eicr & IXGBE_EICR_LSC)
   1527 		softint_schedule(adapter->link_si);
   1528 
   1529 	ixgbe_enable_intr(adapter);
   1530 	return 1;
   1531 }
   1532 
   1533 
   1534 #if defined(NETBSD_MSI_OR_MSIX)
   1535 /*********************************************************************
   1536  *
   1537  *  MSI Queue Interrupt Service routine
   1538  *
   1539  **********************************************************************/
   1540 void
   1541 ixgbe_msix_que(void *arg)
   1542 {
   1543 	struct ix_queue	*que = arg;
   1544 	struct adapter  *adapter = que->adapter;
   1545 	struct tx_ring	*txr = que->txr;
   1546 	struct rx_ring	*rxr = que->rxr;
   1547 	bool		more_tx, more_rx;
   1548 	u32		newitr = 0;
   1549 
   1550 	++que->irqs;
   1551 
   1552 	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
   1553 
   1554 	IXGBE_TX_LOCK(txr);
   1555 	more_tx = ixgbe_txeof(txr);
   1556 	IXGBE_TX_UNLOCK(txr);
   1557 
   1558 	/* Do AIM now? */
   1559 
   1560 	if (ixgbe_enable_aim == FALSE)
   1561 		goto no_calc;
   1562 	/*
   1563 	** Do Adaptive Interrupt Moderation:
   1564         **  - Write out last calculated setting
   1565 	**  - Calculate based on average size over
   1566 	**    the last interval.
   1567 	*/
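         	/*
         	** Worked example (illustrative): with ~1500-byte average
         	** frames, newitr = 1500 + 24 = 1524; that is under the 3000
         	** cap and above the 300-1200 mid range, so it is halved to
         	** 762 before being written to EITR on the next pass.
         	*/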
   1568         if (que->eitr_setting)
   1569                 IXGBE_WRITE_REG(&adapter->hw,
   1570                     IXGBE_EITR(que->msix), que->eitr_setting);
   1571 
   1572         que->eitr_setting = 0;
   1573 
   1574         /* Idle, do nothing */
   1575         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1576                 goto no_calc;
   1577 
   1578 	if ((txr->bytes) && (txr->packets))
   1579                	newitr = txr->bytes/txr->packets;
   1580 	if ((rxr->bytes) && (rxr->packets))
   1581 		newitr = max(newitr,
   1582 		    (rxr->bytes / rxr->packets));
   1583 	newitr += 24; /* account for hardware frame, crc */
   1584 
   1585 	/* set an upper boundary */
   1586 	newitr = min(newitr, 3000);
   1587 
   1588 	/* Be nice to the mid range */
   1589 	if ((newitr > 300) && (newitr < 1200))
   1590 		newitr = (newitr / 3);
   1591 	else
   1592 		newitr = (newitr / 2);
   1593 
   1594         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   1595                 newitr |= newitr << 16;
   1596         else
   1597                 newitr |= IXGBE_EITR_CNT_WDIS;
   1598 
   1599         /* save for next interrupt */
   1600         que->eitr_setting = newitr;
   1601 
   1602         /* Reset state */
   1603         txr->bytes = 0;
   1604         txr->packets = 0;
   1605         rxr->bytes = 0;
   1606         rxr->packets = 0;
   1607 
   1608 no_calc:
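         	/* If more work is pending, keep this queue's interrupt masked
         	 * and let the softint finish it; ixgbe_handle_que() re-enables
         	 * the interrupt once the backlog is drained. */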
   1609 	if (more_tx || more_rx)
   1610 		softint_schedule(que->que_si);
   1611 	else /* Reenable this interrupt */
   1612 		ixgbe_enable_queue(adapter, que->msix);
   1613 	return;
   1614 }
   1615 
   1616 
   1617 static void
   1618 ixgbe_msix_link(void *arg)
   1619 {
   1620 	struct adapter	*adapter = arg;
   1621 	struct ixgbe_hw *hw = &adapter->hw;
   1622 	u32		reg_eicr;
   1623 
   1624 	++adapter->link_irq.ev_count;
   1625 
   1626 	/* First get the cause */
   1627 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   1628 	/* Clear interrupt with write */
   1629 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
   1630 
   1631 	/* Link status change */
   1632 	if (reg_eicr & IXGBE_EICR_LSC)
   1633 		softint_schedule(adapter->link_si);
   1634 
   1635 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   1636 #ifdef IXGBE_FDIR
   1637 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
   1638 			/* This is probably overkill :) */
   1639 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
   1640 				return;
   1641                 	/* Clear the interrupt */
   1642 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
   1643 			/* Turn off the interface */
   1644 			adapter->ifp->if_flags &= ~IFF_RUNNING;
   1645 			softint_schedule(adapter->fdir_si);
   1646 		} else
   1647 #endif
   1648 		if (reg_eicr & IXGBE_EICR_ECC) {
   1649                 	device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
   1650 			    "Please Reboot!!\n");
   1651 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   1652 		} else
   1653 
   1654 		if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
   1655                 	/* Clear the interrupt */
   1656                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
   1657 			softint_schedule(adapter->msf_si);
   1658         	} else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
   1659                 	/* Clear the interrupt */
   1660                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
   1661 			softint_schedule(adapter->mod_si);
   1662 		}
   1663         }
   1664 
   1665 	/* Check for fan failure */
   1666 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
   1667 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
   1668                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1669 		    "REPLACE IMMEDIATELY!!\n");
   1670 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
   1671 	}
   1672 
   1673 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   1674 	return;
   1675 }
   1676 #endif
   1677 
   1678 /*********************************************************************
   1679  *
   1680  *  Media Ioctl callback
   1681  *
   1682  *  This routine is called whenever the user queries the status of
   1683  *  the interface using ifconfig.
   1684  *
   1685  **********************************************************************/
   1686 static void
   1687 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1688 {
   1689 	struct adapter *adapter = ifp->if_softc;
   1690 
   1691 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   1692 	IXGBE_CORE_LOCK(adapter);
   1693 	ixgbe_update_link_status(adapter);
   1694 
   1695 	ifmr->ifm_status = IFM_AVALID;
   1696 	ifmr->ifm_active = IFM_ETHER;
   1697 
   1698 	if (!adapter->link_active) {
   1699 		IXGBE_CORE_UNLOCK(adapter);
   1700 		return;
   1701 	}
   1702 
   1703 	ifmr->ifm_status |= IFM_ACTIVE;
   1704 
   1705 	switch (adapter->link_speed) {
   1706 		case IXGBE_LINK_SPEED_1GB_FULL:
   1707 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1708 			break;
   1709 		case IXGBE_LINK_SPEED_10GB_FULL:
   1710 			ifmr->ifm_active |= adapter->optics | IFM_FDX;
   1711 			break;
   1712 	}
   1713 
   1714 	IXGBE_CORE_UNLOCK(adapter);
   1715 
   1716 	return;
   1717 }
   1718 
   1719 /*********************************************************************
   1720  *
   1721  *  Media Ioctl callback
   1722  *
   1723  *  This routine is called when the user changes speed/duplex using
    1724  *  media/mediaopt option with ifconfig.
   1725  *
   1726  **********************************************************************/
   1727 static int
   1728 ixgbe_media_change(struct ifnet * ifp)
   1729 {
   1730 	struct adapter *adapter = ifp->if_softc;
   1731 	struct ifmedia *ifm = &adapter->media;
   1732 
   1733 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   1734 
   1735 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1736 		return (EINVAL);
   1737 
   1738         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1739         case IFM_AUTO:
   1740                 adapter->hw.phy.autoneg_advertised =
   1741 		    IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
   1742                 break;
   1743         default:
   1744                 device_printf(adapter->dev, "Only auto media type\n");
   1745 		return (EINVAL);
   1746         }
   1747 
   1748 	return (0);
   1749 }
   1750 
   1751 /*********************************************************************
   1752  *
   1753  *  This routine maps the mbufs to tx descriptors, allowing the
   1754  *  TX engine to transmit the packets.
   1755  *  	- return 0 on success, positive on failure
   1756  *
   1757  **********************************************************************/
   1758 
   1759 static int
   1760 ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1761 {
   1762 	struct m_tag *mtag;
   1763 	struct adapter  *adapter = txr->adapter;
   1764 	struct ethercom *ec = &adapter->osdep.ec;
   1765 	u32		olinfo_status = 0, cmd_type_len;
   1766 	u32		paylen = 0;
   1767 	int             i, j, error;
   1768 	int		first, last = 0;
   1769 	bus_dmamap_t	map;
   1770 	struct ixgbe_tx_buf *txbuf;
   1771 	union ixgbe_adv_tx_desc *txd = NULL;
   1772 
   1773 	/* Basic descriptor defines */
   1774         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1775 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1776 
   1777 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1778         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1779 
   1780         /*
   1781          * Important to capture the first descriptor
   1782          * used because it will contain the index of
   1783          * the one we tell the hardware to report back
   1784          */
   1785         first = txr->next_avail_desc;
   1786 	txbuf = &txr->tx_buffers[first];
   1787 	map = txbuf->map;
   1788 
   1789 	/*
   1790 	 * Map the packet for DMA.
   1791 	 */
   1792 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1793 	    m_head, BUS_DMA_NOWAIT);
   1794 
   1795 	switch (error) {
   1796 	case EAGAIN:
   1797 		adapter->eagain_tx_dma_setup.ev_count++;
   1798 		return EAGAIN;
   1799 	case ENOMEM:
   1800 		adapter->enomem_tx_dma_setup.ev_count++;
   1801 		return EAGAIN;
   1802 	case EFBIG:
   1803 		adapter->efbig_tx_dma_setup.ev_count++;
   1804 		return error;
   1805 	case EINVAL:
   1806 		adapter->einval_tx_dma_setup.ev_count++;
   1807 		return error;
   1808 	default:
   1809 		adapter->other_tx_dma_setup.ev_count++;
   1810 		return error;
   1811 	case 0:
   1812 		break;
   1813 	}
   1814 
    1815 	/* Make certain there are enough descriptors: one per DMA
         	 * segment, plus headroom for an offload context descriptor. */
   1816 	if (map->dm_nsegs > txr->tx_avail - 2) {
   1817 		txr->no_desc_avail.ev_count++;
   1818 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1819 		return EAGAIN;
   1820 	}
   1821 
   1822 	/*
   1823 	** Set up the appropriate offload context
   1824 	** this becomes the first descriptor of
   1825 	** a packet.
   1826 	*/
   1827 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1828 		if (ixgbe_tso_setup(txr, m_head, &paylen)) {
   1829 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1830 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1831 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1832 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1833 			++adapter->tso_tx.ev_count;
   1834 		} else {
   1835 			++adapter->tso_err.ev_count;
    1836 			/* Unload the DMA map before bailing (XXX noted by dyoung). */
         			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
    1837 			return ENXIO;
   1838 		}
   1839 	} else
   1840 		olinfo_status |= ixgbe_tx_ctx_setup(txr, m_head);
   1841 
   1842 #ifdef IXGBE_IEEE1588
    1843         /* This is changing soon to an mtag detection; placeholder only.
    1844          * When a TSTAMP mtag is detected on the mbuf, set
    1845          * cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP here. */
   1846 #endif
   1847 
   1848 #ifdef IXGBE_FDIR
   1849 	/* Do the flow director magic */
   1850 	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
   1851 		++txr->atr_count;
   1852 		if (txr->atr_count >= atr_sample_rate) {
   1853 			ixgbe_atr(txr, m_head);
   1854 			txr->atr_count = 0;
   1855 		}
   1856 	}
   1857 #endif
   1858         /* Record payload length */
   1859 	if (paylen == 0)
   1860         	olinfo_status |= m_head->m_pkthdr.len <<
   1861 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1862 
   1863 	i = txr->next_avail_desc;
   1864 	for (j = 0; j < map->dm_nsegs; j++) {
   1865 		bus_size_t seglen;
   1866 		bus_addr_t segaddr;
   1867 
   1868 		txbuf = &txr->tx_buffers[i];
   1869 		txd = &txr->tx_base[i];
   1870 		seglen = map->dm_segs[j].ds_len;
   1871 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1872 
   1873 		txd->read.buffer_addr = segaddr;
   1874 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
   1875 		    cmd_type_len |seglen);
   1876 		txd->read.olinfo_status = htole32(olinfo_status);
   1877 		last = i; /* descriptor that will get completion IRQ */
   1878 
   1879 		if (++i == adapter->num_tx_desc)
   1880 			i = 0;
   1881 
   1882 		txbuf->m_head = NULL;
   1883 		txbuf->eop_index = -1;
   1884 	}
   1885 
   1886 	txd->read.cmd_type_len |=
   1887 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1888 	txr->tx_avail -= map->dm_nsegs;
   1889 	txr->next_avail_desc = i;
   1890 
   1891 	txbuf->m_head = m_head;
   1892 	/* We exchange the maps instead of copying because otherwise
   1893 	 * we end up with many pointers to the same map and we free
   1894 	 * one map twice in ixgbe_free_transmit_structures().  Who
   1895 	 * knows what other problems this caused.  --dyoung
   1896 	 */
   1897 	txr->tx_buffers[first].map = txbuf->map;
   1898 	txbuf->map = map;
   1899 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1900 	    BUS_DMASYNC_PREWRITE);
   1901 
   1902         /* Set the index of the descriptor that will be marked done */
   1903         txbuf = &txr->tx_buffers[first];
   1904 	txbuf->eop_index = last;
   1905 
   1906         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1907 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1908 	/*
   1909 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
   1910 	 * hardware that this frame is available to transmit.
   1911 	 */
   1912 	++txr->total_packets.ev_count;
   1913 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
   1914 
   1915 	return 0;
   1916 }
   1917 
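         /*
         ** FCTRL.UPE enables unicast promiscuous reception and FCTRL.MPE
         ** multicast promiscuous; both are cleared first and then set
         ** according to the interface flags.
         */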
   1918 static void
   1919 ixgbe_set_promisc(struct adapter *adapter)
   1920 {
   1921 	u_int32_t       reg_rctl;
   1922 	struct ifnet   *ifp = adapter->ifp;
   1923 
   1924 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1925 	reg_rctl &= (~IXGBE_FCTRL_UPE);
   1926 	reg_rctl &= (~IXGBE_FCTRL_MPE);
   1927 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1928 
   1929 	if (ifp->if_flags & IFF_PROMISC) {
   1930 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1931 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1932 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   1933 		reg_rctl |= IXGBE_FCTRL_MPE;
   1934 		reg_rctl &= ~IXGBE_FCTRL_UPE;
   1935 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1936 	}
   1937 	return;
   1938 }
   1939 
   1940 
   1941 /*********************************************************************
   1942  *  Multicast Update
   1943  *
   1944  *  This routine is called whenever multicast address list is updated.
   1945  *
   1946  **********************************************************************/
   1947 #define IXGBE_RAR_ENTRIES 16
   1948 
   1949 static void
   1950 ixgbe_set_multi(struct adapter *adapter)
   1951 {
   1952 	struct ether_multi *enm;
   1953 	struct ether_multistep step;
   1954 	u32	fctrl;
   1955 	u8	*mta;
   1956 	u8	*update_ptr;
   1957 	int	mcnt = 0;
   1958 	struct ethercom *ec = &adapter->osdep.ec;
   1959 	struct ifnet   *ifp = adapter->ifp;
   1960 
   1961 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   1962 
   1963 	mta = adapter->mta;
   1964 	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
   1965 	    MAX_NUM_MULTICAST_ADDRESSES);
   1966 
   1967 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1968 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1969 	if (ifp->if_flags & IFF_PROMISC)
   1970 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1971 	else if (ifp->if_flags & IFF_ALLMULTI) {
   1972 		fctrl |= IXGBE_FCTRL_MPE;
   1973 		fctrl &= ~IXGBE_FCTRL_UPE;
   1974 	} else
   1975 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1976 
   1977 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   1978 
   1979 	ETHER_FIRST_MULTI(step, ec, enm);
   1980 	while (enm != NULL) {
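         		/*
         		 * A range of multicast addresses cannot be expressed
         		 * in the MTA, so fall back to accepting all multicast
         		 * frames (MPE).
         		 */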
   1981 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1982 		           ETHER_ADDR_LEN) != 0) {
   1983 			fctrl |= IXGBE_FCTRL_MPE;
   1984 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   1985 			break;
   1986 		}
   1987 		bcopy(enm->enm_addrlo,
   1988 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1989 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1990 		mcnt++;
   1991 		ETHER_NEXT_MULTI(step, enm);
   1992 	}
   1993 
   1994 	update_ptr = mta;
   1995 	ixgbe_update_mc_addr_list(&adapter->hw,
   1996 	    update_ptr, mcnt, ixgbe_mc_array_itr);
   1997 
   1998 	return;
   1999 }
   2000 
    2001 /*
    2002  * This is an iterator function needed by the multicast shared code.
    2003  * It feeds the shared code routine the addresses in the array built
    2004  * by ixgbe_set_multi(), one at a time.
    2005  */
   2006 static u8 *
   2007 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   2008 {
   2009 	u8 *addr = *update_ptr;
   2010 	u8 *newptr;
   2011 	*vmdq = 0;
   2012 
   2013 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   2014 	*update_ptr = newptr;
   2015 	return addr;
   2016 }
   2017 
   2018 
   2019 /*********************************************************************
   2020  *  Timer routine
   2021  *
    2022  *  This routine checks for link status, updates statistics,
   2023  *  and runs the watchdog check.
   2024  *
   2025  **********************************************************************/
   2026 
   2027 static void
   2028 ixgbe_local_timer1(void *arg)
   2029 {
   2030 	struct adapter *adapter = arg;
   2031 	device_t	dev = adapter->dev;
   2032 	struct tx_ring *txr = adapter->tx_rings;
   2033 
   2034 	KASSERT(mutex_owned(&adapter->core_mtx));
   2035 
   2036 	/* Check for pluggable optics */
   2037 	if (adapter->sfp_probe)
   2038 		if (!ixgbe_sfp_probe(adapter))
   2039 			goto out; /* Nothing to do */
   2040 
   2041 	ixgbe_update_link_status(adapter);
   2042 	ixgbe_update_stats_counters(adapter);
   2043 
   2044 	/*
   2045 	 * If the interface has been paused
   2046 	 * then don't do the watchdog check
   2047 	 */
   2048 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   2049 		goto out;
   2050 
   2051 	/*
   2052 	** Check status on the TX queues for a hang
   2053 	*/
   2054         for (int i = 0; i < adapter->num_queues; i++, txr++)
   2055 		if (txr->queue_status == IXGBE_QUEUE_HUNG)
   2056 			goto hung;
   2057 
   2058 out:
   2059 	ixgbe_rearm_queues(adapter, adapter->que_mask);
   2060 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   2061 	return;
   2062 
   2063 hung:
   2064 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   2065 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
   2066 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
   2067 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
    2068 	device_printf(dev,"TX(%d) desc avail = %d, "
    2069 	    "Next TX to Clean = %d\n",
   2070 	    txr->me, txr->tx_avail, txr->next_to_clean);
   2071 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   2072 	adapter->watchdog_events.ev_count++;
   2073 	ixgbe_init_locked(adapter);
   2074 }
   2075 
   2076 static void
   2077 ixgbe_local_timer(void *arg)
   2078 {
   2079 	struct adapter *adapter = arg;
   2080 
   2081 	IXGBE_CORE_LOCK(adapter);
   2082 	ixgbe_local_timer1(adapter);
   2083 	IXGBE_CORE_UNLOCK(adapter);
   2084 }
   2085 
   2086 /*
   2087 ** Note: this routine updates the OS on the link state
   2088 **	the real check of the hardware only happens with
   2089 **	a link interrupt.
   2090 */
   2091 static void
   2092 ixgbe_update_link_status(struct adapter *adapter)
   2093 {
   2094 	struct ifnet	*ifp = adapter->ifp;
   2095 	struct tx_ring *txr = adapter->tx_rings;
   2096 	device_t dev = adapter->dev;
   2097 
   2098 
   2099 	if (adapter->link_up){
   2100 		if (adapter->link_active == FALSE) {
   2101 			if (bootverbose)
   2102 				device_printf(dev,"Link is up %d Gbps %s \n",
   2103 				    ((adapter->link_speed == 128)? 10:1),
   2104 				    "Full Duplex");
   2105 			adapter->link_active = TRUE;
   2106 			if_link_state_change(ifp, LINK_STATE_UP);
   2107 		}
   2108 	} else { /* Link down */
   2109 		if (adapter->link_active == TRUE) {
   2110 			if (bootverbose)
   2111 				device_printf(dev,"Link is Down\n");
   2112 			if_link_state_change(ifp, LINK_STATE_DOWN);
   2113 			adapter->link_active = FALSE;
   2114 			for (int i = 0; i < adapter->num_queues;
   2115 			    i++, txr++)
   2116 				txr->queue_status = IXGBE_QUEUE_IDLE;
   2117 		}
   2118 	}
   2119 
   2120 	return;
   2121 }
   2122 
   2123 
   2124 static void
   2125 ixgbe_ifstop(struct ifnet *ifp, int disable)
   2126 {
   2127 	struct adapter *adapter = ifp->if_softc;
   2128 
   2129 	IXGBE_CORE_LOCK(adapter);
   2130 	ixgbe_stop(adapter);
   2131 	IXGBE_CORE_UNLOCK(adapter);
   2132 }
   2133 
   2134 /*********************************************************************
   2135  *
   2136  *  This routine disables all traffic on the adapter by issuing a
   2137  *  global reset on the MAC and deallocates TX/RX buffers.
   2138  *
   2139  **********************************************************************/
   2140 
   2141 static void
   2142 ixgbe_stop(void *arg)
   2143 {
   2144 	struct ifnet   *ifp;
   2145 	struct adapter *adapter = arg;
   2146 	struct ixgbe_hw *hw = &adapter->hw;
   2147 	ifp = adapter->ifp;
   2148 
   2149 	KASSERT(mutex_owned(&adapter->core_mtx));
   2150 
   2151 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   2152 	ixgbe_disable_intr(adapter);
   2153 
   2154 	/* Tell the stack that the interface is no longer active */
   2155 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2156 
   2157 	ixgbe_reset_hw(hw);
   2158 	hw->adapter_stopped = FALSE;
   2159 	ixgbe_stop_adapter(hw);
   2160 	/* Turn off the laser */
   2161 	if (hw->phy.multispeed_fiber)
   2162 		ixgbe_disable_tx_laser(hw);
   2163 	callout_stop(&adapter->timer);
   2164 
   2165 	/* reprogram the RAR[0] in case user changed it. */
   2166 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   2167 
   2168 	return;
   2169 }
   2170 
   2171 
   2172 /*********************************************************************
   2173  *
   2174  *  Determine hardware revision.
   2175  *
   2176  **********************************************************************/
   2177 static void
   2178 ixgbe_identify_hardware(struct adapter *adapter)
   2179 {
   2180 	pcitag_t tag;
   2181 	pci_chipset_tag_t pc;
   2182 	pcireg_t subid, id;
   2183 	struct ixgbe_hw *hw = &adapter->hw;
   2184 
   2185 	pc = adapter->osdep.pc;
   2186 	tag = adapter->osdep.tag;
   2187 
   2188 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   2189 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   2190 
   2191 	/* Save off the information about this board */
   2192 	hw->vendor_id = PCI_VENDOR(id);
   2193 	hw->device_id = PCI_PRODUCT(id);
   2194 	hw->revision_id =
   2195 	    PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   2196 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   2197 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   2198 
   2199 	/* We need this here to set the num_segs below */
   2200 	ixgbe_set_mac_type(hw);
   2201 
   2202 	/* Pick up the 82599 and VF settings */
   2203 	if (hw->mac.type != ixgbe_mac_82598EB) {
   2204 		hw->phy.smart_speed = ixgbe_smart_speed;
   2205 		adapter->num_segs = IXGBE_82599_SCATTER;
   2206 	} else
   2207 		adapter->num_segs = IXGBE_82598_SCATTER;
   2208 
   2209 	return;
   2210 }
   2211 
   2212 /*********************************************************************
   2213  *
   2214  *  Determine optic type
   2215  *
   2216  **********************************************************************/
   2217 static void
   2218 ixgbe_setup_optics(struct adapter *adapter)
   2219 {
   2220 	struct ixgbe_hw *hw = &adapter->hw;
   2221 	int		layer;
   2222 
   2223 	layer = ixgbe_get_supported_physical_layer(hw);
   2224 	switch (layer) {
   2225 		case IXGBE_PHYSICAL_LAYER_10GBASE_T:
   2226 			adapter->optics = IFM_10G_T;
   2227 			break;
   2228 		case IXGBE_PHYSICAL_LAYER_1000BASE_T:
   2229 			adapter->optics = IFM_1000_T;
   2230 			break;
   2231 		case IXGBE_PHYSICAL_LAYER_10GBASE_LR:
   2232 		case IXGBE_PHYSICAL_LAYER_10GBASE_LRM:
   2233 			adapter->optics = IFM_10G_LR;
   2234 			break;
   2235 		case IXGBE_PHYSICAL_LAYER_10GBASE_SR:
   2236 			adapter->optics = IFM_10G_SR;
   2237 			break;
   2238 		case IXGBE_PHYSICAL_LAYER_10GBASE_KX4:
   2239 		case IXGBE_PHYSICAL_LAYER_10GBASE_CX4:
   2240 			adapter->optics = IFM_10G_CX4;
   2241 			break;
   2242 		case IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU:
   2243 			adapter->optics = IFM_10G_TWINAX;
   2244 			break;
   2245 		case IXGBE_PHYSICAL_LAYER_1000BASE_KX:
   2246 		case IXGBE_PHYSICAL_LAYER_10GBASE_KR:
   2247 		case IXGBE_PHYSICAL_LAYER_10GBASE_XAUI:
   2248 		case IXGBE_PHYSICAL_LAYER_UNKNOWN:
   2249 		default:
   2250 			adapter->optics = IFM_ETHER | IFM_AUTO;
   2251 			break;
   2252 	}
   2253 	return;
   2254 }
   2255 
   2256 /*********************************************************************
   2257  *
   2258  *  Setup the Legacy or MSI Interrupt handler
   2259  *
   2260  **********************************************************************/
   2261 static int
   2262 ixgbe_allocate_legacy(struct adapter *adapter, const struct pci_attach_args *pa)
   2263 {
   2264 	device_t dev = adapter->dev;
   2265 	struct		ix_queue *que = adapter->queues;
   2266 	char intrbuf[PCI_INTRSTR_LEN];
   2267 #if 0
   2268 	int rid = 0;
   2269 
   2270 	/* MSI RID at 1 */
   2271 	if (adapter->msix == 1)
   2272 		rid = 1;
   2273 #endif
   2274 
   2275 	/* We allocate a single interrupt resource */
   2276  	if (pci_intr_map(pa, &adapter->osdep.ih) != 0) {
   2277 		aprint_error_dev(dev, "unable to map interrupt\n");
   2278 		return ENXIO;
   2279 	} else {
   2280 		aprint_normal_dev(dev, "interrupting at %s\n",
   2281 		    pci_intr_string(adapter->osdep.pc, adapter->osdep.ih,
   2282 			intrbuf, sizeof(intrbuf)));
   2283 	}
   2284 
   2285 	/*
   2286 	 * Try allocating a fast interrupt and the associated deferred
   2287 	 * processing contexts.
   2288 	 */
   2289 	que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que, que);
   2290 
   2291 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2292 	adapter->link_si =
   2293 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2294 	adapter->mod_si =
   2295 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2296 	adapter->msf_si =
   2297 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2298 
   2299 #ifdef IXGBE_FDIR
   2300 	adapter->fdir_si =
   2301 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2302 #endif
   2303 	if (que->que_si == NULL ||
   2304 	    adapter->link_si == NULL ||
   2305 	    adapter->mod_si == NULL ||
   2306 #ifdef IXGBE_FDIR
   2307 	    adapter->fdir_si == NULL ||
   2308 #endif
   2309 	    adapter->msf_si == NULL) {
   2310 		aprint_error_dev(dev,
   2311 		    "could not establish software interrupts\n");
   2312 		return ENXIO;
   2313 	}
   2314 
   2315 	adapter->osdep.intr = pci_intr_establish(adapter->osdep.pc,
   2316 	    adapter->osdep.ih, IPL_NET, ixgbe_legacy_irq, que);
   2317 	if (adapter->osdep.intr == NULL) {
   2318 		aprint_error_dev(dev, "failed to register interrupt handler\n");
   2319 		softint_disestablish(que->que_si);
   2320 		softint_disestablish(adapter->link_si);
   2321 		softint_disestablish(adapter->mod_si);
   2322 		softint_disestablish(adapter->msf_si);
   2323 #ifdef IXGBE_FDIR
   2324 		softint_disestablish(adapter->fdir_si);
   2325 #endif
   2326 		return ENXIO;
   2327 	}
   2328 	/* For simplicity in the handlers */
   2329 	adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
   2330 
   2331 	return (0);
   2332 }
   2333 
   2334 
   2335 /*********************************************************************
   2336  *
   2337  *  Setup MSIX Interrupt resources and handlers
   2338  *
   2339  **********************************************************************/
   2340 static int
   2341 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2342 {
   2343 #if !defined(NETBSD_MSI_OR_MSIX)
   2344 	return 0;
   2345 #else
   2346 	device_t        dev = adapter->dev;
   2347 	struct 		ix_queue *que = adapter->queues;
   2348 	int 		error, rid, vector = 0;
   2349 
   2350 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   2351 		rid = vector + 1;
   2352 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
   2353 		    RF_SHAREABLE | RF_ACTIVE);
   2354 		if (que->res == NULL) {
   2355 			aprint_error_dev(dev,"Unable to allocate"
   2356 		    	    " bus resource: que interrupt [%d]\n", vector);
   2357 			return (ENXIO);
   2358 		}
   2359 		/* Set the handler function */
   2360 		error = bus_setup_intr(dev, que->res,
   2361 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   2362 		    ixgbe_msix_que, que, &que->tag);
   2363 		if (error) {
   2364 			que->res = NULL;
   2365 			aprint_error_dev(dev,
   2366 			    "Failed to register QUE handler\n");
   2367 			return error;
   2368 		}
   2369 #if __FreeBSD_version >= 800504
   2370 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
   2371 #endif
   2372 		que->msix = vector;
    2373         	adapter->que_mask |= (u64)(1ULL << que->msix);
   2374 		/*
   2375 		** Bind the msix vector, and thus the
   2376 		** ring to the corresponding cpu.
   2377 		*/
   2378 		if (adapter->num_queues > 1)
   2379 			bus_bind_intr(dev, que->res, i);
   2380 
    2381 		que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que, que);
   2382 		if (que->que_si == NULL) {
   2383 			aprint_error_dev(dev,
   2384 			    "could not establish software interrupt\n");
   2385 		}
   2386 	}
   2387 
   2388 	/* and Link */
   2389 	rid = vector + 1;
   2390 	adapter->res = bus_alloc_resource_any(dev,
   2391     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
   2392 	if (!adapter->res) {
   2393 		aprint_error_dev(dev,"Unable to allocate bus resource: "
   2394 		    "Link interrupt [%d]\n", rid);
   2395 		return (ENXIO);
   2396 	}
   2397 	/* Set the link handler function */
   2398 	error = bus_setup_intr(dev, adapter->res,
   2399 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   2400 	    ixgbe_msix_link, adapter, &adapter->tag);
   2401 	if (error) {
   2402 		adapter->res = NULL;
   2403 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2404 		return (error);
   2405 	}
   2406 #if __FreeBSD_version >= 800504
   2407 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
   2408 #endif
   2409 	adapter->linkvec = vector;
   2410 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2411 	adapter->link_si =
   2412 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2413 	adapter->mod_si =
   2414 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2415 	adapter->msf_si =
   2416 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2417 #ifdef IXGBE_FDIR
   2418 	adapter->fdir_si =
   2419 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2420 #endif
   2421 
   2422 	return (0);
   2423 #endif
   2424 }
   2425 
   2426 /*
   2427  * Setup Either MSI/X or MSI
   2428  */
   2429 static int
   2430 ixgbe_setup_msix(struct adapter *adapter)
   2431 {
   2432 #if !defined(NETBSD_MSI_OR_MSIX)
   2433 	return 0;
   2434 #else
   2435 	device_t dev = adapter->dev;
   2436 	int rid, want, queues, msgs;
   2437 
   2438 	/* Override by tuneable */
   2439 	if (ixgbe_enable_msix == 0)
   2440 		goto msi;
   2441 
   2442 	/* First try MSI/X */
   2443 	rid = PCI_BAR(MSIX_82598_BAR);
   2444 	adapter->msix_mem = bus_alloc_resource_any(dev,
   2445 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   2446        	if (!adapter->msix_mem) {
   2447 		rid += 4;	/* 82599 maps in higher BAR */
   2448 		adapter->msix_mem = bus_alloc_resource_any(dev,
   2449 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   2450 	}
   2451        	if (!adapter->msix_mem) {
   2452 		/* May not be enabled */
   2453 		device_printf(adapter->dev,
   2454 		    "Unable to map MSIX table \n");
   2455 		goto msi;
   2456 	}
   2457 
   2458 	msgs = pci_msix_count(dev);
   2459 	if (msgs == 0) { /* system has msix disabled */
   2460 		bus_release_resource(dev, SYS_RES_MEMORY,
   2461 		    rid, adapter->msix_mem);
   2462 		adapter->msix_mem = NULL;
   2463 		goto msi;
   2464 	}
   2465 
   2466 	/* Figure out a reasonable auto config value */
   2467 	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
   2468 
   2469 	if (ixgbe_num_queues != 0)
   2470 		queues = ixgbe_num_queues;
   2471 	/* Set max queues to 8 when autoconfiguring */
   2472 	else if ((ixgbe_num_queues == 0) && (queues > 8))
   2473 		queues = 8;
   2474 
   2475 	/*
   2476 	** Want one vector (RX/TX pair) per queue
   2477 	** plus an additional for Link.
   2478 	*/
   2479 	want = queues + 1;
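         	/* e.g. with 8 CPUs and 16 MSI-X messages advertised, queues = 8
         	** and want = 9: one vector per RX/TX queue pair plus one for
         	** link events. */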
   2480 	if (msgs >= want)
   2481 		msgs = want;
   2482 	else {
   2483                	device_printf(adapter->dev,
   2484 		    "MSIX Configuration Problem, "
    2485 		    "%d vectors available but %d needed!\n",
   2486 		    msgs, want);
   2487 		return (0); /* Will go to Legacy setup */
   2488 	}
   2489 	if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
   2490                	device_printf(adapter->dev,
   2491 		    "Using MSIX interrupts with %d vectors\n", msgs);
   2492 		adapter->num_queues = queues;
   2493 		return (msgs);
   2494 	}
   2495 msi:
   2496        	msgs = pci_msi_count(dev);
   2497        	if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
   2498                	device_printf(adapter->dev,"Using MSI interrupt\n");
   2499 	return (msgs);
   2500 #endif
   2501 }
   2502 
   2503 
   2504 static int
   2505 ixgbe_allocate_pci_resources(struct adapter *adapter, const struct pci_attach_args *pa)
   2506 {
   2507 	pcireg_t	memtype;
   2508 	device_t        dev = adapter->dev;
   2509 	bus_addr_t addr;
   2510 	int flags;
   2511 
   2512 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   2513 	switch (memtype) {
   2514 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2515 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2516 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   2517 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   2518 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   2519 			goto map_err;
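         		/* Device registers must be mapped non-prefetchable, so
         		 * strip the prefetchable flag if the BAR advertises it. */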
   2520 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   2521 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   2522 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   2523 		}
   2524 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   2525 		     adapter->osdep.mem_size, flags,
   2526 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   2527 map_err:
   2528 			adapter->osdep.mem_size = 0;
   2529 			aprint_error_dev(dev, "unable to map BAR0\n");
   2530 			return ENXIO;
   2531 		}
   2532 		break;
   2533 	default:
   2534 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   2535 		return ENXIO;
   2536 	}
   2537 
   2538 	/* Legacy defaults */
   2539 	adapter->num_queues = 1;
   2540 	adapter->hw.back = &adapter->osdep;
   2541 
   2542 	/*
   2543 	** Now setup MSI or MSI/X, should
   2544 	** return us the number of supported
   2545 	** vectors. (Will be 1 for MSI)
   2546 	*/
   2547 	adapter->msix = ixgbe_setup_msix(adapter);
   2548 	return (0);
   2549 }
   2550 
   2551 static void
   2552 ixgbe_free_pci_resources(struct adapter * adapter)
   2553 {
   2554 #if defined(NETBSD_MSI_OR_MSIX)
   2555 	struct 		ix_queue *que = adapter->queues;
   2556 	device_t	dev = adapter->dev;
   2557 #endif
   2558 	int		rid;
   2559 
   2560 #if defined(NETBSD_MSI_OR_MSIX)
   2561 	int		 memrid;
   2562 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   2563 		memrid = PCI_BAR(MSIX_82598_BAR);
   2564 	else
   2565 		memrid = PCI_BAR(MSIX_82599_BAR);
   2566 
    2567 	/*
    2568 	** There is a slight possibility of a failure mode in attach that
    2569 	** results in entering this function before interrupt resources
    2570 	** have been initialized; in that case we do not want to execute
    2571 	** the loops below.  We can detect this reliably by the state of
    2572 	** the adapter's res pointer.
    2573 	*/
   2575 	if (adapter->res == NULL)
   2576 		goto mem;
   2577 
   2578 	/*
   2579 	**  Release all msix queue resources:
   2580 	*/
   2581 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2582 		rid = que->msix + 1;
   2583 		if (que->tag != NULL) {
   2584 			bus_teardown_intr(dev, que->res, que->tag);
   2585 			que->tag = NULL;
   2586 		}
   2587 		if (que->res != NULL)
   2588 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
   2589 	}
   2590 #endif
   2591 
   2592 	/* Clean the Legacy or Link interrupt last */
   2593 	if (adapter->linkvec) /* we are doing MSIX */
   2594 		rid = adapter->linkvec + 1;
   2595 	else
   2596 		(adapter->msix != 0) ? (rid = 1):(rid = 0);
   2597 
   2598 	pci_intr_disestablish(adapter->osdep.pc, adapter->osdep.intr);
   2599 	adapter->osdep.intr = NULL;
   2600 
   2601 #if defined(NETBSD_MSI_OR_MSIX)
   2602 mem:
   2603 	if (adapter->msix)
   2604 		pci_release_msi(dev);
   2605 
   2606 	if (adapter->msix_mem != NULL)
   2607 		bus_release_resource(dev, SYS_RES_MEMORY,
   2608 		    memrid, adapter->msix_mem);
   2609 #endif
   2610 
   2611 	if (adapter->osdep.mem_size != 0) {
   2612 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   2613 		    adapter->osdep.mem_bus_space_handle,
   2614 		    adapter->osdep.mem_size);
   2615 	}
   2616 
   2617 	return;
   2618 }
   2619 
   2620 /*********************************************************************
   2621  *
   2622  *  Setup networking device structure and register an interface.
   2623  *
   2624  **********************************************************************/
   2625 static int
   2626 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   2627 {
   2628 	struct ethercom *ec = &adapter->osdep.ec;
   2629 	struct ixgbe_hw *hw = &adapter->hw;
   2630 	struct ifnet   *ifp;
   2631 
   2632 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   2633 
   2634 	ifp = adapter->ifp = &ec->ec_if;
   2635 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   2636 	ifp->if_mtu = ETHERMTU;
   2637 	ifp->if_baudrate = 1000000000;
   2638 	ifp->if_init = ixgbe_init;
   2639 	ifp->if_stop = ixgbe_ifstop;
   2640 	ifp->if_softc = adapter;
   2641 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2642 	ifp->if_ioctl = ixgbe_ioctl;
   2643 	ifp->if_start = ixgbe_start;
   2644 #if __FreeBSD_version >= 800000
   2645 	ifp->if_transmit = ixgbe_mq_start;
   2646 	ifp->if_qflush = ixgbe_qflush;
   2647 #endif
   2648 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   2649 
   2650 	if_attach(ifp);
   2651 	ether_ifattach(ifp, adapter->hw.mac.addr);
   2652 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   2653 
   2654 	adapter->max_frame_size =
   2655 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   2656 
   2657 	/*
   2658 	 * Tell the upper layer(s) we support long frames.
   2659 	 */
   2660 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   2661 
   2662 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   2663 	ifp->if_capenable = 0;
   2664 
   2665 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   2666 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
   2667 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2668 	ec->ec_capenable = ec->ec_capabilities;
   2669 
    2670 	/* Advertise LRO capability, but leave it disabled by default */
    2671 	ifp->if_capabilities |= IFCAP_LRO;
   2672 
   2673 	/*
    2674 	** Don't turn this on by default; if vlans are
    2675 	** created on another pseudo device (e.g. lagg)
    2676 	** then vlan events are not passed through, breaking
    2677 	** operation, but with HW FILTER off it works.  If
    2678 	** using vlans directly on this driver you can
    2679 	** enable this and get full hardware tag filtering.
   2680 	*/
   2681 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   2682 
   2683 	/*
   2684 	 * Specify the media types supported by this adapter and register
   2685 	 * callbacks to update media and link information
   2686 	 */
   2687 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   2688 		     ixgbe_media_status);
   2689 	ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
   2690 	ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
   2691 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
   2692 		ifmedia_add(&adapter->media,
   2693 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2694 		ifmedia_add(&adapter->media,
   2695 		    IFM_ETHER | IFM_1000_T, 0, NULL);
   2696 	}
   2697 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   2698 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   2699 
   2700 	return (0);
   2701 }
   2702 
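         /*
         ** For SFP/SFP+ modules link bring-up is deferred to the module
         ** (mod_si) or multispeed-fiber (msf_si) tasklets; for other PHYs
         ** the advertised capabilities are resolved here and handed to the
         ** MAC's setup_link method.
         */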
   2703 static void
   2704 ixgbe_config_link(struct adapter *adapter)
   2705 {
   2706 	struct ixgbe_hw *hw = &adapter->hw;
   2707 	u32	autoneg, err = 0;
   2708 	bool	sfp, negotiate;
   2709 
   2710 	sfp = ixgbe_is_sfp(hw);
   2711 
   2712 	if (sfp) {
   2713 		if (hw->phy.multispeed_fiber) {
   2714 			hw->mac.ops.setup_sfp(hw);
   2715 			ixgbe_enable_tx_laser(hw);
   2716 			softint_schedule(adapter->msf_si);
   2717 		} else {
   2718 			softint_schedule(adapter->mod_si);
   2719 		}
   2720 	} else {
   2721 		if (hw->mac.ops.check_link)
   2722 			err = ixgbe_check_link(hw, &autoneg,
   2723 			    &adapter->link_up, FALSE);
   2724 		if (err)
   2725 			goto out;
   2726 		autoneg = hw->phy.autoneg_advertised;
   2727 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   2728                 	err  = hw->mac.ops.get_link_capabilities(hw,
   2729 			    &autoneg, &negotiate);
   2730 		else
   2731 			negotiate = 0;
   2732 		if (err)
   2733 			goto out;
   2734 		if (hw->mac.ops.setup_link)
   2735                 	err = hw->mac.ops.setup_link(hw, autoneg,
   2736 			    negotiate, adapter->link_up);
   2737 	}
   2738 out:
   2739 	return;
   2740 }
   2741 
   2742 /********************************************************************
   2743  * Manage DMA'able memory.
   2744  *******************************************************************/
   2745 
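         /*
          * Standard bus_dma(9) sequence: create a tag, allocate and map
          * DMA-safe memory, create a map, then load it.  Each failure path
          * unwinds the steps already completed via the fail_* labels.
          */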
   2746 static int
   2747 ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
   2748 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2749 {
   2750 	device_t dev = adapter->dev;
   2751 	int             r, rsegs;
   2752 
   2753 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2754 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2755 			       size,	/* maxsize */
   2756 			       1,	/* nsegments */
   2757 			       size,	/* maxsegsize */
   2758 			       BUS_DMA_ALLOCNOW,	/* flags */
   2759 			       &dma->dma_tag);
   2760 	if (r != 0) {
   2761 		aprint_error_dev(dev,
   2762 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
   2763 		goto fail_0;
   2764 	}
   2765 
   2766 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2767 		size,
   2768 		dma->dma_tag->dt_alignment,
   2769 		dma->dma_tag->dt_boundary,
   2770 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2771 	if (r != 0) {
   2772 		aprint_error_dev(dev,
   2773 		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
   2774 		goto fail_1;
   2775 	}
   2776 
   2777 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2778 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2779 	if (r != 0) {
   2780 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2781 		    __func__, r);
   2782 		goto fail_2;
   2783 	}
   2784 
   2785 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2786 	if (r != 0) {
    2787 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
   2788 		    __func__, r);
   2789 		goto fail_3;
   2790 	}
   2791 
   2792 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2793 			    size,
   2794 			    NULL,
   2795 			    mapflags | BUS_DMA_NOWAIT);
   2796 	if (r != 0) {
   2797 		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
   2798 		    __func__, r);
   2799 		goto fail_4;
   2800 	}
   2801 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2802 	dma->dma_size = size;
   2803 	return 0;
   2804 fail_4:
   2805 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2806 fail_3:
   2807 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2808 fail_2:
   2809 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2810 fail_1:
   2811 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2812 fail_0:
   2813 	return r;
   2814 }
   2815 
   2816 static void
   2817 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
   2818 {
   2819 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2820 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2821 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2822 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2823 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2824 }
   2825 
   2826 
   2827 /*********************************************************************
   2828  *
   2829  *  Allocate memory for the transmit and receive rings, and then
   2830  *  the descriptors associated with each, called only once at attach.
   2831  *
   2832  **********************************************************************/
   2833 static int
   2834 ixgbe_allocate_queues(struct adapter *adapter)
   2835 {
   2836 	device_t	dev = adapter->dev;
   2837 	struct ix_queue	*que;
   2838 	struct tx_ring	*txr;
   2839 	struct rx_ring	*rxr;
   2840 	int rsize, tsize, error = IXGBE_SUCCESS;
   2841 	int txconf = 0, rxconf = 0;
   2842 
   2843         /* First allocate the top level queue structs */
   2844         if (!(adapter->queues =
   2845             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2846             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2847                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2848                 error = ENOMEM;
   2849                 goto fail;
   2850         }
   2851 
   2852 	/* First allocate the TX ring struct memory */
   2853 	if (!(adapter->tx_rings =
   2854 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2855 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2856 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2857 		error = ENOMEM;
   2858 		goto tx_fail;
   2859 	}
   2860 
   2861 	/* Next allocate the RX */
   2862 	if (!(adapter->rx_rings =
   2863 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2864 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2865 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2866 		error = ENOMEM;
   2867 		goto rx_fail;
   2868 	}
   2869 
   2870 	/* For the ring itself */
   2871 	tsize = roundup2(adapter->num_tx_desc *
   2872 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2873 
   2874 	/*
   2875 	 * Now set up the TX queues, txconf is needed to handle the
   2876 	 * possibility that things fail midcourse and we need to
   2877 	 * undo memory gracefully
   2878 	 */
   2879 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2880 		/* Set up some basics */
   2881 		txr = &adapter->tx_rings[i];
   2882 		txr->adapter = adapter;
   2883 		txr->me = i;
   2884 
   2885 		/* Initialize the TX side lock */
   2886 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2887 		    device_xname(dev), txr->me);
   2888 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2889 
   2890 		if (ixgbe_dma_malloc(adapter, tsize,
   2891 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2892 			aprint_error_dev(dev,
   2893 			    "Unable to allocate TX Descriptor memory\n");
   2894 			error = ENOMEM;
   2895 			goto err_tx_desc;
   2896 		}
   2897 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2898 		bzero((void *)txr->tx_base, tsize);
   2899 
   2900         	/* Now allocate transmit buffers for the ring */
   2901         	if (ixgbe_allocate_transmit_buffers(txr)) {
   2902 			aprint_error_dev(dev,
   2903 			    "Critical Failure setting up transmit buffers\n");
   2904 			error = ENOMEM;
   2905 			goto err_tx_desc;
   2906         	}
   2907 #if __FreeBSD_version >= 800000
   2908 		/* Allocate a buf ring */
   2909 		txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
   2910 		    M_WAITOK, &txr->tx_mtx);
   2911 		if (txr->br == NULL) {
   2912 			aprint_error_dev(dev,
   2913 			    "Critical Failure setting up buf ring\n");
   2914 			error = ENOMEM;
   2915 			goto err_tx_desc;
   2916         	}
   2917 #endif
   2918 	}
   2919 
   2920 	/*
   2921 	 * Next the RX queues...
   2922 	 */
   2923 	rsize = roundup2(adapter->num_rx_desc *
   2924 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2925 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2926 		rxr = &adapter->rx_rings[i];
   2927 		/* Set up some basics */
   2928 		rxr->adapter = adapter;
   2929 		rxr->me = i;
   2930 
   2931 		/* Initialize the RX side lock */
   2932 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2933 		    device_xname(dev), rxr->me);
   2934 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2935 
   2936 		if (ixgbe_dma_malloc(adapter, rsize,
   2937 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2938 			aprint_error_dev(dev,
    2939 			    "Unable to allocate RX descriptor memory\n");
   2940 			error = ENOMEM;
   2941 			goto err_rx_desc;
   2942 		}
   2943 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2944 		bzero((void *)rxr->rx_base, rsize);
   2945 
   2946         	/* Allocate receive buffers for the ring*/
   2947 		if (ixgbe_allocate_receive_buffers(rxr)) {
   2948 			aprint_error_dev(dev,
   2949 			    "Critical Failure setting up receive buffers\n");
   2950 			error = ENOMEM;
   2951 			goto err_rx_desc;
   2952 		}
   2953 	}
   2954 
   2955 	/*
   2956 	** Finally set up the queue holding structs
   2957 	*/
   2958 	for (int i = 0; i < adapter->num_queues; i++) {
   2959 		que = &adapter->queues[i];
   2960 		que->adapter = adapter;
   2961 		que->txr = &adapter->tx_rings[i];
   2962 		que->rxr = &adapter->rx_rings[i];
   2963 	}
   2964 
   2965 	return (0);
   2966 
   2967 err_rx_desc:
   2968 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2969 		ixgbe_dma_free(adapter, &rxr->rxdma);
   2970 err_tx_desc:
   2971 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2972 		ixgbe_dma_free(adapter, &txr->txdma);
   2973 	free(adapter->rx_rings, M_DEVBUF);
   2974 rx_fail:
   2975 	free(adapter->tx_rings, M_DEVBUF);
   2976 tx_fail:
   2977 	free(adapter->queues, M_DEVBUF);
   2978 fail:
   2979 	return (error);
   2980 }
   2981 
   2982 /*********************************************************************
   2983  *
   2984  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2985  *  the information needed to transmit a packet on the wire. This is
   2986  *  called only once at attach, setup is done every reset.
   2987  *
   2988  **********************************************************************/
   2989 static int
   2990 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
   2991 {
   2992 	struct adapter *adapter = txr->adapter;
   2993 	device_t dev = adapter->dev;
   2994 	struct ixgbe_tx_buf *txbuf;
   2995 	int error, i;
   2996 
   2997 	/*
   2998 	 * Setup DMA descriptor areas.
   2999 	 */
   3000 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3001 			       1, 0,		/* alignment, bounds */
   3002 			       IXGBE_TSO_SIZE,		/* maxsize */
   3003 			       adapter->num_segs,	/* nsegments */
   3004 			       PAGE_SIZE,		/* maxsegsize */
   3005 			       0,			/* flags */
   3006 			       &txr->txtag))) {
   3007 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   3008 		goto fail;
   3009 	}
   3010 
   3011 	if (!(txr->tx_buffers =
   3012 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
   3013 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   3014 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   3015 		error = ENOMEM;
   3016 		goto fail;
   3017 	}
   3018 
   3019         /* Create the descriptor buffer dma maps */
   3020 	txbuf = txr->tx_buffers;
   3021 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   3022 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   3023 		if (error != 0) {
   3024 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   3025 			goto fail;
   3026 		}
   3027 	}
   3028 
   3029 	return 0;
   3030 fail:
   3031 	/* We free all, it handles case where we are in the middle */
   3032 	ixgbe_free_transmit_structures(adapter);
   3033 	return (error);
   3034 }
   3035 
   3036 /*********************************************************************
   3037  *
   3038  *  Initialize a transmit ring.
   3039  *
   3040  **********************************************************************/
   3041 static void
   3042 ixgbe_setup_transmit_ring(struct tx_ring *txr)
   3043 {
   3044 	struct adapter *adapter = txr->adapter;
   3045 	struct ixgbe_tx_buf *txbuf;
   3046 	int i;
   3047 
   3048 	/* Clear the old ring contents */
   3049 	IXGBE_TX_LOCK(txr);
   3050 	bzero((void *)txr->tx_base,
   3051 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   3052 	/* Reset indices */
   3053 	txr->next_avail_desc = 0;
   3054 	txr->next_to_clean = 0;
   3055 
   3056 	/* Free any existing tx buffers. */
   3057         txbuf = txr->tx_buffers;
   3058 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   3059 		if (txbuf->m_head != NULL) {
   3060 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   3061 			    0, txbuf->m_head->m_pkthdr.len,
   3062 			    BUS_DMASYNC_POSTWRITE);
   3063 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   3064 			m_freem(txbuf->m_head);
   3065 			txbuf->m_head = NULL;
   3066 		}
   3067 		/* Clear the EOP index */
   3068 		txbuf->eop_index = -1;
   3069         }
   3070 
   3071 #ifdef IXGBE_FDIR
   3072 	/* Set the rate at which we sample packets */
   3073 	if (adapter->hw.mac.type != ixgbe_mac_82598EB)
   3074 		txr->atr_sample = atr_sample_rate;
   3075 #endif
   3076 
   3077 	/* Set number of descriptors available */
   3078 	txr->tx_avail = adapter->num_tx_desc;
   3079 
   3080 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3081 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3082 	IXGBE_TX_UNLOCK(txr);
   3083 }
   3084 
   3085 /*********************************************************************
   3086  *
   3087  *  Initialize all transmit rings.
   3088  *
   3089  **********************************************************************/
   3090 static int
   3091 ixgbe_setup_transmit_structures(struct adapter *adapter)
   3092 {
   3093 	struct tx_ring *txr = adapter->tx_rings;
   3094 
   3095 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   3096 		ixgbe_setup_transmit_ring(txr);
   3097 
   3098 	return (0);
   3099 }
   3100 
   3101 /*********************************************************************
   3102  *
   3103  *  Enable transmit unit.
   3104  *
   3105  **********************************************************************/
   3106 static void
   3107 ixgbe_initialize_transmit_units(struct adapter *adapter)
   3108 {
   3109 	struct tx_ring	*txr = adapter->tx_rings;
   3110 	struct ixgbe_hw	*hw = &adapter->hw;
   3111 
   3112 	/* Setup the Base and Length of the Tx Descriptor Ring */
   3113 
   3114 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   3115 		u64	tdba = txr->txdma.dma_paddr;
   3116 		u32	txctrl;
   3117 
   3118 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
   3119 		       (tdba & 0x00000000ffffffffULL));
   3120 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
   3121 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
   3122 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   3123 
   3124 		/* Setup the HW Tx Head and Tail descriptor pointers */
   3125 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
   3126 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
   3127 
   3128 		/* Setup Transmit Descriptor Cmd Settings */
   3129 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   3130 		txr->queue_status = IXGBE_QUEUE_IDLE;
   3131 
   3132 		/* Disable Head Writeback */
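         		/*
         		 * The per-queue DCA_TXCTRL register is at a different
         		 * offset on 82599 and later MACs, hence the switches
         		 * on mac.type below.
         		 */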
   3133 		switch (hw->mac.type) {
   3134 		case ixgbe_mac_82598EB:
   3135 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
   3136 			break;
   3137 		case ixgbe_mac_82599EB:
   3138 		default:
   3139 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
   3140 			break;
    3141 		}
   3142 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
   3143 		switch (hw->mac.type) {
   3144 		case ixgbe_mac_82598EB:
   3145 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
   3146 			break;
   3147 		case ixgbe_mac_82599EB:
   3148 		default:
   3149 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
   3150 			break;
   3151 		}
   3152 
   3153 	}
   3154 
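         	/*
         	 * On 82599-class MACs the TX DMA engine must be enabled
         	 * explicitly, and the descriptor arbiter is parked (ARBDIS)
         	 * while MTQC is programmed, then released again.
         	 */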
   3155 	if (hw->mac.type != ixgbe_mac_82598EB) {
   3156 		u32 dmatxctl, rttdcs;
   3157 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
   3158 		dmatxctl |= IXGBE_DMATXCTL_TE;
   3159 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
   3160 		/* Disable arbiter to set MTQC */
   3161 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
   3162 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
   3163 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
   3164 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
   3165 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
   3166 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
   3167 	}
   3168 
   3169 	return;
   3170 }
   3171 
   3172 /*********************************************************************
   3173  *
   3174  *  Free all transmit rings.
   3175  *
   3176  **********************************************************************/
   3177 static void
   3178 ixgbe_free_transmit_structures(struct adapter *adapter)
   3179 {
   3180 	struct tx_ring *txr = adapter->tx_rings;
   3181 
   3182 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   3183 		IXGBE_TX_LOCK(txr);
   3184 		ixgbe_free_transmit_buffers(txr);
   3185 		ixgbe_dma_free(adapter, &txr->txdma);
   3186 		IXGBE_TX_UNLOCK(txr);
   3187 		IXGBE_TX_LOCK_DESTROY(txr);
   3188 	}
   3189 	free(adapter->tx_rings, M_DEVBUF);
   3190 }
   3191 
   3192 /*********************************************************************
   3193  *
   3194  *  Free transmit ring related data structures.
   3195  *
   3196  **********************************************************************/
   3197 static void
   3198 ixgbe_free_transmit_buffers(struct tx_ring *txr)
   3199 {
   3200 	struct adapter *adapter = txr->adapter;
   3201 	struct ixgbe_tx_buf *tx_buffer;
   3202 	int             i;
   3203 
   3204 	INIT_DEBUGOUT("free_transmit_ring: begin");
   3205 
   3206 	if (txr->tx_buffers == NULL)
   3207 		return;
   3208 
   3209 	tx_buffer = txr->tx_buffers;
   3210 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   3211 		if (tx_buffer->m_head != NULL) {
   3212 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   3213 			    0, tx_buffer->m_head->m_pkthdr.len,
   3214 			    BUS_DMASYNC_POSTWRITE);
   3215 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3216 			m_freem(tx_buffer->m_head);
   3217 			tx_buffer->m_head = NULL;
   3218 			if (tx_buffer->map != NULL) {
   3219 				ixgbe_dmamap_destroy(txr->txtag,
   3220 				    tx_buffer->map);
   3221 				tx_buffer->map = NULL;
   3222 			}
   3223 		} else if (tx_buffer->map != NULL) {
   3224 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3225 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   3226 			tx_buffer->map = NULL;
   3227 		}
   3228 	}
   3229 #if __FreeBSD_version >= 800000
   3230 	if (txr->br != NULL)
   3231 		buf_ring_free(txr->br, M_DEVBUF);
   3232 #endif
   3233 	if (txr->tx_buffers != NULL) {
   3234 		free(txr->tx_buffers, M_DEVBUF);
   3235 		txr->tx_buffers = NULL;
   3236 	}
   3237 	if (txr->txtag != NULL) {
   3238 		ixgbe_dma_tag_destroy(txr->txtag);
   3239 		txr->txtag = NULL;
   3240 	}
   3241 	return;
   3242 }
   3243 
   3244 /*********************************************************************
   3245  *
   3246  *  Advanced Context Descriptor setup for VLAN or L4 CSUM
   3247  *
   3248  **********************************************************************/
   3249 
   3250 static u32
   3251 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   3252 {
   3253 	struct m_tag *mtag;
   3254 	struct adapter *adapter = txr->adapter;
   3255 	struct ethercom *ec = &adapter->osdep.ec;
   3256 	struct ixgbe_adv_tx_context_desc *TXD;
   3257 	struct ixgbe_tx_buf        *tx_buffer;
   3258 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   3259 	struct ether_vlan_header *eh;
   3260 	struct ip ip;
   3261 	struct ip6_hdr ip6;
   3262 	int  ehdrlen, ip_hlen = 0;
   3263 	u16	etype;
   3264 	u8	ipproto __diagused = 0;
   3265 	bool	offload;
   3266 	int ctxd = txr->next_avail_desc;
   3267 	u16 vtag = 0;
   3268 
   3269 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   3270 
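         	/*
         	 * This returns the olinfo bits to be OR'd into the data
         	 * descriptors; if a context descriptor is needed (VLAN tag
         	 * or checksum offload) one ring slot is consumed for it,
         	 * otherwise 0 is returned and no slot is used.
         	 */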
   3271 	tx_buffer = &txr->tx_buffers[ctxd];
   3272 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   3273 
   3274 	/*
   3275 	** In advanced descriptors the vlan tag must
   3276 	** be placed into the descriptor itself.
   3277 	*/
   3278 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   3279 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   3280 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   3281 	} else if (!offload)
   3282 		return 0;
   3283 
   3284 	/*
   3285 	 * Determine where frame payload starts.
   3286 	 * Jump over vlan headers if already present,
   3287 	 * helpful for QinQ too.
   3288 	 */
   3289 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   3290 	eh = mtod(mp, struct ether_vlan_header *);
   3291 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   3292 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   3293 		etype = ntohs(eh->evl_proto);
   3294 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3295 	} else {
   3296 		etype = ntohs(eh->evl_encap_proto);
   3297 		ehdrlen = ETHER_HDR_LEN;
   3298 	}
   3299 
   3300 	/* Set the ether header length */
   3301 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   3302 
   3303 	switch (etype) {
   3304 	case ETHERTYPE_IP:
   3305 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   3306 		ip_hlen = ip.ip_hl << 2;
   3307 		ipproto = ip.ip_p;
   3308 #if 0
   3309 		ip.ip_sum = 0;
   3310 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   3311 #else
   3312 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   3313 		    ip.ip_sum == 0);
   3314 #endif
   3315 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   3316 		break;
   3317 	case ETHERTYPE_IPV6:
   3318 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   3319 		ip_hlen = sizeof(ip6);
   3320 		ipproto = ip6.ip6_nxt;
   3321 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   3322 		break;
   3323 	default:
   3324 		break;
   3325 	}
   3326 
   3327 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   3328 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   3329 
   3330 	vlan_macip_lens |= ip_hlen;
   3331 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   3332 
   3333 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   3334 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   3335 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   3336 		KASSERT(ipproto == IPPROTO_TCP);
   3337 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   3338 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   3339 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   3340 		KASSERT(ipproto == IPPROTO_UDP);
   3341 	}
   3342 
   3343 	/* Now copy bits into descriptor */
   3344 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   3345 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   3346 	TXD->seqnum_seed = htole32(0);
   3347 	TXD->mss_l4len_idx = htole32(0);
   3348 
   3349 	tx_buffer->m_head = NULL;
   3350 	tx_buffer->eop_index = -1;
   3351 
   3352 	/* We've consumed the first desc, adjust counters */
   3353 	if (++ctxd == adapter->num_tx_desc)
   3354 		ctxd = 0;
   3355 	txr->next_avail_desc = ctxd;
   3356 	--txr->tx_avail;
   3357 
    3358 	return olinfo;
   3359 }
   3360 
   3361 /**********************************************************************
   3362  *
   3363  *  Setup work for hardware segmentation offload (TSO) on
   3364  *  adapters using advanced tx descriptors
   3365  *
   3366  **********************************************************************/
   3367 static bool
   3368 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   3369 {
   3370 	struct m_tag *mtag;
   3371 	struct adapter *adapter = txr->adapter;
   3372 	struct ethercom *ec = &adapter->osdep.ec;
   3373 	struct ixgbe_adv_tx_context_desc *TXD;
   3374 	struct ixgbe_tx_buf        *tx_buffer;
   3375 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   3376 	u32 mss_l4len_idx = 0;
   3377 	u16 vtag = 0;
   3378 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
   3379 	struct ether_vlan_header *eh;
   3380 	struct ip *ip;
   3381 	struct tcphdr *th;
   3382 
   3383 
   3384 	/*
   3385 	 * Determine where frame payload starts.
   3386 	 * Jump over vlan headers if already present
   3387 	 */
   3388 	eh = mtod(mp, struct ether_vlan_header *);
   3389 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   3390 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3391 	else
   3392 		ehdrlen = ETHER_HDR_LEN;
   3393 
    3394 	/* Ensure we have at least the IP+TCP header in the first mbuf. */
    3395 	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   3396 		return FALSE;
   3397 
   3398 	ctxd = txr->next_avail_desc;
   3399 	tx_buffer = &txr->tx_buffers[ctxd];
   3400 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   3401 
   3402 	ip = (struct ip *)(mp->m_data + ehdrlen);
   3403 	if (ip->ip_p != IPPROTO_TCP)
    3404 		return FALSE;	/* not TCP, cannot TSO */
   3405 	ip->ip_sum = 0;
   3406 	ip_hlen = ip->ip_hl << 2;
   3407 	th = (struct tcphdr *)((char *)ip + ip_hlen);
   3408 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   3409 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   3410 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
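         	/*
         	 * For TSO the TCP checksum field must hold only the
         	 * pseudo-header sum (addresses and protocol, no length);
         	 * the hardware completes the checksum for each segment.
         	 */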
   3411 	tcp_hlen = th->th_off << 2;
   3412 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   3413 
   3414 	/* This is used in the transmit desc in encap */
   3415 	*paylen = mp->m_pkthdr.len - hdrlen;
   3416 
   3417 	/* VLAN MACLEN IPLEN */
   3418 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   3419 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
    3420 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   3421 	}
   3422 
   3423 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   3424 	vlan_macip_lens |= ip_hlen;
   3425 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   3426 
   3427 	/* ADV DTYPE TUCMD */
   3428 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   3429 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   3430 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   3431 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   3432 
   3433 
   3434 	/* MSS L4LEN IDX */
   3435 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   3436 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   3437 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   3438 
   3439 	TXD->seqnum_seed = htole32(0);
   3440 	tx_buffer->m_head = NULL;
   3441 	tx_buffer->eop_index = -1;
   3442 
   3443 	if (++ctxd == adapter->num_tx_desc)
   3444 		ctxd = 0;
   3445 
   3446 	txr->tx_avail--;
   3447 	txr->next_avail_desc = ctxd;
   3448 	return TRUE;
   3449 }
   3450 
   3451 #ifdef IXGBE_FDIR
   3452 /*
   3453 ** This routine parses packet headers so that Flow
   3454 ** Director can make a hashed filter table entry
   3455 ** allowing traffic flows to be identified and kept
    3456 ** on the same CPU.  Doing this for every packet would be
    3457 ** a performance hit, so we only do it for a sample of
    3458 ** packets (see IXGBE_FDIR_RATE).
   3459 */
   3460 static void
   3461 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
   3462 {
   3463 	struct adapter			*adapter = txr->adapter;
   3464 	struct ix_queue			*que;
   3465 	struct ip			*ip;
   3466 	struct tcphdr			*th;
   3467 	struct udphdr			*uh;
   3468 	struct ether_vlan_header	*eh;
   3469 	union ixgbe_atr_hash_dword	input = {.dword = 0};
   3470 	union ixgbe_atr_hash_dword	common = {.dword = 0};
   3471 	int  				ehdrlen, ip_hlen;
   3472 	u16				etype;
   3473 
   3474 	eh = mtod(mp, struct ether_vlan_header *);
   3475 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   3476 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3477 		etype = eh->evl_proto;
   3478 	} else {
   3479 		ehdrlen = ETHER_HDR_LEN;
   3480 		etype = eh->evl_encap_proto;
   3481 	}
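         	/* Note: etype is kept in network byte order here. */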
   3482 
   3483 	/* Only handling IPv4 */
   3484 	if (etype != htons(ETHERTYPE_IP))
   3485 		return;
   3486 
   3487 	ip = (struct ip *)(mp->m_data + ehdrlen);
   3488 	ip_hlen = ip->ip_hl << 2;
   3489 
   3490 	/* check if we're UDP or TCP */
   3491 	switch (ip->ip_p) {
   3492 	case IPPROTO_TCP:
   3493 		th = (struct tcphdr *)((char *)ip + ip_hlen);
   3494 		/* src and dst are inverted */
   3495 		common.port.dst ^= th->th_sport;
   3496 		common.port.src ^= th->th_dport;
   3497 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
   3498 		break;
   3499 	case IPPROTO_UDP:
   3500 		uh = (struct udphdr *)((char *)ip + ip_hlen);
   3501 		/* src and dst are inverted */
   3502 		common.port.dst ^= uh->uh_sport;
   3503 		common.port.src ^= uh->uh_dport;
   3504 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
   3505 		break;
   3506 	default:
   3507 		return;
   3508 	}
   3509 
   3510 	input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
   3511 	if (mp->m_pkthdr.ether_vtag)
   3512 		common.flex_bytes ^= htons(ETHERTYPE_VLAN);
   3513 	else
   3514 		common.flex_bytes ^= etype;
   3515 	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
   3516 
   3517 	que = &adapter->queues[txr->me];
   3518 	/*
   3519 	** This assumes the Rx queue and Tx
   3520 	** queue are bound to the same CPU
   3521 	*/
   3522 	ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
   3523 	    input, common, que->msix);
   3524 }
   3525 #endif /* IXGBE_FDIR */
   3526 
   3527 /**********************************************************************
   3528  *
   3529  *  Examine each tx_buffer in the used queue. If the hardware is done
   3530  *  processing the packet then free associated resources. The
   3531  *  tx_buffer is put back on the free queue.
   3532  *
   3533  **********************************************************************/
   3534 static bool
   3535 ixgbe_txeof(struct tx_ring *txr)
   3536 {
   3537 	struct adapter	*adapter = txr->adapter;
   3538 	struct ifnet	*ifp = adapter->ifp;
   3539 	u32	first, last, done, processed;
   3540 	struct ixgbe_tx_buf *tx_buffer;
   3541 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   3542 	struct timeval now, elapsed;
   3543 
   3544 	KASSERT(mutex_owned(&txr->tx_mtx));
   3545 
   3546 	if (txr->tx_avail == adapter->num_tx_desc) {
   3547 		txr->queue_status = IXGBE_QUEUE_IDLE;
   3548 		return false;
   3549 	}
   3550 
   3551 	processed = 0;
   3552 	first = txr->next_to_clean;
   3553 	tx_buffer = &txr->tx_buffers[first];
   3554 	/* For cleanup we just use legacy struct */
   3555 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   3556 	last = tx_buffer->eop_index;
   3557 	if (last == -1)
   3558 		return false;
   3559 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   3560 
   3561 	/*
   3562 	** Get the index of the first descriptor
   3563 	** BEYOND the EOP and call that 'done'.
   3564 	** I do this so the comparison in the
   3565 	** inner while loop below can be simple
   3566 	*/
   3567 	if (++last == adapter->num_tx_desc) last = 0;
    3568 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3569 
   3570         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3571 	    BUS_DMASYNC_POSTREAD);
   3572 	/*
   3573 	** Only the EOP descriptor of a packet now has the DD
   3574 	** bit set, this is what we look for...
   3575 	*/
   3576 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   3577 		/* We clean the range of the packet */
   3578 		while (first != done) {
   3579 			tx_desc->upper.data = 0;
   3580 			tx_desc->lower.data = 0;
   3581 			tx_desc->buffer_addr = 0;
   3582 			++txr->tx_avail;
   3583 			++processed;
   3584 
   3585 			if (tx_buffer->m_head) {
   3586 				txr->bytes +=
   3587 				    tx_buffer->m_head->m_pkthdr.len;
   3588 				bus_dmamap_sync(txr->txtag->dt_dmat,
   3589 				    tx_buffer->map,
   3590 				    0, tx_buffer->m_head->m_pkthdr.len,
   3591 				    BUS_DMASYNC_POSTWRITE);
   3592 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3593 				m_freem(tx_buffer->m_head);
   3594 				tx_buffer->m_head = NULL;
   3595 			}
   3596 			tx_buffer->eop_index = -1;
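         			/* Progress was made; reset the watchdog timestamp. */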
   3597 			getmicrotime(&txr->watchdog_time);
   3598 
   3599 			if (++first == adapter->num_tx_desc)
   3600 				first = 0;
   3601 
   3602 			tx_buffer = &txr->tx_buffers[first];
   3603 			tx_desc =
   3604 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   3605 		}
   3606 		++txr->packets;
   3607 		++ifp->if_opackets;
   3608 		/* See if there is more work now */
   3609 		last = tx_buffer->eop_index;
   3610 		if (last != -1) {
   3611 			eop_desc =
   3612 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   3613 			/* Get next done point */
   3614 			if (++last == adapter->num_tx_desc) last = 0;
   3615 			done = last;
   3616 		} else
   3617 			break;
   3618 	}
   3619 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3620 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3621 
   3622 	txr->next_to_clean = first;
   3623 
   3624 	/*
    3625 	** Watchdog calculation: we know there is work
    3626 	** outstanding or the early return above would have
    3627 	** been taken, so if nothing has been processed for
    3628 	** too long the queue is considered hung.
   3629 	*/
   3630 	getmicrotime(&now);
   3631 	timersub(&now, &txr->watchdog_time, &elapsed);
   3632 	if (!processed && tvtohz(&elapsed) > IXGBE_WATCHDOG)
   3633 		txr->queue_status = IXGBE_QUEUE_HUNG;
   3634 
   3635 	/*
   3636 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   3637 	 * it is OK to send packets. If there are no pending descriptors,
   3638 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   3639 	 * restart the timeout.
   3640 	 */
   3641 	if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
   3642 		ifp->if_flags &= ~IFF_OACTIVE;
   3643 		if (txr->tx_avail == adapter->num_tx_desc) {
   3644 			txr->queue_status = IXGBE_QUEUE_IDLE;
   3645 			return false;
   3646 		}
   3647 	}
   3648 
   3649 	return true;
   3650 }
   3651 
   3652 /*********************************************************************
   3653  *
   3654  *  Refresh mbuf buffers for RX descriptor rings
    3655  *   - keeps its own state so discards due to resource
    3656  *     exhaustion are unnecessary; if an mbuf cannot be obtained
    3657  *     it just returns, keeping its placeholder, so it can simply
    3658  *     be called again later to retry.
   3659  *
   3660  **********************************************************************/
   3661 static void
   3662 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   3663 {
   3664 	struct adapter		*adapter = rxr->adapter;
   3665 	struct ixgbe_rx_buf	*rxbuf;
   3666 	struct mbuf		*mh, *mp;
   3667 	int			i, j, error;
   3668 	bool			refreshed = false;
   3669 
   3670 	i = j = rxr->next_to_refresh;
   3671 	/* Control the loop with one beyond */
   3672 	if (++j == adapter->num_rx_desc)
   3673 		j = 0;
   3674 
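         	/*
         	 * next_to_refresh is only advanced once a slot has been
         	 * fully refreshed, so a failed allocation or map load
         	 * leaves that slot to be retried on the next call.
         	 */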
   3675 	while (j != limit) {
   3676 		rxbuf = &rxr->rx_buffers[i];
   3677 		if (rxr->hdr_split == FALSE)
   3678 			goto no_split;
   3679 
   3680 		if (rxbuf->m_head == NULL) {
   3681 			mh = m_gethdr(M_DONTWAIT, MT_DATA);
   3682 			if (mh == NULL)
   3683 				goto update;
   3684 		} else
   3685 			mh = rxbuf->m_head;
   3686 
    3687 		mh->m_pkthdr.len = mh->m_len = MHLEN;
   3689 		mh->m_flags |= M_PKTHDR;
   3690 		/* Get the memory mapping */
   3691 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3692 		    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   3693 		if (error != 0) {
   3694 			printf("Refresh mbufs: hdr dmamap load"
   3695 			    " failure - %d\n", error);
   3696 			m_free(mh);
   3697 			rxbuf->m_head = NULL;
   3698 			goto update;
   3699 		}
   3700 		rxbuf->m_head = mh;
   3701 		ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_PREREAD);
   3702 		rxr->rx_base[i].read.hdr_addr =
   3703 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3704 
   3705 no_split:
   3706 		if (rxbuf->m_pack == NULL) {
   3707 			mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3708 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3709 			if (mp == NULL) {
   3710 				rxr->no_jmbuf.ev_count++;
   3711 				goto update;
   3712 			}
   3713 		} else
   3714 			mp = rxbuf->m_pack;
   3715 
   3716 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3717 		/* Get the memory mapping */
   3718 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3719 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3720 		if (error != 0) {
   3721 			printf("Refresh mbufs: payload dmamap load"
   3722 			    " failure - %d\n", error);
   3723 			m_free(mp);
   3724 			rxbuf->m_pack = NULL;
   3725 			goto update;
   3726 		}
   3727 		rxbuf->m_pack = mp;
   3728 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3729 		    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3730 		rxr->rx_base[i].read.pkt_addr =
   3731 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3732 
   3733 		refreshed = true;
   3734 		/* Next is precalculated */
   3735 		i = j;
   3736 		rxr->next_to_refresh = i;
   3737 		if (++j == adapter->num_rx_desc)
   3738 			j = 0;
   3739 	}
   3740 update:
   3741 	if (refreshed) /* Update hardware tail index */
   3742 		IXGBE_WRITE_REG(&adapter->hw,
   3743 		    IXGBE_RDT(rxr->me), rxr->next_to_refresh);
   3744 	return;
   3745 }
   3746 
   3747 /*********************************************************************
   3748  *
   3749  *  Allocate memory for rx_buffer structures. Since we use one
    3750  *  rx_buffer per received packet, the maximum number of rx_buffers
   3751  *  that we'll need is equal to the number of receive descriptors
   3752  *  that we've allocated.
   3753  *
   3754  **********************************************************************/
   3755 static int
   3756 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   3757 {
   3758 	struct	adapter 	*adapter = rxr->adapter;
   3759 	device_t 		dev = adapter->dev;
   3760 	struct ixgbe_rx_buf 	*rxbuf;
   3761 	int             	i, bsize, error;
   3762 
   3763 	bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
   3764 	if (!(rxr->rx_buffers =
   3765 	    (struct ixgbe_rx_buf *) malloc(bsize,
   3766 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   3767 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   3768 		error = ENOMEM;
   3769 		goto fail;
   3770 	}
   3771 
   3772 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3773 				   1, 0,	/* alignment, bounds */
   3774 				   MSIZE,		/* maxsize */
   3775 				   1,			/* nsegments */
   3776 				   MSIZE,		/* maxsegsize */
   3777 				   0,			/* flags */
   3778 				   &rxr->htag))) {
   3779 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   3780 		goto fail;
   3781 	}
   3782 
   3783 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3784 				   1, 0,	/* alignment, bounds */
   3785 				   MJUM16BYTES,		/* maxsize */
   3786 				   1,			/* nsegments */
   3787 				   MJUM16BYTES,		/* maxsegsize */
   3788 				   0,			/* flags */
   3789 				   &rxr->ptag))) {
   3790 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   3791 		goto fail;
   3792 	}
   3793 
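         	/*
         	 * Create one header map (htag, MSIZE) and one payload map
         	 * (ptag, up to MJUM16BYTES) per receive descriptor; the
         	 * header maps are only used when header split is enabled.
         	 */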
    3794 	for (i = 0; i < adapter->num_rx_desc; i++) {
   3795 		rxbuf = &rxr->rx_buffers[i];
   3796 		error = ixgbe_dmamap_create(rxr->htag,
   3797 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   3798 		if (error) {
   3799 			aprint_error_dev(dev, "Unable to create RX head map\n");
   3800 			goto fail;
   3801 		}
   3802 		error = ixgbe_dmamap_create(rxr->ptag,
   3803 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   3804 		if (error) {
   3805 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   3806 			goto fail;
   3807 		}
   3808 	}
   3809 
   3810 	return (0);
   3811 
   3812 fail:
   3813 	/* Frees all, but can handle partial completion */
   3814 	ixgbe_free_receive_structures(adapter);
   3815 	return (error);
   3816 }
   3817 
   3818 /*
   3819 ** Used to detect a descriptor that has
   3820 ** been merged by Hardware RSC.
   3821 */
   3822 static inline u32
   3823 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   3824 {
   3825 	return (le32toh(rx->wb.lower.lo_dword.data) &
   3826 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   3827 }
   3828 
   3829 /*********************************************************************
   3830  *
   3831  *  Initialize Hardware RSC (LRO) feature on 82599
    3832  *  for an RX ring; this is toggled by the LRO capability
   3833  *  even though it is transparent to the stack.
   3834  *
   3835  **********************************************************************/
   3836 static void
   3837 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   3838 {
   3839 	struct	adapter 	*adapter = rxr->adapter;
   3840 	struct	ixgbe_hw	*hw = &adapter->hw;
   3841 	u32			rscctrl, rdrxctl;
   3842 
   3843 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   3844 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   3845 	rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   3846 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   3847 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   3848 
   3849 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   3850 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   3851 	/*
   3852 	** Limit the total number of descriptors that
   3853 	** can be combined, so it does not exceed 64K
   3854 	*/
   3855 	if (adapter->rx_mbuf_sz == MCLBYTES)
   3856 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   3857 	else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
   3858 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   3859 	else if (adapter->rx_mbuf_sz == MJUM9BYTES)
   3860 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   3861 	else  /* Using 16K cluster */
   3862 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
   3863 
   3864 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   3865 
   3866 	/* Enable TCP header recognition */
   3867 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   3868 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
   3869 	    IXGBE_PSRTYPE_TCPHDR));
   3870 
   3871 	/* Disable RSC for ACK packets */
   3872 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   3873 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   3874 
   3875 	rxr->hw_rsc = TRUE;
   3876 }
   3877 
   3878 
   3879 static void
   3880 ixgbe_free_receive_ring(struct rx_ring *rxr)
   3881 {
   3882 	struct  adapter         *adapter;
   3883 	struct ixgbe_rx_buf       *rxbuf;
   3884 	int i;
   3885 
   3886 	adapter = rxr->adapter;
   3887 	for (i = 0; i < adapter->num_rx_desc; i++) {
   3888 		rxbuf = &rxr->rx_buffers[i];
   3889 		if (rxbuf->m_head != NULL) {
   3890 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3891 			    BUS_DMASYNC_POSTREAD);
   3892 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3893 			rxbuf->m_head->m_flags |= M_PKTHDR;
   3894 			m_freem(rxbuf->m_head);
   3895 		}
   3896 		if (rxbuf->m_pack != NULL) {
   3897 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3898 			    0, rxbuf->m_pack->m_pkthdr.len,
   3899 			    BUS_DMASYNC_POSTREAD);
   3900 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3901 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   3902 			m_freem(rxbuf->m_pack);
   3903 		}
   3904 		rxbuf->m_head = NULL;
   3905 		rxbuf->m_pack = NULL;
   3906 	}
   3907 }
   3908 
   3909 
   3910 /*********************************************************************
   3911  *
   3912  *  Initialize a receive ring and its buffers.
   3913  *
   3914  **********************************************************************/
   3915 static int
   3916 ixgbe_setup_receive_ring(struct rx_ring *rxr)
   3917 {
   3918 	struct	adapter 	*adapter;
   3919 	struct ifnet		*ifp;
   3920 	struct ixgbe_rx_buf	*rxbuf;
   3921 #ifdef LRO
   3922 	struct lro_ctrl		*lro = &rxr->lro;
   3923 #endif /* LRO */
   3924 	int			rsize, error = 0;
   3925 
   3926 	adapter = rxr->adapter;
   3927 	ifp = adapter->ifp;
   3928 
   3929 	/* Clear the ring contents */
   3930 	IXGBE_RX_LOCK(rxr);
   3931 	rsize = roundup2(adapter->num_rx_desc *
   3932 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3933 	bzero((void *)rxr->rx_base, rsize);
   3934 
   3935 	/* Free current RX buffer structs and their mbufs */
   3936 	ixgbe_free_receive_ring(rxr);
   3937 
   3938 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3939 	 * or size of jumbo mbufs may have changed.
   3940 	 */
   3941 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3942 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   3943 
   3944 	/* Configure header split? */
   3945 	if (ixgbe_header_split)
   3946 		rxr->hdr_split = TRUE;
   3947 
   3948 	/* Now replenish the mbufs */
   3949 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3950 		struct mbuf	*mh, *mp;
   3951 
   3952 		rxbuf = &rxr->rx_buffers[j];
   3953 		/*
   3954 		** Don't allocate mbufs if not
    3955 		** doing header split; it's wasteful
   3956 		*/
   3957 		if (rxr->hdr_split == FALSE)
   3958 			goto skip_head;
   3959 
   3960 		/* First the header */
   3961 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3962 		if (rxbuf->m_head == NULL) {
   3963 			error = ENOBUFS;
   3964 			goto fail;
   3965 		}
   3966 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3967 		mh = rxbuf->m_head;
   3968 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3969 		mh->m_flags |= M_PKTHDR;
   3970 		/* Get the memory mapping */
   3971 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3972 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3973 		if (error != 0) /* Nothing elegant to do here */
   3974 			goto fail;
   3975 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3976 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3977 		/* Update descriptor */
   3978 		rxr->rx_base[j].read.hdr_addr =
   3979 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3980 
   3981 skip_head:
   3982 		/* Now the payload cluster */
   3983 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3984 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3985 		if (rxbuf->m_pack == NULL) {
   3986 			error = ENOBUFS;
    3987 			goto fail;
   3988 		}
   3989 		mp = rxbuf->m_pack;
   3990 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3991 		/* Get the memory mapping */
   3992 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3993 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3994 		if (error != 0)
    3995 			goto fail;
   3996 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3997 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3998 		/* Update descriptor */
   3999 		rxr->rx_base[j].read.pkt_addr =
   4000 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   4001 	}
   4002 
   4003 
   4004 	/* Setup our descriptor indices */
   4005 	rxr->next_to_check = 0;
   4006 	rxr->next_to_refresh = 0;
   4007 	rxr->lro_enabled = FALSE;
   4008 	rxr->rx_split_packets.ev_count = 0;
   4009 	rxr->rx_bytes.ev_count = 0;
   4010 	rxr->discard = FALSE;
   4011 
   4012 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4013 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   4014 
   4015 	/*
   4016 	** Now set up the LRO interface:
   4017 	** 82598 uses software LRO, the
   4018 	** 82599 uses a hardware assist.
   4019 	*/
   4020 	if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
   4021 	    (ifp->if_capenable & IFCAP_RXCSUM) &&
   4022 	    (ifp->if_capenable & IFCAP_LRO))
   4023 		ixgbe_setup_hw_rsc(rxr);
   4024 #ifdef LRO
   4025 	else if (ifp->if_capenable & IFCAP_LRO) {
   4026 		device_t dev = adapter->dev;
   4027 		int err = tcp_lro_init(lro);
   4028 		if (err) {
   4029 			device_printf(dev, "LRO Initialization failed!\n");
   4030 			goto fail;
   4031 		}
   4032 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   4033 		rxr->lro_enabled = TRUE;
   4034 		lro->ifp = adapter->ifp;
   4035 	}
   4036 #endif /* LRO */
   4037 
   4038 	IXGBE_RX_UNLOCK(rxr);
   4039 	return (0);
   4040 
   4041 fail:
   4042 	ixgbe_free_receive_ring(rxr);
   4043 	IXGBE_RX_UNLOCK(rxr);
   4044 	return (error);
   4045 }
   4046 
   4047 /*********************************************************************
   4048  *
   4049  *  Initialize all receive rings.
   4050  *
   4051  **********************************************************************/
   4052 static int
   4053 ixgbe_setup_receive_structures(struct adapter *adapter)
   4054 {
   4055 	struct rx_ring *rxr = adapter->rx_rings;
   4056 	int j;
   4057 
   4058 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   4059 		if (ixgbe_setup_receive_ring(rxr))
   4060 			goto fail;
   4061 
   4062 	return (0);
   4063 fail:
   4064 	/*
    4065 	 * Free RX buffers allocated so far; we only handle the rings
    4066 	 * that completed, since the failing ring has already cleaned
    4067 	 * up after itself. Ring 'j' failed, so it's the terminus.
   4068 	 */
   4069 	for (int i = 0; i < j; ++i) {
   4070 		rxr = &adapter->rx_rings[i];
   4071 		ixgbe_free_receive_ring(rxr);
   4072 	}
   4073 
   4074 	return (ENOBUFS);
   4075 }
   4076 
   4077 /*********************************************************************
   4078  *
   4079  *  Setup receive registers and features.
   4080  *
   4081  **********************************************************************/
   4082 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   4083 
   4084 static void
   4085 ixgbe_initialize_receive_units(struct adapter *adapter)
   4086 {
   4087 	int i;
   4088 	struct	rx_ring	*rxr = adapter->rx_rings;
   4089 	struct ixgbe_hw	*hw = &adapter->hw;
   4090 	struct ifnet   *ifp = adapter->ifp;
   4091 	u32		bufsz, rxctrl, fctrl, srrctl, rxcsum;
   4092 	u32		reta, mrqc = 0, hlreg, r[10];
   4093 
   4094 
   4095 	/*
   4096 	 * Make sure receives are disabled while
   4097 	 * setting up the descriptor ring
   4098 	 */
   4099 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4100 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
   4101 	    rxctrl & ~IXGBE_RXCTRL_RXEN);
   4102 
   4103 	/* Enable broadcasts */
   4104 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   4105 	fctrl |= IXGBE_FCTRL_BAM;
   4106 	fctrl |= IXGBE_FCTRL_DPF;
   4107 	fctrl |= IXGBE_FCTRL_PMCF;
   4108 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   4109 
   4110 	/* Set for Jumbo Frames? */
   4111 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   4112 	if (ifp->if_mtu > ETHERMTU)
   4113 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   4114 	else
   4115 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   4116 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   4117 
   4118 	bufsz = adapter->rx_mbuf_sz  >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   4119 
   4120 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   4121 		u64 rdba = rxr->rxdma.dma_paddr;
   4122 
   4123 		/* Setup the Base and Length of the Rx Descriptor Ring */
   4124 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
   4125 			       (rdba & 0x00000000ffffffffULL));
   4126 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
   4127 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
   4128 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   4129 
   4130 		/* Set up the SRRCTL register */
   4131 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
   4132 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   4133 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   4134 		srrctl |= bufsz;
   4135 		if (rxr->hdr_split) {
   4136 			/* Use a standard mbuf for the header */
   4137 			srrctl |= ((IXGBE_RX_HDR <<
   4138 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   4139 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   4140 			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   4141 		} else
   4142 			srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   4143 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
   4144 
   4145 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   4146 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
   4147 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
   4148 	}
   4149 
   4150 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   4151 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
   4152 			      IXGBE_PSRTYPE_UDPHDR |
   4153 			      IXGBE_PSRTYPE_IPV4HDR |
   4154 			      IXGBE_PSRTYPE_IPV6HDR;
   4155 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
   4156 	}
   4157 
   4158 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   4159 
   4160 	/* Setup RSS */
   4161 	if (adapter->num_queues > 1) {
   4162 		int j;
   4163 		reta = 0;
   4164 
   4165 		/* set up random bits */
   4166 		cprng_fast(&r, sizeof(r));
   4167 
   4168 		/* Set up the redirection table */
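         		/*
         		 * Four 8-bit RETA entries are packed per 32-bit
         		 * register, so a write is issued on every fourth
         		 * iteration; queues are assigned round-robin.
         		 */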
   4169 		for (i = 0, j = 0; i < 128; i++, j++) {
   4170 			if (j == adapter->num_queues) j = 0;
   4171 			reta = (reta << 8) | (j * 0x11);
   4172 			if ((i & 3) == 3)
   4173 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
   4174 		}
   4175 
   4176 		/* Now fill our hash function seeds */
   4177 		for (i = 0; i < 10; i++)
   4178 			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), r[i]);
   4179 
   4180 		/* Perform hash on these packet types */
   4181 		mrqc = IXGBE_MRQC_RSSEN
   4182 		     | IXGBE_MRQC_RSS_FIELD_IPV4
   4183 		     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
   4184 		     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
   4185 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
   4186 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
   4187 		     | IXGBE_MRQC_RSS_FIELD_IPV6
   4188 		     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
   4189 		     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
   4190 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
   4191 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
   4192 
   4193 		/* RSS and RX IPP Checksum are mutually exclusive */
   4194 		rxcsum |= IXGBE_RXCSUM_PCSD;
   4195 	}
   4196 
   4197 	if (ifp->if_capenable & IFCAP_RXCSUM)
   4198 		rxcsum |= IXGBE_RXCSUM_PCSD;
   4199 
   4200 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   4201 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   4202 
   4203 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   4204 
   4205 	return;
   4206 }
   4207 
   4208 /*********************************************************************
   4209  *
   4210  *  Free all receive rings.
   4211  *
   4212  **********************************************************************/
   4213 static void
   4214 ixgbe_free_receive_structures(struct adapter *adapter)
   4215 {
   4216 	struct rx_ring *rxr = adapter->rx_rings;
   4217 
   4218 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   4219 #ifdef LRO
   4220 		struct lro_ctrl		*lro = &rxr->lro;
   4221 #endif /* LRO */
   4222 		ixgbe_free_receive_buffers(rxr);
   4223 #ifdef LRO
   4224 		/* Free LRO memory */
   4225 		tcp_lro_free(lro);
   4226 #endif /* LRO */
   4227 		/* Free the ring memory as well */
   4228 		ixgbe_dma_free(adapter, &rxr->rxdma);
   4229 		IXGBE_RX_LOCK_DESTROY(rxr);
   4230 	}
   4231 
   4232 	free(adapter->rx_rings, M_DEVBUF);
   4233 }
   4234 
   4235 
   4236 /*********************************************************************
   4237  *
   4238  *  Free receive ring data structures
   4239  *
   4240  **********************************************************************/
   4241 static void
   4242 ixgbe_free_receive_buffers(struct rx_ring *rxr)
   4243 {
   4244 	struct adapter		*adapter = rxr->adapter;
   4245 	struct ixgbe_rx_buf	*rxbuf;
   4246 
   4247 	INIT_DEBUGOUT("free_receive_structures: begin");
   4248 
   4249 	/* Cleanup any existing buffers */
   4250 	if (rxr->rx_buffers != NULL) {
   4251 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   4252 			rxbuf = &rxr->rx_buffers[i];
   4253 			if (rxbuf->m_head != NULL) {
   4254 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   4255 				    BUS_DMASYNC_POSTREAD);
   4256 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   4257 				rxbuf->m_head->m_flags |= M_PKTHDR;
   4258 				m_freem(rxbuf->m_head);
   4259 			}
   4260 			if (rxbuf->m_pack != NULL) {
   4261 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   4262 				    0, rxbuf->m_pack->m_pkthdr.len,
   4263 				    BUS_DMASYNC_POSTREAD);
   4264 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   4265 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   4266 				m_freem(rxbuf->m_pack);
   4267 			}
   4268 			rxbuf->m_head = NULL;
   4269 			rxbuf->m_pack = NULL;
   4270 			if (rxbuf->hmap != NULL) {
   4271 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   4272 				rxbuf->hmap = NULL;
   4273 			}
   4274 			if (rxbuf->pmap != NULL) {
   4275 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   4276 				rxbuf->pmap = NULL;
   4277 			}
   4278 		}
   4279 		if (rxr->rx_buffers != NULL) {
   4280 			free(rxr->rx_buffers, M_DEVBUF);
   4281 			rxr->rx_buffers = NULL;
   4282 		}
   4283 	}
   4284 
   4285 	if (rxr->htag != NULL) {
   4286 		ixgbe_dma_tag_destroy(rxr->htag);
   4287 		rxr->htag = NULL;
   4288 	}
   4289 	if (rxr->ptag != NULL) {
   4290 		ixgbe_dma_tag_destroy(rxr->ptag);
   4291 		rxr->ptag = NULL;
   4292 	}
   4293 
   4294 	return;
   4295 }
   4296 
   4297 static __inline void
   4298 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   4299 {
   4300 	int s;
   4301 
   4302 #ifdef LRO
   4303 	struct adapter	*adapter = ifp->if_softc;
   4304 	struct ethercom *ec = &adapter->osdep.ec;
   4305 
   4306         /*
   4307          * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
   4308          * should be computed by hardware. Also it should not have VLAN tag in
   4309          * ethernet header.
   4310          */
   4311         if (rxr->lro_enabled &&
   4312             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   4313             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   4314             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   4315             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
   4316             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   4317             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   4318                 /*
   4319                  * Send to the stack if:
   4320                  **  - LRO not enabled, or
   4321                  **  - no LRO resources, or
   4322                  **  - lro enqueue fails
   4323                  */
   4324                 if (rxr->lro.lro_cnt != 0)
   4325                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   4326                                 return;
   4327         }
   4328 #endif /* LRO */
   4329 
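         	/*
         	 * Drop the RX lock while the packet is handed to bpf and
         	 * the stack; it is reacquired before returning.
         	 */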
   4330 	IXGBE_RX_UNLOCK(rxr);
   4331 
   4332 	s = splnet();
   4333 	/* Pass this up to any BPF listeners. */
   4334 	bpf_mtap(ifp, m);
   4335 	(*ifp->if_input)(ifp, m);
   4336 	splx(s);
   4337 
   4338 	IXGBE_RX_LOCK(rxr);
   4339 }
   4340 
   4341 static __inline void
   4342 ixgbe_rx_discard(struct rx_ring *rxr, int i)
   4343 {
   4344 	struct ixgbe_rx_buf	*rbuf;
   4345 
   4346 	rbuf = &rxr->rx_buffers[i];
   4347 
    4348 	if (rbuf->fmp != NULL) {	/* Partial chain ? */
    4349 		rbuf->fmp->m_flags |= M_PKTHDR;
    4350 		m_freem(rbuf->fmp);
    4351 		rbuf->fmp = NULL;
   4352 	}
   4353 
   4354 	/*
   4355 	** With advanced descriptors the writeback
    4356 	** clobbers the buffer addrs, so it's easier
   4357 	** to just free the existing mbufs and take
   4358 	** the normal refresh path to get new buffers
   4359 	** and mapping.
   4360 	*/
   4361 	if (rbuf->m_head) {
   4362 		m_free(rbuf->m_head);
   4363 		rbuf->m_head = NULL;
   4364 	}
   4365 
   4366 	if (rbuf->m_pack) {
   4367 		m_free(rbuf->m_pack);
   4368 		rbuf->m_pack = NULL;
   4369 	}
   4370 
   4371 	return;
   4372 }
   4373 
   4374 
   4375 /*********************************************************************
   4376  *
   4377  *  This routine executes in interrupt context. It replenishes
   4378  *  the mbufs in the descriptor and sends data which has been
   4379  *  dma'ed into host memory to upper layer.
   4380  *
   4381  *  We loop at most count times if count is > 0, or until done if
   4382  *  count < 0.
   4383  *
   4384  *  Return TRUE for more work, FALSE for all clean.
   4385  *********************************************************************/
   4386 static bool
   4387 ixgbe_rxeof(struct ix_queue *que, int count)
   4388 {
   4389 	struct adapter		*adapter = que->adapter;
   4390 	struct rx_ring		*rxr = que->rxr;
   4391 	struct ifnet		*ifp = adapter->ifp;
   4392 #ifdef LRO
   4393 	struct lro_ctrl		*lro = &rxr->lro;
   4394 	struct lro_entry	*queued;
   4395 #endif /* LRO */
   4396 	int			i, nextp, processed = 0;
   4397 	u32			staterr = 0;
   4398 	union ixgbe_adv_rx_desc	*cur;
   4399 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   4400 
   4401 	IXGBE_RX_LOCK(rxr);
   4402 
   4403 	for (i = rxr->next_to_check; count != 0;) {
   4404 		struct mbuf	*sendmp, *mh, *mp;
   4405 		u32		rsc, ptype;
   4406 		u16		hlen, plen, hdr, vtag;
   4407 		bool		eop;
   4408 
   4409 		/* Sync the ring. */
   4410 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4411 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   4412 
   4413 		cur = &rxr->rx_base[i];
   4414 		staterr = le32toh(cur->wb.upper.status_error);
   4415 
   4416 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   4417 			break;
   4418 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   4419 			break;
   4420 
   4421 		count--;
   4422 		sendmp = NULL;
   4423 		nbuf = NULL;
   4424 		rsc = 0;
   4425 		cur->wb.upper.status_error = 0;
   4426 		rbuf = &rxr->rx_buffers[i];
   4427 		mh = rbuf->m_head;
   4428 		mp = rbuf->m_pack;
   4429 
   4430 		plen = le16toh(cur->wb.upper.length);
   4431 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   4432 		    IXGBE_RXDADV_PKTTYPE_MASK;
   4433 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   4434 		vtag = le16toh(cur->wb.upper.vlan);
   4435 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   4436 
   4437 		/* Make sure bad packets are discarded */
   4438 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   4439 		    (rxr->discard)) {
   4440 			ifp->if_ierrors++;
   4441 			rxr->rx_discarded.ev_count++;
   4442 			if (eop)
   4443 				rxr->discard = FALSE;
   4444 			else
   4445 				rxr->discard = TRUE;
   4446 			ixgbe_rx_discard(rxr, i);
   4447 			goto next_desc;
   4448 		}
   4449 
   4450 		/*
   4451 		** On 82599 which supports a hardware
   4452 		** LRO (called HW RSC), packets need
   4453 		** not be fragmented across sequential
    4454 		** descriptors; rather, the next descriptor
    4455 		** is indicated in bits of the descriptor.
    4456 		** This also means that we might process
    4457 		** more than one packet at a time, something
    4458 		** that was never true before; it
    4459 		** required eliminating global chain pointers
   4460 		** in favor of what we are doing here.  -jfv
   4461 		*/
   4462 		if (!eop) {
   4463 			/*
   4464 			** Figure out the next descriptor
   4465 			** of this frame.
   4466 			*/
   4467 			if (rxr->hw_rsc == TRUE) {
   4468 				rsc = ixgbe_rsc_count(cur);
   4469 				rxr->rsc_num += (rsc - 1);
   4470 			}
   4471 			if (rsc) { /* Get hardware index */
   4472 				nextp = ((staterr &
   4473 				    IXGBE_RXDADV_NEXTP_MASK) >>
   4474 				    IXGBE_RXDADV_NEXTP_SHIFT);
   4475 			} else { /* Just sequential */
   4476 				nextp = i + 1;
   4477 				if (nextp == adapter->num_rx_desc)
   4478 					nextp = 0;
   4479 			}
   4480 			nbuf = &rxr->rx_buffers[nextp];
   4481 			prefetch(nbuf);
   4482 		}
   4483 		/*
   4484 		** The header mbuf is ONLY used when header
   4485 		** split is enabled, otherwise we get normal
    4486 		** behavior, i.e., both header and payload
   4487 		** are DMA'd into the payload buffer.
   4488 		**
   4489 		** Rather than using the fmp/lmp global pointers
   4490 		** we now keep the head of a packet chain in the
   4491 		** buffer struct and pass this along from one
   4492 		** descriptor to the next, until we get EOP.
   4493 		*/
   4494 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   4495 			/* This must be an initial descriptor */
   4496 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   4497 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   4498 			if (hlen > IXGBE_RX_HDR)
   4499 				hlen = IXGBE_RX_HDR;
   4500 			mh->m_len = hlen;
   4501 			mh->m_flags |= M_PKTHDR;
   4502 			mh->m_next = NULL;
   4503 			mh->m_pkthdr.len = mh->m_len;
   4504 			/* Null buf pointer so it is refreshed */
   4505 			rbuf->m_head = NULL;
   4506 			/*
    4507 			** Check the payload length; this
    4508 			** could be zero if it's a small
   4509 			** packet.
   4510 			*/
   4511 			if (plen > 0) {
   4512 				mp->m_len = plen;
   4513 				mp->m_next = NULL;
   4514 				mp->m_flags &= ~M_PKTHDR;
   4515 				mh->m_next = mp;
   4516 				mh->m_pkthdr.len += mp->m_len;
   4517 				/* Null buf pointer so it is refreshed */
   4518 				rbuf->m_pack = NULL;
   4519 				rxr->rx_split_packets.ev_count++;
   4520 			}
   4521 			/*
   4522 			** Now create the forward
   4523 			** chain so when complete
    4524 			** we won't have to.
   4525 			*/
   4526                         if (eop == 0) {
   4527 				/* stash the chain head */
   4528                                 nbuf->fmp = mh;
   4529 				/* Make forward chain */
   4530                                 if (plen)
   4531                                         mp->m_next = nbuf->m_pack;
   4532                                 else
   4533                                         mh->m_next = nbuf->m_pack;
   4534                         } else {
   4535 				/* Singlet, prepare to send */
   4536                                 sendmp = mh;
   4537                                 if (VLAN_ATTACHED(&adapter->osdep.ec) &&
   4538 				  (staterr & IXGBE_RXD_STAT_VP)) {
   4539 					/* XXX Do something reasonable on
   4540 					 * error.
   4541 					 */
   4542 #if 0
   4543 					printf("%s.%d: VLAN_INPUT_TAG\n",
   4544 					    __func__, __LINE__);
   4545 					Debugger();
   4546 #endif
   4547 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   4548 					    printf("%s: could not apply VLAN "
   4549 					        "tag", __func__));
   4550                                 }
   4551                         }
   4552 		} else {
   4553 			/*
   4554 			** Either no header split, or a
   4555 			** secondary piece of a fragmented
   4556 			** split packet.
   4557 			*/
   4558 			mp->m_len = plen;
   4559 			/*
    4560 			** See if there is a stored chain head
    4561 			** that tells us what this buffer belongs to.
   4562 			*/
   4563 			sendmp = rbuf->fmp;
   4564 			rbuf->m_pack = rbuf->fmp = NULL;
   4565 
   4566 			if (sendmp != NULL) /* secondary frag */
   4567 				sendmp->m_pkthdr.len += mp->m_len;
   4568 			else {
   4569 				/* first desc of a non-ps chain */
   4570 				sendmp = mp;
   4571 				sendmp->m_flags |= M_PKTHDR;
   4572 				sendmp->m_pkthdr.len = mp->m_len;
   4573 				if (staterr & IXGBE_RXD_STAT_VP) {
   4574 					/* XXX Do something reasonable on
   4575 					 * error.
   4576 					 */
   4577 #if 0
   4578 					printf("%s.%d: VLAN_INPUT_TAG\n",
   4579 					    __func__, __LINE__);
   4580 					Debugger();
   4581 #endif
   4582 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   4583 					    printf("%s: could not apply VLAN "
   4584 					        "tag", __func__));
   4585 				}
    4586 			}
   4587 			/* Pass the head pointer on */
   4588 			if (eop == 0) {
   4589 				nbuf->fmp = sendmp;
   4590 				sendmp = NULL;
   4591 				mp->m_next = nbuf->m_pack;
   4592 			}
   4593 		}
   4594 		++processed;
   4595 		/* Sending this frame? */
   4596 		if (eop) {
   4597 			sendmp->m_pkthdr.rcvif = ifp;
   4598 			ifp->if_ipackets++;
   4599 			rxr->rx_packets.ev_count++;
   4600 			/* capture data for AIM */
   4601 			rxr->bytes += sendmp->m_pkthdr.len;
   4602 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   4603 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   4604 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   4605 				   &adapter->stats);
   4606 			}
   4607 #if __FreeBSD_version >= 800000
   4608 			sendmp->m_pkthdr.flowid = que->msix;
   4609 			sendmp->m_flags |= M_FLOWID;
   4610 #endif
   4611 		}
   4612 next_desc:
   4613 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4614 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   4615 
   4616 		/* Advance our pointers to the next descriptor. */
   4617 		if (++i == adapter->num_rx_desc)
   4618 			i = 0;
   4619 
   4620 		/* Now send to the stack or do LRO */
   4621 		if (sendmp != NULL) {
   4622 			rxr->next_to_check = i;
   4623 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   4624 			i = rxr->next_to_check;
   4625 		}
   4626 
    4627 		/* Every 8 descriptors we go to refresh mbufs */
   4628 		if (processed == 8) {
   4629 			ixgbe_refresh_mbufs(rxr, i);
   4630 			processed = 0;
   4631 		}
   4632 	}
   4633 
   4634 	/* Refresh any remaining buf structs */
   4635 	if (ixgbe_rx_unrefreshed(rxr))
   4636 		ixgbe_refresh_mbufs(rxr, i);
   4637 
   4638 	rxr->next_to_check = i;
   4639 
   4640 #ifdef LRO
   4641 	/*
   4642 	 * Flush any outstanding LRO work
   4643 	 */
   4644 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   4645 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   4646 		tcp_lro_flush(lro, queued);
   4647 	}
   4648 #endif /* LRO */
   4649 
   4650 	IXGBE_RX_UNLOCK(rxr);
   4651 
   4652 	/*
   4653 	** We still have cleaning to do?
   4654 	** Schedule another interrupt if so.
   4655 	*/
   4656 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   4657 		ixgbe_rearm_queues(adapter, (u64)(1ULL << que->msix));
   4658 		return true;
   4659 	}
   4660 
   4661 	return false;
   4662 }
   4663 
   4664 
   4665 /*********************************************************************
   4666  *
   4667  *  Verify that the hardware indicated that the checksum is valid.
    4668  *  Inform the stack of the checksum status so that the stack
    4669  *  doesn't spend time verifying it.
   4670  *
   4671  *********************************************************************/
   4672 static void
   4673 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   4674     struct ixgbe_hw_stats *stats)
   4675 {
   4676 	u16	status = (u16) staterr;
   4677 	u8	errors = (u8) (staterr >> 24);
   4678 #if 0
   4679 	bool	sctp = FALSE;
   4680 
   4681 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   4682 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   4683 		sctp = TRUE;
   4684 #endif
   4685 
   4686 	if (status & IXGBE_RXD_STAT_IPCS) {
   4687 		stats->ipcs.ev_count++;
   4688 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   4689 			/* IP Checksum Good */
   4690 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   4691 
   4692 		} else {
   4693 			stats->ipcs_bad.ev_count++;
   4694 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   4695 		}
   4696 	}
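         	/*
         	** The hardware reports a single L4 checksum result, so all
         	** of the TCP/UDP csum flags are claimed at once.
         	*/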
   4697 	if (status & IXGBE_RXD_STAT_L4CS) {
   4698 		stats->l4cs.ev_count++;
   4699 		u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   4700 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   4701 			mp->m_pkthdr.csum_flags |= type;
   4702 		} else {
   4703 			stats->l4cs_bad.ev_count++;
   4704 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   4705 		}
   4706 	}
   4707 	return;
   4708 }
   4709 
   4710 
   4711 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   4712 /*
    4713 ** This routine is run via a vlan config EVENT; it
    4714 ** enables us to use the HW filter table since we
    4715 ** can get the vlan id. This just creates the entry
    4716 ** in the soft version of the VFTA; init will
    4717 ** repopulate the real table.
   4718 */
   4719 static void
   4720 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   4721 {
   4722 	struct adapter	*adapter = ifp->if_softc;
   4723 	u16		index, bit;
   4724 
   4725 	if (ifp->if_softc !=  arg)   /* Not our event */
   4726 		return;
   4727 
   4728 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   4729 		return;
   4730 
   4731 	IXGBE_CORE_LOCK(adapter);
   4732 	index = (vtag >> 5) & 0x7F;
   4733 	bit = vtag & 0x1F;
   4734 	adapter->shadow_vfta[index] |= (1 << bit);
   4735 	ixgbe_init_locked(adapter);
   4736 	IXGBE_CORE_UNLOCK(adapter);
   4737 }
   4738 
   4739 /*
    4740 ** This routine is run via a vlan
    4741 ** unconfig EVENT; it removes our entry
    4742 ** from the soft VFTA.
   4743 */
   4744 static void
   4745 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   4746 {
   4747 	struct adapter	*adapter = ifp->if_softc;
   4748 	u16		index, bit;
   4749 
   4750 	if (ifp->if_softc !=  arg)
   4751 		return;
   4752 
   4753 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   4754 		return;
   4755 
   4756 	IXGBE_CORE_LOCK(adapter);
   4757 	index = (vtag >> 5) & 0x7F;
   4758 	bit = vtag & 0x1F;
   4759 	adapter->shadow_vfta[index] &= ~(1 << bit);
   4760 	/* Re-init to load the changes */
   4761 	ixgbe_init_locked(adapter);
   4762 	IXGBE_CORE_UNLOCK(adapter);
   4763 }
   4764 #endif
   4765 
   4766 static void
   4767 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   4768 {
   4769 	struct ethercom *ec = &adapter->osdep.ec;
   4770 	struct ixgbe_hw *hw = &adapter->hw;
   4771 	u32		ctrl;
   4772 
   4773 	/*
    4774 	** We get here through init_locked, meaning
    4775 	** a soft reset; this has already cleared
    4776 	** the VFTA and other state, so if no vlans
    4777 	** have been registered there is nothing to do.
   4778 	*/
   4779 	if (!VLAN_ATTACHED(&adapter->osdep.ec)) {
   4780 		return;
   4781 	}
   4782 
   4783 	/*
    4784 	** A soft reset zeroes out the VFTA, so
   4785 	** we need to repopulate it now.
   4786 	*/
   4787 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
   4788 		if (adapter->shadow_vfta[i] != 0)
   4789 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   4790 			    adapter->shadow_vfta[i]);
   4791 
   4792 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   4793 	/* Enable the Filter Table if enabled */
   4794 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   4795 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   4796 		ctrl |= IXGBE_VLNCTRL_VFE;
   4797 	}
   4798 	if (hw->mac.type == ixgbe_mac_82598EB)
   4799 		ctrl |= IXGBE_VLNCTRL_VME;
   4800 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   4801 
    4802 	/* On 82599 the VLAN enable is per-queue in RXDCTL */
   4803 	if (hw->mac.type != ixgbe_mac_82598EB)
   4804 		for (int i = 0; i < adapter->num_queues; i++) {
   4805 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
    4806 			ctrl |= IXGBE_RXDCTL_VME;
   4807 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
   4808 		}
   4809 }
   4810 
   4811 static void
   4812 ixgbe_enable_intr(struct adapter *adapter)
   4813 {
   4814 	struct ixgbe_hw *hw = &adapter->hw;
   4815 	struct ix_queue *que = adapter->queues;
   4816 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4817 
   4818 
   4819 	/* Enable Fan Failure detection */
   4820 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
    4821 		mask |= IXGBE_EIMS_GPI_SDP1;
   4822 	else {
    4823 		mask |= IXGBE_EIMS_ECC;
    4824 		mask |= IXGBE_EIMS_GPI_SDP1;
    4825 		mask |= IXGBE_EIMS_GPI_SDP2;
   4826 #ifdef IXGBE_FDIR
   4827 		    mask |= IXGBE_EIMS_FLOW_DIR;
   4828 #endif
   4829 	}
   4830 
   4831 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4832 
   4833 	/* With RSS we use auto clear */
   4834 	if (adapter->msix_mem) {
   4835 		mask = IXGBE_EIMS_ENABLE_MASK;
   4836 		/* Don't autoclear Link */
   4837 		mask &= ~IXGBE_EIMS_OTHER;
   4838 		mask &= ~IXGBE_EIMS_LSC;
   4839 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4840 	}
   4841 
   4842 	/*
    4843 	** Now enable all queues.  This is done separately to
    4844 	** allow for handling the extended (beyond 32) MSIX
    4845 	** vectors that can be used by the 82599.
   4846 	*/
    4847 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4848 		ixgbe_enable_queue(adapter, que->msix);
   4849 
   4850 	IXGBE_WRITE_FLUSH(hw);
   4851 
   4852 	return;
   4853 }
   4854 
   4855 static void
   4856 ixgbe_disable_intr(struct adapter *adapter)
   4857 {
   4858 	if (adapter->msix_mem)
   4859 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
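         	/*
         	** The 82598 has a single mask register; on newer MACs the
         	** queue vectors are masked through the EIMC_EX pair while
         	** the remaining causes sit in the upper half of EIMC.
         	*/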
   4860 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4861 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4862 	} else {
   4863 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4864 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4865 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4866 	}
   4867 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4868 	return;
   4869 }
   4870 
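         /*
         ** Read a 16-bit word from PCI config space.  NetBSD's
         ** pci_conf_read() operates on aligned 32-bit words, so fetch
         ** the containing word and extract the requested half.
         */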
   4871 u16
   4872 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
   4873 {
   4874 	switch (reg % 4) {
   4875 	case 0:
   4876 		return pci_conf_read(hw->back->pc, hw->back->tag, reg) &
   4877 		    __BITS(15, 0);
   4878 	case 2:
   4879 		return __SHIFTOUT(pci_conf_read(hw->back->pc, hw->back->tag,
   4880 		    reg - 2), __BITS(31, 16));
   4881 	default:
    4882 		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
   4883 		break;
   4884 	}
   4885 }
   4886 
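         /*
         ** Write a 16-bit word to PCI config space by read-modify-writing
         ** the containing 32-bit word.
         */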
   4887 void
   4888 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
   4889 {
   4890 	pcireg_t old;
   4891 
   4892 	switch (reg % 4) {
   4893 	case 0:
   4894 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg) &
   4895 		    __BITS(31, 16);
   4896 		pci_conf_write(hw->back->pc, hw->back->tag, reg, value | old);
   4897 		break;
   4898 	case 2:
   4899 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg - 2) &
   4900 		    __BITS(15, 0);
   4901 		pci_conf_write(hw->back->pc, hw->back->tag, reg - 2,
   4902 		    __SHIFTIN(value, __BITS(31, 16)) | old);
   4903 		break;
   4904 	default:
    4905 		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
   4906 		break;
   4907 	}
   4908 
   4909 	return;
   4910 }
   4911 
   4912 /*
   4913 ** Setup the correct IVAR register for a particular MSIX interrupt
   4914 **   (yes this is all very magic and confusing :)
   4915 **  - entry is the register array entry
   4916 **  - vector is the MSIX vector for this queue
   4917 **  - type is RX/TX/MISC
   4918 */
   4919 static void
   4920 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   4921 {
   4922 	struct ixgbe_hw *hw = &adapter->hw;
   4923 	u32 ivar, index;
   4924 
   4925 	vector |= IXGBE_IVAR_ALLOC_VAL;
   4926 
   4927 	switch (hw->mac.type) {
   4928 
   4929 	case ixgbe_mac_82598EB:
   4930 		if (type == -1)
   4931 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   4932 		else
   4933 			entry += (type * 64);
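         		/* Four 8-bit entries are packed into each 32-bit IVAR register */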
   4934 		index = (entry >> 2) & 0x1F;
   4935 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   4936 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   4937 		ivar |= (vector << (8 * (entry & 0x3)));
   4938 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   4939 		break;
   4940 
   4941 	case ixgbe_mac_82599EB:
   4942 		if (type == -1) { /* MISC IVAR */
   4943 			index = (entry & 1) * 8;
   4944 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   4945 			ivar &= ~(0xFF << index);
   4946 			ivar |= (vector << index);
   4947 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   4948 		} else {	/* RX/TX IVARS */
   4949 			index = (16 * (entry & 1)) + (8 * type);
   4950 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   4951 			ivar &= ~(0xFF << index);
   4952 			ivar |= (vector << index);
   4953 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
    4954 		}
    4955 		break;
   4956 	default:
   4957 		break;
   4958 	}
   4959 }
   4960 
   4961 static void
   4962 ixgbe_configure_ivars(struct adapter *adapter)
   4963 {
   4964 	struct  ix_queue *que = adapter->queues;
   4965 	u32 newitr;
   4966 
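         	/*
         	** The quotient 8000000 / rate is the interval in usec already
         	** shifted into the EITR interval field (bits 3:11); see
         	** ixgbe_sysctl_interrupt_rate_handler() for the reverse mapping.
         	*/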
   4967 	if (ixgbe_max_interrupt_rate > 0)
   4968 		newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4969 	else
   4970 		newitr = 0;
   4971 
    4972 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    4973 		/* First the RX queue entry */
    4974 		ixgbe_set_ivar(adapter, i, que->msix, 0);
    4975 		/* ... and the TX */
    4976 		ixgbe_set_ivar(adapter, i, que->msix, 1);
    4977 		/* Set an Initial EITR value */
    4978 		IXGBE_WRITE_REG(&adapter->hw,
    4979 		    IXGBE_EITR(que->msix), newitr);
   4980 	}
   4981 
   4982 	/* For the Link interrupt */
    4983 	ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
   4984 }
   4985 
   4986 /*
    4987 ** ixgbe_sfp_probe - called from the local timer to
    4988 ** determine whether a port has had optics inserted.
   4989 */
   4990 static bool ixgbe_sfp_probe(struct adapter *adapter)
   4991 {
   4992 	struct ixgbe_hw	*hw = &adapter->hw;
   4993 	device_t	dev = adapter->dev;
   4994 	bool		result = FALSE;
   4995 
   4996 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4997 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4998 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4999 		if (ret)
    5000 			goto out;
   5001 		ret = hw->phy.ops.reset(hw);
   5002 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    5003 			device_printf(dev, "Unsupported SFP+ module detected!\n");
   5004 			device_printf(dev, "Reload driver with supported module.\n");
   5005 			adapter->sfp_probe = FALSE;
    5006 			goto out;
   5007 		} else
    5008 			device_printf(dev, "SFP+ module detected!\n");
   5009 		/* We now have supported optics */
   5010 		adapter->sfp_probe = FALSE;
   5011 		/* Set the optics type so system reports correctly */
   5012 		ixgbe_setup_optics(adapter);
   5013 		result = TRUE;
   5014 	}
   5015 out:
   5016 	return (result);
   5017 }
   5018 
   5019 /*
   5020 ** Tasklet handler for MSIX Link interrupts
    5021 **  - done outside the interrupt handler since it might sleep
   5022 */
   5023 static void
   5024 ixgbe_handle_link(void *context)
   5025 {
   5026 	struct adapter  *adapter = context;
   5027 
   5028 	if (ixgbe_check_link(&adapter->hw,
   5029 	    &adapter->link_speed, &adapter->link_up, 0) == 0)
    5030 		ixgbe_update_link_status(adapter);
   5031 }
   5032 
   5033 /*
   5034 ** Tasklet for handling SFP module interrupts
   5035 */
   5036 static void
   5037 ixgbe_handle_mod(void *context)
   5038 {
   5039 	struct adapter  *adapter = context;
   5040 	struct ixgbe_hw *hw = &adapter->hw;
   5041 	device_t	dev = adapter->dev;
   5042 	u32 err;
   5043 
   5044 	err = hw->phy.ops.identify_sfp(hw);
   5045 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   5046 		device_printf(dev,
   5047 		    "Unsupported SFP+ module type was detected.\n");
   5048 		return;
   5049 	}
   5050 	err = hw->mac.ops.setup_sfp(hw);
   5051 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   5052 		device_printf(dev,
   5053 		    "Setup failure - unsupported SFP+ module type.\n");
   5054 		return;
   5055 	}
   5056 	softint_schedule(adapter->msf_si);
   5057 	return;
   5058 }
   5059 
   5060 
   5061 /*
   5062 ** Tasklet for handling MSF (multispeed fiber) interrupts
   5063 */
   5064 static void
   5065 ixgbe_handle_msf(void *context)
   5066 {
   5067 	struct adapter  *adapter = context;
   5068 	struct ixgbe_hw *hw = &adapter->hw;
   5069 	u32 autoneg;
   5070 	bool negotiate;
   5071 
   5072 	autoneg = hw->phy.autoneg_advertised;
   5073 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   5074 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   5075 	else
   5076 		negotiate = 0;
   5077 	if (hw->mac.ops.setup_link)
   5078 		hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
   5079 	return;
   5080 }
   5081 
   5082 #ifdef IXGBE_FDIR
   5083 /*
   5084 ** Tasklet for reinitializing the Flow Director filter table
   5085 */
   5086 static void
   5087 ixgbe_reinit_fdir(void *context)
   5088 {
   5089 	struct adapter  *adapter = context;
   5090 	struct ifnet   *ifp = adapter->ifp;
   5091 
   5092 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
   5093 		return;
   5094 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
   5095 	adapter->fdir_reinit = 0;
   5096 	/* Restart the interface */
   5097 	ifp->if_flags |= IFF_RUNNING;
   5098 	return;
   5099 }
   5100 #endif
   5101 
   5102 /**********************************************************************
   5103  *
   5104  *  Update the board statistics counters.
   5105  *
   5106  **********************************************************************/
   5107 static void
   5108 ixgbe_update_stats_counters(struct adapter *adapter)
   5109 {
   5110 	struct ifnet   *ifp = adapter->ifp;
   5111 	struct ixgbe_hw *hw = &adapter->hw;
   5112 	u32  missed_rx = 0, bprc, lxon, lxoff, total;
   5113 	u64  total_missed_rx = 0;
   5114 
   5115 	adapter->stats.crcerrs.ev_count += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   5116 	adapter->stats.illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   5117 	adapter->stats.errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   5118 	adapter->stats.mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   5119 
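         	/*
         	** These counters are indexed by hardware packet buffer /
         	** traffic class rather than by driver queue; fold them onto
         	** the configured queues' event counters (index modulo
         	** adapter->num_queues).
         	*/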
   5120 	for (int i = 0; i < __arraycount(adapter->stats.mpc); i++) {
   5121 		int j = i % adapter->num_queues;
   5122 		u32 mp;
   5123 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   5124 		/* missed_rx tallies misses for the gprc workaround */
   5125 		missed_rx += mp;
   5126 		/* global total per queue */
    5127 		adapter->stats.mpc[j].ev_count += mp;
   5128 		/* Running comprehensive total for stats display */
   5129 		total_missed_rx += adapter->stats.mpc[j].ev_count;
   5130 		if (hw->mac.type == ixgbe_mac_82598EB)
   5131 			adapter->stats.rnbc[j] +=
   5132 			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   5133 		adapter->stats.pxontxc[j].ev_count +=
   5134 		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   5135 		adapter->stats.pxonrxc[j].ev_count +=
   5136 		    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   5137 		adapter->stats.pxofftxc[j].ev_count +=
   5138 		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   5139 		adapter->stats.pxoffrxc[j].ev_count +=
   5140 		    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   5141 		adapter->stats.pxon2offc[j].ev_count +=
   5142 		    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   5143 	}
   5144 	for (int i = 0; i < __arraycount(adapter->stats.qprc); i++) {
   5145 		int j = i % adapter->num_queues;
   5146 		adapter->stats.qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   5147 		adapter->stats.qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   5148 		adapter->stats.qbrc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
   5149 		adapter->stats.qbrc[j].ev_count +=
   5150 		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
   5151 		adapter->stats.qbtc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
   5152 		adapter->stats.qbtc[j].ev_count +=
   5153 		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
   5154 		adapter->stats.qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   5155 	}
   5156 	adapter->stats.mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   5157 	adapter->stats.mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   5158 	adapter->stats.rlec.ev_count += IXGBE_READ_REG(hw, IXGBE_RLEC);
   5159 
   5160 	/* Hardware workaround, gprc counts missed packets */
   5161 	adapter->stats.gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   5162 
   5163 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   5164 	adapter->stats.lxontxc.ev_count += lxon;
   5165 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   5166 	adapter->stats.lxofftxc.ev_count += lxoff;
   5167 	total = lxon + lxoff;
   5168 
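         	/*
         	** XON/XOFF pause frames are minimum-size frames that the
         	** hardware also includes in the good octet/packet transmit
         	** counters, so 'total' is subtracted back out below.
         	*/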
   5169 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5170 		adapter->stats.gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   5171 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   5172 		adapter->stats.gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   5173 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   5174 		adapter->stats.tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   5175 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   5176 		adapter->stats.lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   5177 		adapter->stats.lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   5178 	} else {
   5179 		adapter->stats.lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   5180 		adapter->stats.lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   5181 		/* 82598 only has a counter in the high register */
   5182 		adapter->stats.gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   5183 		adapter->stats.gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   5184 		adapter->stats.tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   5185 	}
   5186 
   5187 	/*
   5188 	 * Workaround: mprc hardware is incorrectly counting
   5189 	 * broadcasts, so for now we subtract those.
   5190 	 */
   5191 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   5192 	adapter->stats.bprc.ev_count += bprc;
   5193 	adapter->stats.mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   5194 
   5195 	adapter->stats.prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   5196 	adapter->stats.prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   5197 	adapter->stats.prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   5198 	adapter->stats.prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   5199 	adapter->stats.prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   5200 	adapter->stats.prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   5201 
   5202 	adapter->stats.gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   5203 	adapter->stats.mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   5204 	adapter->stats.ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   5205 
   5206 	adapter->stats.ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   5207 	adapter->stats.rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   5208 	adapter->stats.roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   5209 	adapter->stats.rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   5210 	adapter->stats.mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   5211 	adapter->stats.mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   5212 	adapter->stats.mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   5213 	adapter->stats.tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   5214 	adapter->stats.tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   5215 	adapter->stats.ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   5216 	adapter->stats.ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   5217 	adapter->stats.ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   5218 	adapter->stats.ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   5219 	adapter->stats.ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   5220 	adapter->stats.bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   5221 	adapter->stats.xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   5222 	adapter->stats.fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   5223 	adapter->stats.fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   5224 
   5225 	/* Only read FCOE on 82599 */
   5226 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5227 		adapter->stats.fcoerpdc.ev_count +=
   5228 		    IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   5229 		adapter->stats.fcoeprc.ev_count +=
   5230 		    IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   5231 		adapter->stats.fcoeptc.ev_count +=
   5232 		    IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   5233 		adapter->stats.fcoedwrc.ev_count +=
   5234 		    IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   5235 		adapter->stats.fcoedwtc.ev_count +=
   5236 		    IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   5237 	}
   5238 
   5239 	/* Fill out the OS statistics structure */
   5240 	ifp->if_ipackets = adapter->stats.gprc.ev_count;
   5241 	ifp->if_opackets = adapter->stats.gptc.ev_count;
   5242 	ifp->if_ibytes = adapter->stats.gorc.ev_count;
   5243 	ifp->if_obytes = adapter->stats.gotc.ev_count;
   5244 	ifp->if_imcasts = adapter->stats.mprc.ev_count;
   5245 	ifp->if_collisions = 0;
   5246 
   5247 	/* Rx Errors */
   5248 	ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs.ev_count +
   5249 		adapter->stats.rlec.ev_count;
   5250 }
   5251 
   5252 /** ixgbe_sysctl_tdh_handler - Handler function
   5253  *  Retrieves the TDH value from the hardware
   5254  */
   5255 static int
   5256 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   5257 {
   5258 	struct sysctlnode node;
   5259 	uint32_t val;
   5260 	struct tx_ring *txr;
   5261 
   5262 	node = *rnode;
   5263 	txr = (struct tx_ring *)node.sysctl_data;
   5264 	if (txr == NULL)
   5265 		return 0;
   5266 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   5267 	node.sysctl_data = &val;
   5268 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5269 }
   5270 
   5271 /** ixgbe_sysctl_tdt_handler - Handler function
   5272  *  Retrieves the TDT value from the hardware
   5273  */
   5274 static int
   5275 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   5276 {
   5277 	struct sysctlnode node;
   5278 	uint32_t val;
   5279 	struct tx_ring *txr;
   5280 
   5281 	node = *rnode;
   5282 	txr = (struct tx_ring *)node.sysctl_data;
   5283 	if (txr == NULL)
   5284 		return 0;
   5285 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   5286 	node.sysctl_data = &val;
   5287 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5288 }
   5289 
   5290 /** ixgbe_sysctl_rdh_handler - Handler function
   5291  *  Retrieves the RDH value from the hardware
   5292  */
   5293 static int
   5294 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   5295 {
   5296 	struct sysctlnode node;
   5297 	uint32_t val;
   5298 	struct rx_ring *rxr;
   5299 
   5300 	node = *rnode;
   5301 	rxr = (struct rx_ring *)node.sysctl_data;
   5302 	if (rxr == NULL)
   5303 		return 0;
   5304 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   5305 	node.sysctl_data = &val;
   5306 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5307 }
   5308 
   5309 /** ixgbe_sysctl_rdt_handler - Handler function
   5310  *  Retrieves the RDT value from the hardware
   5311  */
   5312 static int
   5313 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   5314 {
   5315 	struct sysctlnode node;
   5316 	uint32_t val;
   5317 	struct rx_ring *rxr;
   5318 
   5319 	node = *rnode;
   5320 	rxr = (struct rx_ring *)node.sysctl_data;
   5321 	if (rxr == NULL)
   5322 		return 0;
   5323 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   5324 	node.sysctl_data = &val;
   5325 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5326 }
   5327 
   5328 static int
   5329 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   5330 {
   5331 	struct sysctlnode node;
   5332 	struct ix_queue *que;
   5333 	uint32_t reg, usec, rate;
   5334 
   5335 	node = *rnode;
   5336 	que = (struct ix_queue *)node.sysctl_data;
   5337 	if (que == NULL)
   5338 		return 0;
   5339 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
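         	/* The interval is kept in bits 3:11 of EITR, in microseconds */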
   5340 	usec = ((reg & 0x0FF8) >> 3);
   5341 	if (usec > 0)
   5342 		rate = 1000000 / usec;
   5343 	else
   5344 		rate = 0;
   5345 	node.sysctl_data = &rate;
   5346 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5347 }
   5348 
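         /*
         ** Create the hw.<devname> sysctl node; the driver's other
         ** sysctls hang off this node.
         */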
   5349 const struct sysctlnode *
   5350 ixgbe_sysctl_instance(struct adapter *adapter)
   5351 {
   5352 	const char *dvname;
   5353 	struct sysctllog **log;
   5354 	int rc;
   5355 	const struct sysctlnode *rnode;
   5356 
   5357 	log = &adapter->sysctllog;
   5358 	dvname = device_xname(adapter->dev);
   5359 
   5360 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   5361 	    0, CTLTYPE_NODE, dvname,
   5362 	    SYSCTL_DESCR("ixgbe information and settings"),
   5363 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   5364 		goto err;
   5365 
   5366 	return rnode;
   5367 err:
   5368 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   5369 	return NULL;
   5370 }
   5371 
   5372 /*
   5373  * Add sysctl variables, one per statistic, to the system.
   5374  */
   5375 static void
   5376 ixgbe_add_hw_stats(struct adapter *adapter)
   5377 {
   5378 	device_t dev = adapter->dev;
   5379 	const struct sysctlnode *rnode, *cnode;
   5380 	struct sysctllog **log = &adapter->sysctllog;
   5381 	struct tx_ring *txr = adapter->tx_rings;
   5382 	struct rx_ring *rxr = adapter->rx_rings;
   5383 	struct ixgbe_hw	 *hw = &adapter->hw;
   5384 
   5385 	struct ixgbe_hw_stats *stats = &adapter->stats;
   5386 
   5387 	/* Driver Statistics */
   5388 #if 0
   5389 	/* These counters are not updated by the software */
   5390 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
   5391 			CTLFLAG_RD, &adapter->dropped_pkts,
   5392 			"Driver dropped packets");
   5393 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_header_failed",
   5394 			CTLFLAG_RD, &adapter->mbuf_header_failed,
   5395 			"???");
   5396 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_packet_failed",
   5397 			CTLFLAG_RD, &adapter->mbuf_packet_failed,
   5398 			"???");
   5399 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_map_avail",
   5400 			CTLFLAG_RD, &adapter->no_tx_map_avail,
   5401 			"???");
   5402 #endif
   5403 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   5404 	    NULL, device_xname(dev), "Handled queue in softint");
   5405 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   5406 	    NULL, device_xname(dev), "Requeued in softint");
   5407 	evcnt_attach_dynamic(&adapter->morerx, EVCNT_TYPE_MISC,
   5408 	    NULL, device_xname(dev), "Interrupt handler more rx");
   5409 	evcnt_attach_dynamic(&adapter->moretx, EVCNT_TYPE_MISC,
   5410 	    NULL, device_xname(dev), "Interrupt handler more tx");
   5411 	evcnt_attach_dynamic(&adapter->txloops, EVCNT_TYPE_MISC,
   5412 	    NULL, device_xname(dev), "Interrupt handler tx loops");
   5413 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   5414 	    NULL, device_xname(dev), "Driver tx dma soft fail EFBIG");
   5415 	evcnt_attach_dynamic(&adapter->m_defrag_failed, EVCNT_TYPE_MISC,
   5416 	    NULL, device_xname(dev), "m_defrag() failed");
   5417 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   5418 	    NULL, device_xname(dev), "Driver tx dma hard fail EFBIG");
   5419 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   5420 	    NULL, device_xname(dev), "Driver tx dma hard fail EINVAL");
   5421 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   5422 	    NULL, device_xname(dev), "Driver tx dma hard fail other");
   5423 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   5424 	    NULL, device_xname(dev), "Driver tx dma soft fail EAGAIN");
   5425 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   5426 	    NULL, device_xname(dev), "Driver tx dma soft fail ENOMEM");
   5427 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   5428 	    NULL, device_xname(dev), "Watchdog timeouts");
   5429 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   5430 	    NULL, device_xname(dev), "TSO errors");
   5431 	evcnt_attach_dynamic(&adapter->tso_tx, EVCNT_TYPE_MISC,
   5432 	    NULL, device_xname(dev), "TSO");
   5433 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_MISC,
   5434 	    NULL, device_xname(dev), "Link MSIX IRQ Handled");
   5435 
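         	/* Per-queue sysctl nodes and event counters */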
   5436 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   5437 		snprintf(adapter->queues[i].evnamebuf,
   5438 		    sizeof(adapter->queues[i].evnamebuf), "%s queue%d",
   5439 		    device_xname(dev), i);
   5440 		snprintf(adapter->queues[i].namebuf,
   5441 		    sizeof(adapter->queues[i].namebuf), "queue%d", i);
   5442 
   5443 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   5444 			aprint_error_dev(dev, "could not create sysctl root\n");
   5445 			break;
   5446 		}
   5447 
   5448 		if (sysctl_createv(log, 0, &rnode, &rnode,
   5449 		    0, CTLTYPE_NODE,
   5450 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   5451 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5452 			break;
   5453 
   5454 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5455 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5456 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   5457 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   5458 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   5459 			break;
   5460 
   5461 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5462 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5463 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   5464 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   5465 		    0, CTL_CREATE, CTL_EOL) != 0)
   5466 			break;
   5467 
   5468 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5469 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5470 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   5471 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   5472 		    0, CTL_CREATE, CTL_EOL) != 0)
   5473 			break;
   5474 
   5475 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   5476 		    NULL, adapter->queues[i].evnamebuf,
   5477 		    "Queue No Descriptor Available");
   5478 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   5479 		    NULL, adapter->queues[i].evnamebuf,
   5480 		    "Queue Packets Transmitted");
   5481 
   5482 #ifdef LRO
   5483 		struct lro_ctrl *lro = &rxr->lro;
   5484 #endif /* LRO */
   5485 
   5486 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5487 		    CTLFLAG_READONLY,
   5488 		    CTLTYPE_INT,
   5489 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   5490 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   5491 		    CTL_CREATE, CTL_EOL) != 0)
   5492 			break;
   5493 
   5494 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5495 		    CTLFLAG_READONLY,
   5496 		    CTLTYPE_INT,
   5497 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   5498 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   5499 		    CTL_CREATE, CTL_EOL) != 0)
   5500 			break;
   5501 
   5502 		if (i < __arraycount(adapter->stats.mpc)) {
   5503 			evcnt_attach_dynamic(&adapter->stats.mpc[i],
   5504 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5505 			    "Missed Packet Count");
   5506 		}
   5507 		if (i < __arraycount(adapter->stats.pxontxc)) {
   5508 			evcnt_attach_dynamic(&adapter->stats.pxontxc[i],
   5509 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5510 			    "pxontxc");
   5511 			evcnt_attach_dynamic(&adapter->stats.pxonrxc[i],
   5512 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5513 			    "pxonrxc");
   5514 			evcnt_attach_dynamic(&adapter->stats.pxofftxc[i],
   5515 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5516 			    "pxofftxc");
   5517 			evcnt_attach_dynamic(&adapter->stats.pxoffrxc[i],
   5518 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5519 			    "pxoffrxc");
   5520 			evcnt_attach_dynamic(&adapter->stats.pxon2offc[i],
   5521 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5522 			    "pxon2offc");
   5523 		}
   5524 		if (i < __arraycount(adapter->stats.qprc)) {
   5525 			evcnt_attach_dynamic(&adapter->stats.qprc[i],
   5526 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5527 			    "qprc");
   5528 			evcnt_attach_dynamic(&adapter->stats.qptc[i],
   5529 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5530 			    "qptc");
   5531 			evcnt_attach_dynamic(&adapter->stats.qbrc[i],
   5532 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5533 			    "qbrc");
   5534 			evcnt_attach_dynamic(&adapter->stats.qbtc[i],
   5535 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5536 			    "qbtc");
   5537 			evcnt_attach_dynamic(&adapter->stats.qprdc[i],
   5538 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5539 			    "qprdc");
   5540 		}
   5541 
   5542 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   5543 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   5544 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   5545 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   5546 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   5547 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   5548 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   5549 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   5550 		evcnt_attach_dynamic(&rxr->rx_split_packets, EVCNT_TYPE_MISC,
   5551 		    NULL, adapter->queues[i].evnamebuf, "Rx split packets");
   5552 		evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_MISC,
   5553 		    NULL, adapter->queues[i].evnamebuf, "Rx interrupts");
   5554 #ifdef LRO
   5555 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   5556 				CTLFLAG_RD, &lro->lro_queued, 0,
   5557 				"LRO Queued");
   5558 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   5559 				CTLFLAG_RD, &lro->lro_flushed, 0,
   5560 				"LRO Flushed");
   5561 #endif /* LRO */
   5562 	}
   5563 
    5564 	/* MAC stats get their own sub-node */
   5565 
   5566 
   5567 	snprintf(stats->namebuf,
   5568 	    sizeof(stats->namebuf), "%s MAC Statistics", device_xname(dev));
   5569 
   5570 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   5571 	    stats->namebuf, "rx csum offload - IP");
   5572 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   5573 	    stats->namebuf, "rx csum offload - L4");
   5574 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   5575 	    stats->namebuf, "rx csum offload - IP bad");
   5576 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   5577 	    stats->namebuf, "rx csum offload - L4 bad");
   5578 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   5579 	    stats->namebuf, "Interrupt conditions zero");
   5580 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   5581 	    stats->namebuf, "Legacy interrupts");
   5582 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   5583 	    stats->namebuf, "CRC Errors");
   5584 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   5585 	    stats->namebuf, "Illegal Byte Errors");
   5586 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   5587 	    stats->namebuf, "Byte Errors");
   5588 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   5589 	    stats->namebuf, "MAC Short Packets Discarded");
   5590 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   5591 	    stats->namebuf, "MAC Local Faults");
   5592 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   5593 	    stats->namebuf, "MAC Remote Faults");
   5594 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   5595 	    stats->namebuf, "Receive Length Errors");
   5596 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   5597 	    stats->namebuf, "Link XON Transmitted");
   5598 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   5599 	    stats->namebuf, "Link XON Received");
   5600 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   5601 	    stats->namebuf, "Link XOFF Transmitted");
   5602 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   5603 	    stats->namebuf, "Link XOFF Received");
   5604 
   5605 	/* Packet Reception Stats */
   5606 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   5607 	    stats->namebuf, "Total Octets Received");
   5608 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   5609 	    stats->namebuf, "Good Octets Received");
   5610 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   5611 	    stats->namebuf, "Total Packets Received");
   5612 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   5613 	    stats->namebuf, "Good Packets Received");
   5614 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   5615 	    stats->namebuf, "Multicast Packets Received");
   5616 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   5617 	    stats->namebuf, "Broadcast Packets Received");
   5618 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   5619 	    stats->namebuf, "64 byte frames received ");
   5620 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   5621 	    stats->namebuf, "65-127 byte frames received");
   5622 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   5623 	    stats->namebuf, "128-255 byte frames received");
   5624 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   5625 	    stats->namebuf, "256-511 byte frames received");
   5626 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   5627 	    stats->namebuf, "512-1023 byte frames received");
   5628 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
    5629 	    stats->namebuf, "1024-1522 byte frames received");
   5630 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   5631 	    stats->namebuf, "Receive Undersized");
   5632 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   5633 	    stats->namebuf, "Fragmented Packets Received ");
   5634 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   5635 	    stats->namebuf, "Oversized Packets Received");
   5636 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   5637 	    stats->namebuf, "Received Jabber");
   5638 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   5639 	    stats->namebuf, "Management Packets Received");
   5640 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   5641 	    stats->namebuf, "Checksum Errors");
   5642 
   5643 	/* Packet Transmission Stats */
   5644 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   5645 	    stats->namebuf, "Good Octets Transmitted");
   5646 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   5647 	    stats->namebuf, "Total Packets Transmitted");
   5648 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   5649 	    stats->namebuf, "Good Packets Transmitted");
   5650 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   5651 	    stats->namebuf, "Broadcast Packets Transmitted");
   5652 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   5653 	    stats->namebuf, "Multicast Packets Transmitted");
   5654 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   5655 	    stats->namebuf, "Management Packets Transmitted");
   5656 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   5657 	    stats->namebuf, "64 byte frames transmitted ");
   5658 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   5659 	    stats->namebuf, "65-127 byte frames transmitted");
   5660 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   5661 	    stats->namebuf, "128-255 byte frames transmitted");
   5662 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   5663 	    stats->namebuf, "256-511 byte frames transmitted");
   5664 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   5665 	    stats->namebuf, "512-1023 byte frames transmitted");
   5666 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   5667 	    stats->namebuf, "1024-1522 byte frames transmitted");
   5668 
   5669 	/* FC Stats */
   5670 	evcnt_attach_dynamic(&stats->fccrc, EVCNT_TYPE_MISC, NULL,
   5671 	    stats->namebuf, "FC CRC Errors");
   5672 	evcnt_attach_dynamic(&stats->fclast, EVCNT_TYPE_MISC, NULL,
   5673 	    stats->namebuf, "FC Last Error");
   5674 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5675 		evcnt_attach_dynamic(&stats->fcoerpdc, EVCNT_TYPE_MISC, NULL,
   5676 		    stats->namebuf, "FCoE Packets Dropped");
   5677 		evcnt_attach_dynamic(&stats->fcoeprc, EVCNT_TYPE_MISC, NULL,
   5678 		    stats->namebuf, "FCoE Packets Received");
   5679 		evcnt_attach_dynamic(&stats->fcoeptc, EVCNT_TYPE_MISC, NULL,
   5680 		    stats->namebuf, "FCoE Packets Transmitted");
   5681 		evcnt_attach_dynamic(&stats->fcoedwrc, EVCNT_TYPE_MISC, NULL,
   5682 		    stats->namebuf, "FCoE DWords Received");
   5683 		evcnt_attach_dynamic(&stats->fcoedwtc, EVCNT_TYPE_MISC, NULL,
   5684 		    stats->namebuf, "FCoE DWords Transmitted");
   5685 	}
   5686 }
   5687 
   5688 /*
   5689 ** Set flow control using sysctl:
   5690 ** Flow control values:
   5691 ** 	0 - off
   5692 **	1 - rx pause
   5693 **	2 - tx pause
   5694 **	3 - full
   5695 */
   5696 static int
   5697 ixgbe_set_flowcntl(SYSCTLFN_ARGS)
   5698 {
   5699 	struct sysctlnode node;
   5700 	int error;
   5701 	int last = ixgbe_flow_control;
   5702 	struct adapter *adapter;
   5703 
   5704 	node = *rnode;
   5705 	adapter = (struct adapter *)node.sysctl_data;
   5706 	node.sysctl_data = &ixgbe_flow_control;
   5707 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5708 	if (error != 0 || newp == NULL)
   5709 		return error;
   5710 
   5711 	/* Don't bother if it's not changed */
   5712 	if (ixgbe_flow_control == last)
   5713 		return (0);
   5714 
   5715 	switch (ixgbe_flow_control) {
   5716 		case ixgbe_fc_rx_pause:
   5717 		case ixgbe_fc_tx_pause:
   5718 		case ixgbe_fc_full:
   5719 			adapter->hw.fc.requested_mode = ixgbe_flow_control;
   5720 			break;
   5721 		case ixgbe_fc_none:
   5722 		default:
   5723 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5724 	}
   5725 
   5726 	ixgbe_fc_enable(&adapter->hw, 0);
   5727 	return 0;
   5728 }
   5729 
   5730 static void
   5731 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
   5732         const char *description, int *limit, int value)
   5733 {
   5734 	const struct sysctlnode *rnode, *cnode;
   5735 	struct sysctllog **log = &adapter->sysctllog;
   5736 
    5737 	*limit = value;
   5738 
   5739 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL)
   5740 		aprint_error_dev(adapter->dev,
   5741 		    "could not create sysctl root\n");
   5742 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   5743 	    CTLFLAG_READWRITE,
   5744 	    CTLTYPE_INT,
   5745 	    name, SYSCTL_DESCR(description),
   5746 	    NULL, 0, limit, 0,
   5747 	    CTL_CREATE, CTL_EOL) != 0) {
   5748 		aprint_error_dev(adapter->dev, "%s: could not create sysctl",
   5749 		    __func__);
   5750 	}
   5751 }
   5752 
   5753 /*
   5754 ** Control link advertise speed:
   5755 ** 	0 - normal
   5756 **	1 - advertise only 1G
   5757 */
   5758 static int
   5759 ixgbe_set_advertise(SYSCTLFN_ARGS)
   5760 {
   5761 	struct sysctlnode	node;
   5762 	int			t, error;
   5763 	struct adapter		*adapter;
   5764 	struct ixgbe_hw		*hw;
   5765 	ixgbe_link_speed	speed, last;
   5766 
   5767 	node = *rnode;
   5768 	adapter = (struct adapter *)node.sysctl_data;
   5769 	t = adapter->advertise;
   5770 	node.sysctl_data = &t;
   5771 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5772 	if (error != 0 || newp == NULL)
   5773 		return error;
   5774 
   5775 	if (t == -1)
   5776 		return 0;
   5777 
   5778 	adapter->advertise = t;
   5779 
   5780 	hw = &adapter->hw;
   5781 	last = hw->phy.autoneg_advertised;
   5782 
    5783 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
    5784 	    (hw->phy.multispeed_fiber)))
    5785 		return 0;
    5786 
    5787 	if (adapter->advertise == 1)
    5788 		speed = IXGBE_LINK_SPEED_1GB_FULL;
    5789 	else
    5790 		speed = IXGBE_LINK_SPEED_1GB_FULL |
    5791 			IXGBE_LINK_SPEED_10GB_FULL;
   5792 
   5793 	if (speed == last) /* no change */
   5794 		return 0;
   5795 
   5796 	hw->mac.autotry_restart = TRUE;
   5797 	hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
   5798 
   5799 	return 0;
   5800 }
   5801