/******************************************************************************

  Copyright (c) 2001-2011, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.51 2011/04/25 23:34:21 jfv Exp $*/
/*$NetBSD: ixgbe.c,v 1.7 2014/02/25 18:30:10 pooka Exp $*/

#include "opt_inet.h"

#include "ixgbe.h"

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "2.3.10";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select the devices on which to load the driver.
 *  The last field stores an index into ixgbe_strings.
 *  The last entry must be all 0s.
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static const char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t, cfdata_t, void *);
static void     ixgbe_attach(device_t, device_t, void *);
static int      ixgbe_detach(device_t, int);
#if 0
static int      ixgbe_shutdown(device_t);
#endif
static void     ixgbe_start(struct ifnet *);
static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
#if __FreeBSD_version >= 800000
static int	ixgbe_mq_start(struct ifnet *, struct mbuf *);
static int	ixgbe_mq_start_locked(struct ifnet *,
		    struct tx_ring *, struct mbuf *);
static void	ixgbe_qflush(struct ifnet *);
#endif
static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
static void	ixgbe_ifstop(struct ifnet *, int);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static int      ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int      ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_queues(struct adapter *);
static int	ixgbe_setup_msix(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_link(struct adapter *);

static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
static int	ixgbe_setup_transmit_structures(struct adapter *);
static void	ixgbe_setup_transmit_ring(struct tx_ring *);
static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_free_transmit_structures(struct adapter *);
static void     ixgbe_free_transmit_buffers(struct tx_ring *);

static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
static int      ixgbe_setup_receive_structures(struct adapter *);
static int	ixgbe_setup_receive_ring(struct rx_ring *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_free_receive_structures(struct adapter *);
static void     ixgbe_free_receive_buffers(struct rx_ring *);
static void	ixgbe_setup_hw_rsc(struct rx_ring *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static bool	ixgbe_txeof(struct tx_ring *);
static bool	ixgbe_rxeof(struct ix_queue *, int);
static void	ixgbe_rx_checksum(u32, struct mbuf *, u32,
		    struct ixgbe_hw_stats *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void	ixgbe_refresh_mbufs(struct rx_ring *, int);
static int      ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int	ixgbe_set_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_set_advertise(SYSCTLFN_PROTO);
static int	ixgbe_dma_malloc(struct adapter *, bus_size_t,
		    struct ixgbe_dma_alloc *, int);
static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void	ixgbe_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static u32	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
#if 0
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif

static void     ixgbe_add_hw_stats(struct adapter *adapter);

static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);
static void	ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

#if defined(NETBSD_MSI_OR_MSIX)
/* The MSI/X Interrupt handlers */
static void	ixgbe_msix_que(void *);
static void	ixgbe_msix_link(void *);
#endif

/* Software interrupts for deferred work */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);

const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

#ifdef IXGBE_FDIR
static void	ixgbe_atr(struct tx_ring *, struct mbuf *);
static void	ixgbe_reinit_fdir(void *, int);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ixgbe_devclass;
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);

MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
#endif

/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation,
** which varies the interrupt rate over
** time based on the traffic seen on
** each interrupt vector.
*/
static int ixgbe_enable_aim = TRUE;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
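
/*
 * Note: TUNABLE_INT() is defined away just above, so on NetBSD the
 * hw.ixgbe.* "tunables" in this file are no-ops kept for symmetry
 * with the FreeBSD original; each variable's initializer is the
 * effective default (ixgbe_enable_aim is additionally exported as
 * the per-device "enable_aim" sysctl in ixgbe_sysctl_attach() below).
 */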

static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);

/*
** Smart speed setting, default to on.
** Right now this only works as a
** compile-time option because it is
** applied during attach; set it to
** 'ixgbe_smart_speed_off' to disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts, so it's off by default.
 */
static bool ixgbe_header_split = FALSE;
TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);

#if defined(NETBSD_MSI_OR_MSIX)
/*
 * Number of queues: if set to 0, the
 * driver autoconfigures based on the
 * number of CPUs, with a maximum of 8.
 * It can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
#endif

/*
** Number of TX descriptors per ring;
** set higher than RX because this
** seems to be the better-performing
** choice.
*/
static int ixgbe_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);

/* Keep a running tab of the total ports for a sanity check */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** For Flow Director: this is the
** sampling rate of TX packets for
** the filter pool; with the default
** of 20, every 20th packet is probed.
**
** This feature can be disabled by
** setting this to 0.
*/
static int atr_sample_rate = 20;
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool; this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines whether the driver should be loaded on
 *  an adapter, based on the PCI vendor/device ID of the adapter.
 *
 *  return 1 on success, 0 on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
{
	const struct pci_attach_args *pa = aux;

	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
}

static ixgbe_vendor_info_t *
ixgbe_lookup(const struct pci_attach_args *pa)
{
	pcireg_t subid;
	ixgbe_vendor_info_t *ent;

	INIT_DEBUGOUT("ixgbe_probe: begin");

	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
		return NULL;

	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
		if (PCI_VENDOR(pa->pa_id) == ent->vendor_id &&
		    PCI_PRODUCT(pa->pa_id) == ent->device_id &&

		    (PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id ||
		     ent->subvendor_id == 0) &&

		    (PCI_SUBSYS_ID(subid) == ent->subdevice_id ||
		     ent->subdevice_id == 0)) {
			++ixgbe_total_ports;
			return ent;
		}
	}
	return NULL;
}

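/*
 * A zero subvendor_id or subdevice_id in a table entry acts as a
 * wildcard for that field; every entry in ixgbe_vendor_info_array
 * uses zeros there, so matching is effectively on the PCI
 * vendor/device ID alone.
 */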

static void
ixgbe_sysctl_attach(struct adapter *adapter)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;
	device_t dev;

	dev = adapter->dev;
	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "flow_control", SYSCTL_DESCR("Flow Control"),
	    ixgbe_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_gig", SYSCTL_DESCR("1G Link"),
	    ixgbe_set_advertise, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* XXX This is an *instance* sysctl controlling a *global* variable.
	 * XXX It's that way in the FreeBSD driver that this derives from.
	 */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &ixgbe_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
}
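
/*
 * A sketch of how the knobs above surface to userland; the node
 * names are assumptions based on ixgbe_sysctl_instance() rooting
 * the per-device tree at the device's name, e.g. "ixg0":
 *
 *	sysctl hw.ixg0.num_rx_desc		# read-only
 *	sysctl -w hw.ixg0.flow_control=3	# calls ixgbe_set_flowcntl()
 */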

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16		csum;
	u32		ctrl_ext;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	adapter->osdep.dmat = pa->pa_dmat;

	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* SYSCTL APIs */

	ixgbe_sysctl_attach(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;
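
	/*
	 * Example of the alignment rule above (sizes assumed from
	 * ixgbe.h: DBA_ALIGN of 128 and 16-byte advanced descriptors):
	 * the ring must occupy a whole number of 128-byte units, so
	 * the descriptor count must be a multiple of 8; e.g. 4096
	 * passes while 4100 would fail (4100 * 16 % 128 == 64).
	 */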

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			aprint_error_dev(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}
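
	/*
	 * A worked example of the check above, with hypothetical
	 * numbers: 2048 RX descriptors * 8 queues * 2 ports would pin
	 * 32768 clusters, more than a typical nmbclusters, tripping
	 * the fallback to DEFAULT_RXD.
	 */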

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.low_water = IXGBE_FC_LO;
	hw->fc.high_water = IXGBE_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error == IXGBE_ERR_EEPROM_VERSION) {
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
		aprint_error_dev(dev, "Unsupported SFP+ Module\n");

	if (error) {
		error = EIO;
		aprint_error_dev(dev, "Hardware Initialization Failure\n");
		goto err_late;
	}

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter, pa);
	else
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Sysctl for limiting the amount of work done in software interrupts */
	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixgbe_rx_process_limit);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Print PCIE bus type/speed/width info */
	ixgbe_get_bus_info(hw);
	aprint_normal_dev(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));

	if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
	    (hw->bus.speed == ixgbe_bus_speed_2500)) {
		aprint_error_dev(dev, "PCI-Express bandwidth available"
		    " for this card\n     is not sufficient for"
		    " optimal performance.\n");
		aprint_error_dev(dev, "For optimal performance a x8 "
		    "PCIE, or x4 PCIE 2 slot is required.\n");
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	ixgbe_add_hw_stats(adapter);

	INIT_DEBUGOUT("ixgbe_attach: end");
	return;
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	return;
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw_stats *stats = &adapter->stats;
	struct ix_queue *que = adapter->queues;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use\n");
		return EBUSY;
	}

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		softint_disestablish(que->que_si);
	}

	/* Drain the Link queue */
	softint_disestablish(adapter->link_si);
	softint_disestablish(adapter->mod_si);
	softint_disestablish(adapter->msf_si);
#ifdef IXGBE_FDIR
	softint_disestablish(adapter->fdir_si);
#endif

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);
	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);

	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->morerx);
	evcnt_detach(&adapter->moretx);
	evcnt_detach(&adapter->txloops);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->m_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->tso_tx);
	evcnt_detach(&adapter->link_irq);
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);

		if (i < __arraycount(adapter->stats.mpc)) {
			evcnt_detach(&adapter->stats.mpc[i]);
		}
		if (i < __arraycount(adapter->stats.pxontxc)) {
			evcnt_detach(&adapter->stats.pxontxc[i]);
			evcnt_detach(&adapter->stats.pxonrxc[i]);
			evcnt_detach(&adapter->stats.pxofftxc[i]);
			evcnt_detach(&adapter->stats.pxoffrxc[i]);
			evcnt_detach(&adapter->stats.pxon2offc[i]);
		}
		if (i < __arraycount(adapter->stats.qprc)) {
			evcnt_detach(&adapter->stats.qprc[i]);
			evcnt_detach(&adapter->stats.qptc[i]);
			evcnt_detach(&adapter->stats.qbrc[i]);
			evcnt_detach(&adapter->stats.qbtc[i]);
			evcnt_detach(&adapter->stats.qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
		evcnt_detach(&rxr->rx_split_packets);
		evcnt_detach(&rxr->rx_irq);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* FC Stats */
	evcnt_detach(&stats->fccrc);
	evcnt_detach(&stats->fclast);
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		evcnt_detach(&stats->fcoerpdc);
		evcnt_detach(&stats->fcoeprc);
		evcnt_detach(&stats->fcoeptc);
		evcnt_detach(&stats->fcoedwrc);
		evcnt_detach(&stats->fcoedwtc);
	}

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}
#endif


/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
ixgbe_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	int rc;
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
	    IFF_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (rc == EFBIG) {
			struct mbuf *mtmp;

			if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) {
				m_head = mtmp;
				rc = ixgbe_xmit(txr, m_head);
				if (rc != 0)
					adapter->efbig2_tx_dma_setup.ev_count++;
			} else
				adapter->m_defrag_failed.ev_count++;
		}
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head);

		/* Set watchdog on */
		getmicrotime(&txr->watchdog_time);
		txr->queue_status = IXGBE_QUEUE_WORKING;

	}
	return;
}

/*
 * Legacy TX start - called by the stack. This
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixgbe_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);
	}
	return;
}

#if __FreeBSD_version >= 800000
/*
** Multiqueue Transmit driver
**
*/
static int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXGBE_TX_TRYLOCK(txr)) {
		err = ixgbe_mq_start_locked(ifp, txr, m);
		IXGBE_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		softint_schedule(que->que_si);
	}

	return (err);
}

static int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		enqueued, err = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
			ixgbe_txeof(txr);
		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->queue_status = IXGBE_QUEUE_WORKING;
		getmicrotime(&txr->watchdog_time);
	}

	return (err);
}

/*
** Flush all ring buffers
*/
static void
ixgbe_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXGBE_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}
#endif /* __FreeBSD_version >= 800000 */

static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;

	IXGBE_CORE_LOCK(adapter);

	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		rc = ENETRESET;
	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
		ixgbe_set_promisc(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return rc;
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

   1082 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   1083 {
   1084 	struct adapter	*adapter = ifp->if_softc;
   1085 	struct ifcapreq *ifcr = data;
   1086 	struct ifreq	*ifr = data;
   1087 	int             error = 0;
   1088 	int l4csum_en;
   1089 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   1090 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   1091 
   1092 	switch (command) {
   1093 	case SIOCSIFFLAGS:
   1094 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   1095 		break;
   1096 	case SIOCADDMULTI:
   1097 	case SIOCDELMULTI:
   1098 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   1099 		break;
   1100 	case SIOCSIFMEDIA:
   1101 	case SIOCGIFMEDIA:
   1102 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   1103 		break;
   1104 	case SIOCSIFCAP:
   1105 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   1106 		break;
   1107 	case SIOCSIFMTU:
   1108 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   1109 		break;
   1110 	default:
   1111 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
   1112 		break;
   1113 	}
   1114 
   1115 	switch (command) {
   1116 	case SIOCSIFMEDIA:
   1117 	case SIOCGIFMEDIA:
   1118 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   1119 	case SIOCSIFCAP:
   1120 		/* Layer-4 Rx checksum offload has to be turned on and
   1121 		 * off as a unit.
   1122 		 */
   1123 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   1124 		if (l4csum_en != l4csum && l4csum_en != 0)
   1125 			return EINVAL;
   1126 		/*FALLTHROUGH*/
   1127 	case SIOCADDMULTI:
   1128 	case SIOCDELMULTI:
   1129 	case SIOCSIFFLAGS:
   1130 	case SIOCSIFMTU:
   1131 	default:
   1132 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   1133 			return error;
   1134 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   1135 			;
   1136 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   1137 			IXGBE_CORE_LOCK(adapter);
   1138 			ixgbe_init_locked(adapter);
   1139 			IXGBE_CORE_UNLOCK(adapter);
   1140 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   1141 			/*
   1142 			 * Multicast list has changed; set the hardware filter
   1143 			 * accordingly.
   1144 			 */
   1145 			IXGBE_CORE_LOCK(adapter);
   1146 			ixgbe_disable_intr(adapter);
   1147 			ixgbe_set_multi(adapter);
   1148 			ixgbe_enable_intr(adapter);
   1149 			IXGBE_CORE_UNLOCK(adapter);
   1150 		}
   1151 		return 0;
   1152 	}
   1153 }

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: the stack calls it through the
 *  network interface's init entry point, and the driver itself uses
 *  it as a hw/sw initialization routine to get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		k, txdctl, mhadd, gpie;
	u32		rxdctl, rxctrl;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init: begin");
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else if (adapter->max_frame_size <= 9216)
		adapter->rx_mbuf_sz = MJUM9BYTES;
	else
		adapter->rx_mbuf_sz = MJUM16BYTES;
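
	/*
	 * For example: the default MTU of 1500 yields a max_frame_size
	 * of 1500 + 14 (Ethernet header) + 4 (CRC) = 1518, which fits
	 * a standard 2K cluster (MCLBYTES); a 9000-byte jumbo MTU
	 * yields 9018 and selects the 9K pool (MJUM9BYTES).
	 */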

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);

	/* Enable Fan Failure Interrupt */
	gpie |= IXGBE_SDP1_GPIEN;

	/* Add for Thermal detection */
	if (hw->mac.type == ixgbe_mac_82599EB)
		gpie |= IXGBE_SDP2_GPIEN;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */

	for (int i = 0; i < adapter->num_queues; i++) {
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
	}

	for (int i = 0; i < adapter->num_queues; i++) {
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 32 (0x20)
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		/* XXX I don't trust this loop, and I don't trust the
		 * XXX memory barrier.  What is this meant to do? --dyoung
		 */
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
	}
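
	/*
	 * The intent of the poll above appears to be the usual
	 * datasheet requirement: after setting RXDCTL.ENABLE, wait
	 * (up to ~10ms here) for the hardware to report the queue
	 * enabled before writing the tail pointer RDT; the wmb()
	 * orders the preceding descriptor setup ahead of that tail
	 * write.
	 */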

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB)
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
#endif

	/*
	** Check on any SFP devices that
	** need to be kick-started
	*/
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

static int
ixgbe_init(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
}


/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)(1ULL << vector);
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
}

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)(1ULL << vector);
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
}

static inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}
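
/*
 * In the three helpers above, the 64 possible queue vectors are split
 * across two 32-bit registers on non-82598 hardware: vectors 0-31
 * land in the _EX(0) register and vectors 32-63 in _EX(1). For
 * example, vector 33 sets bit 1 of the second register via the
 * (queue >> 32) mask. The shift uses 1ULL so that vectors >= 32 do
 * not overflow a 32-bit constant.
 */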


static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	adapter->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que, adapter->rx_process_limit);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			adapter->req.ev_count++;
			softint_schedule(que->que_si);
			return;
		}
	}

	/* Reenable this interrupt */
	ixgbe_enable_queue(adapter, que->msix);

	return;
}
   1468 
   1469 
   1470 /*********************************************************************
   1471  *
   1472  *  Legacy Interrupt Service routine
   1473  *
   1474  **********************************************************************/
   1475 
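         /*
         ** Reading EICR tells us the cause; a value of zero means the
         ** interrupt was not ours, so 0 is returned for the benefit of
         ** a shared line.  Any RX/TX work beyond the processing limits
         ** is pushed to the queue softint instead of being done here at
         ** interrupt priority.
         */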
   1476 static int
   1477 ixgbe_legacy_irq(void *arg)
   1478 {
   1479 	struct ix_queue *que = arg;
   1480 	struct adapter	*adapter = que->adapter;
   1481 	struct ixgbe_hw	*hw = &adapter->hw;
    1482 	struct tx_ring	*txr = adapter->tx_rings;
    1483 	bool		more_tx, more_rx;
    1484 	u32		reg_eicr, loop = MAX_LOOP;
   1485 
   1486 
   1487 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   1488 
   1489 	adapter->stats.legint.ev_count++;
   1490 	++que->irqs;
   1491 	if (reg_eicr == 0) {
   1492 		adapter->stats.intzero.ev_count++;
   1493 		ixgbe_enable_intr(adapter);
   1494 		return 0;
   1495 	}
   1496 
   1497 	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
   1498 
   1499 	IXGBE_TX_LOCK(txr);
   1500 	do {
   1501 		adapter->txloops.ev_count++;
   1502 		more_tx = ixgbe_txeof(txr);
   1503 	} while (loop-- && more_tx);
   1504 	IXGBE_TX_UNLOCK(txr);
   1505 
   1506 	if (more_rx || more_tx) {
   1507 		if (more_rx)
   1508 			adapter->morerx.ev_count++;
   1509 		if (more_tx)
   1510 			adapter->moretx.ev_count++;
   1511 		softint_schedule(que->que_si);
   1512 	}
   1513 
   1514 	/* Check for fan failure */
   1515 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
   1516 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
   1517                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1518 		    "REPLACE IMMEDIATELY!!\n");
   1519 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
   1520 	}
   1521 
   1522 	/* Link status change */
   1523 	if (reg_eicr & IXGBE_EICR_LSC)
   1524 		softint_schedule(adapter->link_si);
   1525 
   1526 	ixgbe_enable_intr(adapter);
   1527 	return 1;
   1528 }
   1529 
   1530 
   1531 #if defined(NETBSD_MSI_OR_MSIX)
   1532 /*********************************************************************
   1533  *
   1534  *  MSI Queue Interrupt Service routine
   1535  *
   1536  **********************************************************************/
   1537 void
   1538 ixgbe_msix_que(void *arg)
   1539 {
   1540 	struct ix_queue	*que = arg;
   1541 	struct adapter  *adapter = que->adapter;
   1542 	struct tx_ring	*txr = que->txr;
   1543 	struct rx_ring	*rxr = que->rxr;
   1544 	bool		more_tx, more_rx;
   1545 	u32		newitr = 0;
   1546 
   1547 	++que->irqs;
   1548 
   1549 	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
   1550 
   1551 	IXGBE_TX_LOCK(txr);
   1552 	more_tx = ixgbe_txeof(txr);
   1553 	IXGBE_TX_UNLOCK(txr);
   1554 
   1555 	/* Do AIM now? */
   1556 
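         	/*
         	 * The heuristic below uses the average bytes per packet
         	 * over the last interval as a proxy for the workload:
         	 * large frames suggest bulk transfers that tolerate
         	 * interrupt latency, so a larger EITR interval is chosen;
         	 * small frames keep the interval short for
         	 * latency-sensitive traffic.
         	 */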
   1557 	if (ixgbe_enable_aim == FALSE)
   1558 		goto no_calc;
    1559 	/*
    1560 	** Do Adaptive Interrupt Moderation:
    1561 	**  - Write out the last calculated setting
    1562 	**  - Calculate based on average size over
    1563 	**    the last interval.
    1564 	*/
   1565         if (que->eitr_setting)
   1566                 IXGBE_WRITE_REG(&adapter->hw,
   1567                     IXGBE_EITR(que->msix), que->eitr_setting);
   1568 
   1569         que->eitr_setting = 0;
   1570 
   1571         /* Idle, do nothing */
   1572         if ((txr->bytes == 0) && (rxr->bytes == 0))
   1573                 goto no_calc;
   1574 
    1575 	if ((txr->bytes) && (txr->packets))
    1576 		newitr = txr->bytes / txr->packets;
    1577 	if ((rxr->bytes) && (rxr->packets))
    1578 		newitr = max(newitr,
    1579 		    (rxr->bytes / rxr->packets));
   1580 	newitr += 24; /* account for hardware frame, crc */
   1581 
   1582 	/* set an upper boundary */
   1583 	newitr = min(newitr, 3000);
   1584 
   1585 	/* Be nice to the mid range */
   1586 	if ((newitr > 300) && (newitr < 1200))
   1587 		newitr = (newitr / 3);
   1588 	else
   1589 		newitr = (newitr / 2);
   1590 
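                 /*
                  * Encode for the MAC at hand: the 82598 wants the
                  * interval mirrored into the upper 16 bits, while newer
                  * MACs instead take IXGBE_EITR_CNT_WDIS (counter
                  * write-disable) along with the interval.
                  */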
   1591         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   1592                 newitr |= newitr << 16;
   1593         else
   1594                 newitr |= IXGBE_EITR_CNT_WDIS;
   1595 
   1596         /* save for next interrupt */
   1597         que->eitr_setting = newitr;
   1598 
   1599         /* Reset state */
   1600         txr->bytes = 0;
   1601         txr->packets = 0;
   1602         rxr->bytes = 0;
   1603         rxr->packets = 0;
   1604 
   1605 no_calc:
   1606 	if (more_tx || more_rx)
   1607 		softint_schedule(que->que_si);
   1608 	else /* Reenable this interrupt */
   1609 		ixgbe_enable_queue(adapter, que->msix);
   1610 	return;
   1611 }
   1612 
   1613 
   1614 static void
   1615 ixgbe_msix_link(void *arg)
   1616 {
   1617 	struct adapter	*adapter = arg;
   1618 	struct ixgbe_hw *hw = &adapter->hw;
   1619 	u32		reg_eicr;
   1620 
   1621 	++adapter->link_irq.ev_count;
   1622 
   1623 	/* First get the cause */
   1624 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   1625 	/* Clear interrupt with write */
   1626 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
   1627 
   1628 	/* Link status change */
   1629 	if (reg_eicr & IXGBE_EICR_LSC)
   1630 		softint_schedule(adapter->link_si);
   1631 
   1632 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   1633 #ifdef IXGBE_FDIR
   1634 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
   1635 			/* This is probably overkill :) */
   1636 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
   1637 				return;
   1638                 	/* Clear the interrupt */
   1639 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
   1640 			/* Turn off the interface */
   1641 			adapter->ifp->if_flags &= ~IFF_RUNNING;
   1642 			softint_schedule(adapter->fdir_si);
   1643 		} else
   1644 #endif
    1645 		if (reg_eicr & IXGBE_EICR_ECC) {
    1646 			device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
    1647 			    "Please Reboot!!\n");
    1648 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
    1649 		} else if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
   1652                 	/* Clear the interrupt */
   1653                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
   1654 			softint_schedule(adapter->msf_si);
   1655         	} else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
   1656                 	/* Clear the interrupt */
   1657                 	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
   1658 			softint_schedule(adapter->mod_si);
   1659 		}
   1660         }
   1661 
   1662 	/* Check for fan failure */
   1663 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
   1664 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
   1665                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1666 		    "REPLACE IMMEDIATELY!!\n");
   1667 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
   1668 	}
   1669 
   1670 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   1671 	return;
   1672 }
   1673 #endif
   1674 
   1675 /*********************************************************************
   1676  *
   1677  *  Media Ioctl callback
   1678  *
   1679  *  This routine is called whenever the user queries the status of
   1680  *  the interface using ifconfig.
   1681  *
   1682  **********************************************************************/
   1683 static void
   1684 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1685 {
   1686 	struct adapter *adapter = ifp->if_softc;
   1687 
   1688 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   1689 	IXGBE_CORE_LOCK(adapter);
   1690 	ixgbe_update_link_status(adapter);
   1691 
   1692 	ifmr->ifm_status = IFM_AVALID;
   1693 	ifmr->ifm_active = IFM_ETHER;
   1694 
   1695 	if (!adapter->link_active) {
   1696 		IXGBE_CORE_UNLOCK(adapter);
   1697 		return;
   1698 	}
   1699 
   1700 	ifmr->ifm_status |= IFM_ACTIVE;
   1701 
   1702 	switch (adapter->link_speed) {
   1703 		case IXGBE_LINK_SPEED_1GB_FULL:
   1704 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1705 			break;
   1706 		case IXGBE_LINK_SPEED_10GB_FULL:
   1707 			ifmr->ifm_active |= adapter->optics | IFM_FDX;
   1708 			break;
   1709 	}
   1710 
   1711 	IXGBE_CORE_UNLOCK(adapter);
   1712 
   1713 	return;
   1714 }
   1715 
   1716 /*********************************************************************
   1717  *
   1718  *  Media Ioctl callback
   1719  *
   1720  *  This routine is called when the user changes speed/duplex using
    1721  *  media/mediaopt options with ifconfig.
   1722  *
   1723  **********************************************************************/
   1724 static int
   1725 ixgbe_media_change(struct ifnet * ifp)
   1726 {
   1727 	struct adapter *adapter = ifp->if_softc;
   1728 	struct ifmedia *ifm = &adapter->media;
   1729 
   1730 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   1731 
   1732 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1733 		return (EINVAL);
   1734 
   1735         switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1736         case IFM_AUTO:
   1737                 adapter->hw.phy.autoneg_advertised =
   1738 		    IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
   1739                 break;
   1740         default:
   1741                 device_printf(adapter->dev, "Only auto media type\n");
   1742 		return (EINVAL);
   1743         }
   1744 
   1745 	return (0);
   1746 }
   1747 
   1748 /*********************************************************************
   1749  *
   1750  *  This routine maps the mbufs to tx descriptors, allowing the
   1751  *  TX engine to transmit the packets.
   1752  *  	- return 0 on success, positive on failure
   1753  *
   1754  **********************************************************************/
   1755 
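         /*
         ** The flow: load the mbuf chain into a DMA map, emit an
         ** optional offload context descriptor (checksum/VLAN or TSO),
         ** then one data descriptor per DMA segment.  Only the first
         ** tx_buffer of the packet records the EOP index, and the first
         ** and last DMA maps are swapped so that each map is freed
         ** exactly once at teardown time.
         */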
   1756 static int
   1757 ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
   1758 {
   1759 	struct m_tag *mtag;
   1760 	struct adapter  *adapter = txr->adapter;
   1761 	struct ethercom *ec = &adapter->osdep.ec;
   1762 	u32		olinfo_status = 0, cmd_type_len;
   1763 	u32		paylen = 0;
   1764 	int             i, j, error;
   1765 	int		first, last = 0;
   1766 	bus_dmamap_t	map;
   1767 	struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
   1768 	union ixgbe_adv_tx_desc *txd = NULL;
   1769 
   1770 	/* Basic descriptor defines */
   1771         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
   1772 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
   1773 
   1774 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
   1775         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
   1776 
   1777         /*
   1778          * Important to capture the first descriptor
   1779          * used because it will contain the index of
   1780          * the one we tell the hardware to report back
   1781          */
   1782         first = txr->next_avail_desc;
   1783 	txbuf = &txr->tx_buffers[first];
   1784 	txbuf_mapped = txbuf;
   1785 	map = txbuf->map;
   1786 
   1787 	/*
   1788 	 * Map the packet for DMA.
   1789 	 */
   1790 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
   1791 	    m_head, BUS_DMA_NOWAIT);
   1792 
   1793 	switch (error) {
   1794 	case EAGAIN:
   1795 		adapter->eagain_tx_dma_setup.ev_count++;
   1796 		return EAGAIN;
   1797 	case ENOMEM:
   1798 		adapter->enomem_tx_dma_setup.ev_count++;
   1799 		return EAGAIN;
   1800 	case EFBIG:
   1801 		adapter->efbig_tx_dma_setup.ev_count++;
   1802 		return error;
   1803 	case EINVAL:
   1804 		adapter->einval_tx_dma_setup.ev_count++;
   1805 		return error;
   1806 	default:
   1807 		adapter->other_tx_dma_setup.ev_count++;
   1808 		return error;
   1809 	case 0:
   1810 		break;
   1811 	}
   1812 
   1813 	/* Make certain there are enough descriptors */
   1814 	if (map->dm_nsegs > txr->tx_avail - 2) {
   1815 		txr->no_desc_avail.ev_count++;
   1816 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   1817 		return EAGAIN;
   1818 	}
   1819 
   1820 	/*
   1821 	** Set up the appropriate offload context
   1822 	** this becomes the first descriptor of
   1823 	** a packet.
   1824 	*/
   1825 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
   1826 		if (ixgbe_tso_setup(txr, m_head, &paylen)) {
   1827 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1828 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1829 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1830 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1831 			++adapter->tso_tx.ev_count;
    1832 		} else {
    1833 			++adapter->tso_err.ev_count;
    1834 			/* Unload the DMA map so it is not leaked. */
         			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
    1835 			return ENXIO;
    1836 		}
   1837 	} else
   1838 		olinfo_status |= ixgbe_tx_ctx_setup(txr, m_head);
   1839 
    1840 #ifdef IXGBE_IEEE1588
    1841 	/* This is changing soon to an mtag detection: when the
    1842 	 * mbuf carries a TSTAMP mtag, set
    1843 	 *	cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP; */
    1844 #endif
   1845 
   1846 #ifdef IXGBE_FDIR
   1847 	/* Do the flow director magic */
   1848 	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
   1849 		++txr->atr_count;
   1850 		if (txr->atr_count >= atr_sample_rate) {
   1851 			ixgbe_atr(txr, m_head);
   1852 			txr->atr_count = 0;
   1853 		}
   1854 	}
   1855 #endif
   1856         /* Record payload length */
   1857 	if (paylen == 0)
   1858         	olinfo_status |= m_head->m_pkthdr.len <<
   1859 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
   1860 
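         	/*
         	 * Write one advanced data descriptor per DMA segment; the
         	 * last one is tagged EOP|RS below so the hardware writes
         	 * back completion status for the whole frame.
         	 */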
   1861 	i = txr->next_avail_desc;
   1862 	for (j = 0; j < map->dm_nsegs; j++) {
   1863 		bus_size_t seglen;
   1864 		bus_addr_t segaddr;
   1865 
   1866 		txbuf = &txr->tx_buffers[i];
   1867 		txd = &txr->tx_base[i];
   1868 		seglen = map->dm_segs[j].ds_len;
   1869 		segaddr = htole64(map->dm_segs[j].ds_addr);
   1870 
   1871 		txd->read.buffer_addr = segaddr;
   1872 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
    1873 		    cmd_type_len | seglen);
   1874 		txd->read.olinfo_status = htole32(olinfo_status);
   1875 		last = i; /* descriptor that will get completion IRQ */
   1876 
   1877 		if (++i == adapter->num_tx_desc)
   1878 			i = 0;
   1879 
   1880 		txbuf->m_head = NULL;
   1881 		txbuf->eop_index = -1;
   1882 	}
   1883 
   1884 	txd->read.cmd_type_len |=
   1885 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
   1886 	txr->tx_avail -= map->dm_nsegs;
   1887 	txr->next_avail_desc = i;
   1888 
   1889 	txbuf->m_head = m_head;
   1890 	/* We exchange the maps instead of copying because otherwise
   1891 	 * we end up with many pointers to the same map and we free
   1892 	 * one map twice in ixgbe_free_transmit_structures().  Who
   1893 	 * knows what other problems this caused.  --dyoung
   1894 	 */
   1895 	txr->tx_buffers[first].map = txbuf->map;
   1896 	txbuf->map = map;
   1897 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
   1898 	    BUS_DMASYNC_PREWRITE);
   1899 
   1900         /* Set the index of the descriptor that will be marked done */
   1901         txbuf = &txr->tx_buffers[first];
   1902 	txbuf->eop_index = last;
   1903 
   1904         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1905 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1906 	/*
   1907 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
   1908 	 * hardware that this frame is available to transmit.
   1909 	 */
   1910 	++txr->total_packets.ev_count;
   1911 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
   1912 
   1913 	return 0;
   1914 }
   1915 
   1916 static void
   1917 ixgbe_set_promisc(struct adapter *adapter)
   1918 {
   1919 	u_int32_t       reg_rctl;
   1920 	struct ifnet   *ifp = adapter->ifp;
   1921 
   1922 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1923 	reg_rctl &= (~IXGBE_FCTRL_UPE);
   1924 	reg_rctl &= (~IXGBE_FCTRL_MPE);
   1925 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1926 
   1927 	if (ifp->if_flags & IFF_PROMISC) {
   1928 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1929 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1930 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   1931 		reg_rctl |= IXGBE_FCTRL_MPE;
   1932 		reg_rctl &= ~IXGBE_FCTRL_UPE;
   1933 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1934 	}
   1935 	return;
   1936 }
   1937 
   1938 
   1939 /*********************************************************************
   1940  *  Multicast Update
   1941  *
   1942  *  This routine is called whenever multicast address list is updated.
   1943  *
   1944  **********************************************************************/
   1945 #define IXGBE_RAR_ENTRIES 16
   1946 
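         /*
         ** The filter is programmed from a flat array of 6-byte
         ** addresses (adapter->mta); ixgbe_mc_array_itr below walks
         ** that array on behalf of the shared code.  Address ranges
         ** and table overflow fall back to multicast-promiscuous mode
         ** via IXGBE_FCTRL_MPE.
         */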
   1947 static void
   1948 ixgbe_set_multi(struct adapter *adapter)
   1949 {
   1950 	struct ether_multi *enm;
   1951 	struct ether_multistep step;
   1952 	u32	fctrl;
   1953 	u8	*mta;
   1954 	u8	*update_ptr;
   1955 	int	mcnt = 0;
   1956 	struct ethercom *ec = &adapter->osdep.ec;
   1957 	struct ifnet   *ifp = adapter->ifp;
   1958 
   1959 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   1960 
   1961 	mta = adapter->mta;
   1962 	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
   1963 	    MAX_NUM_MULTICAST_ADDRESSES);
   1964 
   1965 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1967 	if (ifp->if_flags & IFF_PROMISC)
   1968 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1969 	else if (ifp->if_flags & IFF_ALLMULTI) {
   1970 		fctrl |= IXGBE_FCTRL_MPE;
   1971 		fctrl &= ~IXGBE_FCTRL_UPE;
   1972 	} else
   1973 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1974 
   1975 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   1976 
   1977 	ETHER_FIRST_MULTI(step, ec, enm);
    1978 	while (enm != NULL) {
         		/* Don't overflow the mta array; fall back to allmulti. */
         		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) {
         			fctrl |= IXGBE_FCTRL_MPE;
         			IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
         			break;
         		}
   1979 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1980 		           ETHER_ADDR_LEN) != 0) {
   1981 			fctrl |= IXGBE_FCTRL_MPE;
   1982 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   1983 			break;
   1984 		}
   1985 		bcopy(enm->enm_addrlo,
   1986 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1987 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1988 		mcnt++;
   1989 		ETHER_NEXT_MULTI(step, enm);
   1990 	}
   1991 
   1992 	update_ptr = mta;
   1993 	ixgbe_update_mc_addr_list(&adapter->hw,
   1994 	    update_ptr, mcnt, ixgbe_mc_array_itr);
   1995 
   1996 	return;
   1997 }
   1998 
   1999 /*
   2000  * This is an iterator function now needed by the multicast
   2001  * shared code. It simply feeds the shared code routine the
   2002  * addresses in the array of ixgbe_set_multi() one by one.
   2003  */
   2004 static u8 *
   2005 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   2006 {
   2007 	u8 *addr = *update_ptr;
   2008 	u8 *newptr;
   2009 	*vmdq = 0;
   2010 
   2011 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   2012 	*update_ptr = newptr;
   2013 	return addr;
   2014 }
   2015 
   2016 
   2017 /*********************************************************************
   2018  *  Timer routine
   2019  *
    2020  *  This routine checks for link status, updates statistics,
   2021  *  and runs the watchdog check.
   2022  *
   2023  **********************************************************************/
   2024 
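         /*
         ** A ring marked IXGBE_QUEUE_HUNG by the transmit cleanup code
         ** triggers a reset below.  The check is skipped while the MAC
         ** reports TX paused (TFCS_TXOFF) so that flow control cannot
         ** cause a spurious watchdog.
         */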
   2025 static void
   2026 ixgbe_local_timer1(void *arg)
   2027 {
   2028 	struct adapter *adapter = arg;
   2029 	device_t	dev = adapter->dev;
   2030 	struct tx_ring *txr = adapter->tx_rings;
   2031 
   2032 	KASSERT(mutex_owned(&adapter->core_mtx));
   2033 
   2034 	/* Check for pluggable optics */
   2035 	if (adapter->sfp_probe)
   2036 		if (!ixgbe_sfp_probe(adapter))
   2037 			goto out; /* Nothing to do */
   2038 
   2039 	ixgbe_update_link_status(adapter);
   2040 	ixgbe_update_stats_counters(adapter);
   2041 
   2042 	/*
   2043 	 * If the interface has been paused
   2044 	 * then don't do the watchdog check
   2045 	 */
   2046 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
   2047 		goto out;
   2048 
   2049 	/*
   2050 	** Check status on the TX queues for a hang
   2051 	*/
   2052         for (int i = 0; i < adapter->num_queues; i++, txr++)
   2053 		if (txr->queue_status == IXGBE_QUEUE_HUNG)
   2054 			goto hung;
   2055 
   2056 out:
   2057 	ixgbe_rearm_queues(adapter, adapter->que_mask);
   2058 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   2059 	return;
   2060 
   2061 hung:
    2062 	device_printf(dev, "Watchdog timeout -- resetting\n");
    2063 	device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
    2064 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
    2065 	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
    2066 	device_printf(dev, "TX(%d) desc avail = %d, "
    2067 	    "Next TX to Clean = %d\n",
    2068 	    txr->me, txr->tx_avail, txr->next_to_clean);
   2069 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   2070 	adapter->watchdog_events.ev_count++;
   2071 	ixgbe_init_locked(adapter);
   2072 }
   2073 
   2074 static void
   2075 ixgbe_local_timer(void *arg)
   2076 {
   2077 	struct adapter *adapter = arg;
   2078 
   2079 	IXGBE_CORE_LOCK(adapter);
   2080 	ixgbe_local_timer1(adapter);
   2081 	IXGBE_CORE_UNLOCK(adapter);
   2082 }
   2083 
   2084 /*
   2085 ** Note: this routine updates the OS on the link state
   2086 **	the real check of the hardware only happens with
   2087 **	a link interrupt.
   2088 */
   2089 static void
   2090 ixgbe_update_link_status(struct adapter *adapter)
   2091 {
   2092 	struct ifnet	*ifp = adapter->ifp;
   2093 	struct tx_ring *txr = adapter->tx_rings;
   2094 	device_t dev = adapter->dev;
   2095 
   2096 
    2097 	if (adapter->link_up) {
    2098 		if (adapter->link_active == FALSE) {
    2099 			if (bootverbose)
    2100 				device_printf(dev, "Link is up %d Gbps %s\n",
    2101 				    ((adapter->link_speed ==
         				    IXGBE_LINK_SPEED_10GB_FULL) ? 10 : 1),
    2102 				    "Full Duplex");
   2103 			adapter->link_active = TRUE;
   2104 			if_link_state_change(ifp, LINK_STATE_UP);
   2105 		}
   2106 	} else { /* Link down */
   2107 		if (adapter->link_active == TRUE) {
   2108 			if (bootverbose)
   2109 				device_printf(dev,"Link is Down\n");
   2110 			if_link_state_change(ifp, LINK_STATE_DOWN);
   2111 			adapter->link_active = FALSE;
   2112 			for (int i = 0; i < adapter->num_queues;
   2113 			    i++, txr++)
   2114 				txr->queue_status = IXGBE_QUEUE_IDLE;
   2115 		}
   2116 	}
   2117 
   2118 	return;
   2119 }
   2120 
   2121 
   2122 static void
   2123 ixgbe_ifstop(struct ifnet *ifp, int disable)
   2124 {
   2125 	struct adapter *adapter = ifp->if_softc;
   2126 
   2127 	IXGBE_CORE_LOCK(adapter);
   2128 	ixgbe_stop(adapter);
   2129 	IXGBE_CORE_UNLOCK(adapter);
   2130 }
   2131 
   2132 /*********************************************************************
   2133  *
   2134  *  This routine disables all traffic on the adapter by issuing a
   2135  *  global reset on the MAC and deallocates TX/RX buffers.
   2136  *
   2137  **********************************************************************/
   2138 
   2139 static void
   2140 ixgbe_stop(void *arg)
   2141 {
    2142 	struct adapter *adapter = arg;
    2143 	struct ixgbe_hw *hw = &adapter->hw;
    2144 	struct ifnet	*ifp = adapter->ifp;
   2146 
   2147 	KASSERT(mutex_owned(&adapter->core_mtx));
   2148 
   2149 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   2150 	ixgbe_disable_intr(adapter);
   2151 
   2152 	/* Tell the stack that the interface is no longer active */
   2153 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2154 
   2155 	ixgbe_reset_hw(hw);
   2156 	hw->adapter_stopped = FALSE;
   2157 	ixgbe_stop_adapter(hw);
   2158 	/* Turn off the laser */
   2159 	if (hw->phy.multispeed_fiber)
   2160 		ixgbe_disable_tx_laser(hw);
   2161 	callout_stop(&adapter->timer);
   2162 
   2163 	/* reprogram the RAR[0] in case user changed it. */
   2164 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   2165 
   2166 	return;
   2167 }
   2168 
   2169 
   2170 /*********************************************************************
   2171  *
   2172  *  Determine hardware revision.
   2173  *
   2174  **********************************************************************/
   2175 static void
   2176 ixgbe_identify_hardware(struct adapter *adapter)
   2177 {
   2178 	pcitag_t tag;
   2179 	pci_chipset_tag_t pc;
   2180 	pcireg_t subid, id;
   2181 	struct ixgbe_hw *hw = &adapter->hw;
   2182 
   2183 	pc = adapter->osdep.pc;
   2184 	tag = adapter->osdep.tag;
   2185 
   2186 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   2187 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   2188 
   2189 	/* Save off the information about this board */
   2190 	hw->vendor_id = PCI_VENDOR(id);
   2191 	hw->device_id = PCI_PRODUCT(id);
   2192 	hw->revision_id =
   2193 	    PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   2194 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   2195 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   2196 
   2197 	/* We need this here to set the num_segs below */
   2198 	ixgbe_set_mac_type(hw);
   2199 
   2200 	/* Pick up the 82599 and VF settings */
   2201 	if (hw->mac.type != ixgbe_mac_82598EB) {
   2202 		hw->phy.smart_speed = ixgbe_smart_speed;
   2203 		adapter->num_segs = IXGBE_82599_SCATTER;
   2204 	} else
   2205 		adapter->num_segs = IXGBE_82598_SCATTER;
   2206 
   2207 	return;
   2208 }
   2209 
   2210 /*********************************************************************
   2211  *
   2212  *  Determine optic type
   2213  *
   2214  **********************************************************************/
   2215 static void
   2216 ixgbe_setup_optics(struct adapter *adapter)
   2217 {
   2218 	struct ixgbe_hw *hw = &adapter->hw;
   2219 	int		layer;
   2220 
   2221 	layer = ixgbe_get_supported_physical_layer(hw);
   2222 	switch (layer) {
   2223 		case IXGBE_PHYSICAL_LAYER_10GBASE_T:
   2224 			adapter->optics = IFM_10G_T;
   2225 			break;
   2226 		case IXGBE_PHYSICAL_LAYER_1000BASE_T:
   2227 			adapter->optics = IFM_1000_T;
   2228 			break;
   2229 		case IXGBE_PHYSICAL_LAYER_10GBASE_LR:
   2230 		case IXGBE_PHYSICAL_LAYER_10GBASE_LRM:
   2231 			adapter->optics = IFM_10G_LR;
   2232 			break;
   2233 		case IXGBE_PHYSICAL_LAYER_10GBASE_SR:
   2234 			adapter->optics = IFM_10G_SR;
   2235 			break;
   2236 		case IXGBE_PHYSICAL_LAYER_10GBASE_KX4:
   2237 		case IXGBE_PHYSICAL_LAYER_10GBASE_CX4:
   2238 			adapter->optics = IFM_10G_CX4;
   2239 			break;
   2240 		case IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU:
   2241 			adapter->optics = IFM_10G_TWINAX;
   2242 			break;
   2243 		case IXGBE_PHYSICAL_LAYER_1000BASE_KX:
   2244 		case IXGBE_PHYSICAL_LAYER_10GBASE_KR:
   2245 		case IXGBE_PHYSICAL_LAYER_10GBASE_XAUI:
   2246 		case IXGBE_PHYSICAL_LAYER_UNKNOWN:
   2247 		default:
   2248 			adapter->optics = IFM_ETHER | IFM_AUTO;
   2249 			break;
   2250 	}
   2251 	return;
   2252 }
   2253 
   2254 /*********************************************************************
   2255  *
   2256  *  Setup the Legacy or MSI Interrupt handler
   2257  *
   2258  **********************************************************************/
   2259 static int
   2260 ixgbe_allocate_legacy(struct adapter *adapter, const struct pci_attach_args *pa)
   2261 {
   2262 	device_t dev = adapter->dev;
   2263 	struct		ix_queue *que = adapter->queues;
   2269 
   2270 	/* We allocate a single interrupt resource */
   2271  	if (pci_intr_map(pa, &adapter->osdep.ih) != 0) {
   2272 		aprint_error_dev(dev, "unable to map interrupt\n");
   2273 		return ENXIO;
   2274 	} else {
   2275 		aprint_normal_dev(dev, "interrupting at %s\n",
   2276 		    pci_intr_string(adapter->osdep.pc, adapter->osdep.ih));
   2277 	}
   2278 
   2279 	/*
   2280 	 * Try allocating a fast interrupt and the associated deferred
   2281 	 * processing contexts.
   2282 	 */
   2283 	que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que, que);
   2284 
   2285 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2286 	adapter->link_si =
   2287 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2288 	adapter->mod_si =
   2289 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2290 	adapter->msf_si =
   2291 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2292 
   2293 #ifdef IXGBE_FDIR
   2294 	adapter->fdir_si =
   2295 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2296 #endif
   2297 	if (que->que_si == NULL ||
   2298 	    adapter->link_si == NULL ||
   2299 	    adapter->mod_si == NULL ||
   2300 #ifdef IXGBE_FDIR
   2301 	    adapter->fdir_si == NULL ||
   2302 #endif
   2303 	    adapter->msf_si == NULL) {
   2304 		aprint_error_dev(dev,
   2305 		    "could not establish software interrupts\n");
   2306 		return ENXIO;
   2307 	}
   2308 
   2309 	adapter->osdep.intr = pci_intr_establish(adapter->osdep.pc,
   2310 	    adapter->osdep.ih, IPL_NET, ixgbe_legacy_irq, que);
   2311 	if (adapter->osdep.intr == NULL) {
   2312 		aprint_error_dev(dev, "failed to register interrupt handler\n");
   2313 		softint_disestablish(que->que_si);
   2314 		softint_disestablish(adapter->link_si);
   2315 		softint_disestablish(adapter->mod_si);
   2316 		softint_disestablish(adapter->msf_si);
   2317 #ifdef IXGBE_FDIR
   2318 		softint_disestablish(adapter->fdir_si);
   2319 #endif
   2320 		return ENXIO;
   2321 	}
   2322 	/* For simplicity in the handlers */
   2323 	adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
   2324 
   2325 	return (0);
   2326 }
   2327 
   2328 
   2329 /*********************************************************************
   2330  *
   2331  *  Setup MSIX Interrupt resources and handlers
   2332  *
   2333  **********************************************************************/
   2334 static int
   2335 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2336 {
   2337 #if !defined(NETBSD_MSI_OR_MSIX)
   2338 	return 0;
   2339 #else
   2340 	device_t        dev = adapter->dev;
   2341 	struct 		ix_queue *que = adapter->queues;
   2342 	int 		error, rid, vector = 0;
   2343 
   2344 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
   2345 		rid = vector + 1;
   2346 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
   2347 		    RF_SHAREABLE | RF_ACTIVE);
   2348 		if (que->res == NULL) {
   2349 			aprint_error_dev(dev,"Unable to allocate"
   2350 		    	    " bus resource: que interrupt [%d]\n", vector);
   2351 			return (ENXIO);
   2352 		}
   2353 		/* Set the handler function */
   2354 		error = bus_setup_intr(dev, que->res,
   2355 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   2356 		    ixgbe_msix_que, que, &que->tag);
   2357 		if (error) {
   2358 			que->res = NULL;
   2359 			aprint_error_dev(dev,
   2360 			    "Failed to register QUE handler\n");
   2361 			return error;
   2362 		}
   2363 #if __FreeBSD_version >= 800504
   2364 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
   2365 #endif
   2366 		que->msix = vector;
   2367         	adapter->que_mask |= (u64)(1 << que->msix);
   2368 		/*
   2369 		** Bind the msix vector, and thus the
   2370 		** ring to the corresponding cpu.
   2371 		*/
   2372 		if (adapter->num_queues > 1)
   2373 			bus_bind_intr(dev, que->res, i);
   2374 
    2375 		que->que_si = softint_establish(SOFTINT_NET,
         		    ixgbe_handle_que, que);
   2376 		if (que->que_si == NULL) {
   2377 			aprint_error_dev(dev,
   2378 			    "could not establish software interrupt\n");
   2379 		}
   2380 	}
   2381 
   2382 	/* and Link */
   2383 	rid = vector + 1;
   2384 	adapter->res = bus_alloc_resource_any(dev,
   2385     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
   2386 	if (!adapter->res) {
   2387 		aprint_error_dev(dev,"Unable to allocate bus resource: "
   2388 		    "Link interrupt [%d]\n", rid);
   2389 		return (ENXIO);
   2390 	}
   2391 	/* Set the link handler function */
   2392 	error = bus_setup_intr(dev, adapter->res,
   2393 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
   2394 	    ixgbe_msix_link, adapter, &adapter->tag);
   2395 	if (error) {
   2396 		adapter->res = NULL;
   2397 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2398 		return (error);
   2399 	}
   2400 #if __FreeBSD_version >= 800504
   2401 	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
   2402 #endif
   2403 	adapter->linkvec = vector;
   2404 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2405 	adapter->link_si =
   2406 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2407 	adapter->mod_si =
   2408 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2409 	adapter->msf_si =
   2410 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2411 #ifdef IXGBE_FDIR
   2412 	adapter->fdir_si =
   2413 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2414 #endif
   2415 
   2416 	return (0);
   2417 #endif
   2418 }
   2419 
   2420 /*
   2421  * Setup Either MSI/X or MSI
   2422  */
   2423 static int
   2424 ixgbe_setup_msix(struct adapter *adapter)
   2425 {
   2426 #if !defined(NETBSD_MSI_OR_MSIX)
   2427 	return 0;
   2428 #else
   2429 	device_t dev = adapter->dev;
   2430 	int rid, want, queues, msgs;
   2431 
   2432 	/* Override by tuneable */
   2433 	if (ixgbe_enable_msix == 0)
   2434 		goto msi;
   2435 
   2436 	/* First try MSI/X */
   2437 	rid = PCI_BAR(MSIX_82598_BAR);
   2438 	adapter->msix_mem = bus_alloc_resource_any(dev,
   2439 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   2440        	if (!adapter->msix_mem) {
   2441 		rid += 4;	/* 82599 maps in higher BAR */
   2442 		adapter->msix_mem = bus_alloc_resource_any(dev,
   2443 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
   2444 	}
   2445        	if (!adapter->msix_mem) {
   2446 		/* May not be enabled */
   2447 		device_printf(adapter->dev,
   2448 		    "Unable to map MSIX table \n");
   2449 		goto msi;
   2450 	}
   2451 
   2452 	msgs = pci_msix_count(dev);
   2453 	if (msgs == 0) { /* system has msix disabled */
   2454 		bus_release_resource(dev, SYS_RES_MEMORY,
   2455 		    rid, adapter->msix_mem);
   2456 		adapter->msix_mem = NULL;
   2457 		goto msi;
   2458 	}
   2459 
   2460 	/* Figure out a reasonable auto config value */
   2461 	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
   2462 
   2463 	if (ixgbe_num_queues != 0)
   2464 		queues = ixgbe_num_queues;
   2465 	/* Set max queues to 8 when autoconfiguring */
   2466 	else if ((ixgbe_num_queues == 0) && (queues > 8))
   2467 		queues = 8;
   2468 
   2469 	/*
   2470 	** Want one vector (RX/TX pair) per queue
   2471 	** plus an additional for Link.
   2472 	*/
   2473 	want = queues + 1;
   2474 	if (msgs >= want)
   2475 		msgs = want;
   2476 	else {
   2477                	device_printf(adapter->dev,
   2478 		    "MSIX Configuration Problem, "
   2479 		    "%d vectors but %d queues wanted!\n",
   2480 		    msgs, want);
   2481 		return (0); /* Will go to Legacy setup */
   2482 	}
   2483 	if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
   2484                	device_printf(adapter->dev,
   2485 		    "Using MSIX interrupts with %d vectors\n", msgs);
   2486 		adapter->num_queues = queues;
   2487 		return (msgs);
   2488 	}
   2489 msi:
   2490        	msgs = pci_msi_count(dev);
   2491        	if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
   2492                	device_printf(adapter->dev,"Using MSI interrupt\n");
   2493 	return (msgs);
   2494 #endif
   2495 }
   2496 
   2497 
   2498 static int
   2499 ixgbe_allocate_pci_resources(struct adapter *adapter, const struct pci_attach_args *pa)
   2500 {
   2501 	pcireg_t	memtype;
   2502 	device_t        dev = adapter->dev;
   2503 	bus_addr_t addr;
   2504 	int flags;
   2505 
   2506 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   2507 	switch (memtype) {
   2508 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2509 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2510 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   2511 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   2512 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   2513 			goto map_err;
   2514 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   2515 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   2516 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   2517 		}
   2518 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   2519 		     adapter->osdep.mem_size, flags,
   2520 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   2521 map_err:
   2522 			adapter->osdep.mem_size = 0;
   2523 			aprint_error_dev(dev, "unable to map BAR0\n");
   2524 			return ENXIO;
   2525 		}
   2526 		break;
   2527 	default:
   2528 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   2529 		return ENXIO;
   2530 	}
   2531 
   2532 	/* Legacy defaults */
   2533 	adapter->num_queues = 1;
   2534 	adapter->hw.back = &adapter->osdep;
   2535 
   2536 	/*
   2537 	** Now setup MSI or MSI/X, should
   2538 	** return us the number of supported
   2539 	** vectors. (Will be 1 for MSI)
   2540 	*/
   2541 	adapter->msix = ixgbe_setup_msix(adapter);
   2542 	return (0);
   2543 }
   2544 
   2545 static void
   2546 ixgbe_free_pci_resources(struct adapter * adapter)
   2547 {
   2548 #if defined(NETBSD_MSI_OR_MSIX)
   2549 	struct 		ix_queue *que = adapter->queues;
   2550 #endif
   2551 	device_t	dev = adapter->dev;
   2552 	int		rid, memrid;
   2553 
   2554 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   2555 		memrid = PCI_BAR(MSIX_82598_BAR);
   2556 	else
   2557 		memrid = PCI_BAR(MSIX_82599_BAR);
   2558 
   2559 #if defined(NETBSD_MSI_OR_MSIX)
   2560 	/*
   2561 	** There is a slight possibility of a failure mode
   2562 	** in attach that will result in entering this function
   2563 	** before interrupt resources have been initialized, and
   2564 	** in that case we do not want to execute the loops below
   2565 	** We can detect this reliably by the state of the adapter
   2566 	** res pointer.
   2567 	*/
   2568 	if (adapter->res == NULL)
   2569 		goto mem;
   2570 
   2571 	/*
   2572 	**  Release all msix queue resources:
   2573 	*/
   2574 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2575 		rid = que->msix + 1;
   2576 		if (que->tag != NULL) {
   2577 			bus_teardown_intr(dev, que->res, que->tag);
   2578 			que->tag = NULL;
   2579 		}
   2580 		if (que->res != NULL)
   2581 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
   2582 	}
   2583 #endif
   2584 
   2585 	/* Clean the Legacy or Link interrupt last */
   2586 	if (adapter->linkvec) /* we are doing MSIX */
   2587 		rid = adapter->linkvec + 1;
   2588 	else
    2589 		rid = (adapter->msix != 0) ? 1 : 0;
   2590 
   2591 	printf("%s: disestablishing interrupt handler\n", device_xname(dev));
   2592 	pci_intr_disestablish(adapter->osdep.pc, adapter->osdep.intr);
   2593 	adapter->osdep.intr = NULL;
   2594 
   2595 #if defined(NETBSD_MSI_OR_MSIX)
   2596 mem:
   2597 	if (adapter->msix)
   2598 		pci_release_msi(dev);
   2599 
   2600 	if (adapter->msix_mem != NULL)
   2601 		bus_release_resource(dev, SYS_RES_MEMORY,
   2602 		    memrid, adapter->msix_mem);
   2603 #endif
   2604 
   2605 	if (adapter->osdep.mem_size != 0) {
   2606 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   2607 		    adapter->osdep.mem_bus_space_handle,
   2608 		    adapter->osdep.mem_size);
   2609 	}
   2610 
   2611 	return;
   2612 }
   2613 
   2614 /*********************************************************************
   2615  *
   2616  *  Setup networking device structure and register an interface.
   2617  *
   2618  **********************************************************************/
   2619 static int
   2620 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   2621 {
   2622 	struct ethercom *ec = &adapter->osdep.ec;
   2623 	struct ixgbe_hw *hw = &adapter->hw;
   2624 	struct ifnet   *ifp;
   2625 
   2626 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   2627 
   2628 	ifp = adapter->ifp = &ec->ec_if;
   2629 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   2630 	ifp->if_mtu = ETHERMTU;
   2631 	ifp->if_baudrate = 1000000000;
   2632 	ifp->if_init = ixgbe_init;
   2633 	ifp->if_stop = ixgbe_ifstop;
   2634 	ifp->if_softc = adapter;
   2635 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2636 	ifp->if_ioctl = ixgbe_ioctl;
   2637 	ifp->if_start = ixgbe_start;
   2638 #if __FreeBSD_version >= 800000
   2639 	ifp->if_transmit = ixgbe_mq_start;
   2640 	ifp->if_qflush = ixgbe_qflush;
   2641 #endif
   2642 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
   2643 
   2644 	if_attach(ifp);
   2645 	ether_ifattach(ifp, adapter->hw.mac.addr);
   2646 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   2647 
   2648 	adapter->max_frame_size =
   2649 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   2650 
   2651 	/*
   2652 	 * Tell the upper layer(s) we support long frames.
   2653 	 */
   2654 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   2655 
   2656 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
   2657 	ifp->if_capenable = 0;
   2658 
   2659 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   2660 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
   2661 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2662 	ec->ec_capenable = ec->ec_capabilities;
   2663 
    2664 	/* Advertise LRO capability, but leave it disabled by default */
   2665 	ifp->if_capabilities |= IFCAP_LRO;
   2666 
    2667 	/*
    2668 	** Don't turn this on by default: if vlans are
    2669 	** created on another pseudo device (e.g. lagg)
    2670 	** then vlan events are not passed through, breaking
    2671 	** operation, but with HW FILTER off it works. If
    2672 	** using vlans directly on this driver you can
    2673 	** enable this and get full hardware tag filtering.
    2674 	*/
   2675 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   2676 
   2677 	/*
   2678 	 * Specify the media types supported by this adapter and register
   2679 	 * callbacks to update media and link information
   2680 	 */
   2681 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   2682 		     ixgbe_media_status);
   2683 	ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
   2684 	ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
   2685 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
   2686 		ifmedia_add(&adapter->media,
   2687 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2688 		ifmedia_add(&adapter->media,
   2689 		    IFM_ETHER | IFM_1000_T, 0, NULL);
   2690 	}
   2691 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   2692 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   2693 
   2694 	return (0);
   2695 }
   2696 
   2697 static void
   2698 ixgbe_config_link(struct adapter *adapter)
   2699 {
   2700 	struct ixgbe_hw *hw = &adapter->hw;
   2701 	u32	autoneg, err = 0;
   2702 	bool	sfp, negotiate;
   2703 
   2704 	sfp = ixgbe_is_sfp(hw);
   2705 
   2706 	if (sfp) {
   2707 		if (hw->phy.multispeed_fiber) {
   2708 			hw->mac.ops.setup_sfp(hw);
   2709 			ixgbe_enable_tx_laser(hw);
   2710 			softint_schedule(adapter->msf_si);
   2711 		} else {
   2712 			softint_schedule(adapter->mod_si);
   2713 		}
   2714 	} else {
   2715 		if (hw->mac.ops.check_link)
   2716 			err = ixgbe_check_link(hw, &autoneg,
   2717 			    &adapter->link_up, FALSE);
   2718 		if (err)
   2719 			goto out;
   2720 		autoneg = hw->phy.autoneg_advertised;
   2721 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    2722 			err = hw->mac.ops.get_link_capabilities(hw,
   2723 			    &autoneg, &negotiate);
   2724 		if (err)
   2725 			goto out;
   2726 		if (hw->mac.ops.setup_link)
   2727                 	err = hw->mac.ops.setup_link(hw, autoneg,
   2728 			    negotiate, adapter->link_up);
   2729 	}
   2730 out:
   2731 	return;
   2732 }
   2733 
   2734 /********************************************************************
   2735  * Manage DMA'able memory.
   2736  *******************************************************************/
   2737 
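         /*
         ** ixgbe_dma_malloc performs the usual five-step bus_dma
         ** sequence: create a tag, allocate raw segments, map them into
         ** kernel VA, create a DMA map, and load it.  The fail_* labels
         ** unwind in reverse order; ixgbe_dma_free mirrors the same
         ** teardown.
         */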
   2738 static int
   2739 ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
   2740 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2741 {
   2742 	device_t dev = adapter->dev;
   2743 	int             r, rsegs;
   2744 
   2745 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2746 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2747 			       size,	/* maxsize */
   2748 			       1,	/* nsegments */
   2749 			       size,	/* maxsegsize */
   2750 			       BUS_DMA_ALLOCNOW,	/* flags */
   2751 			       &dma->dma_tag);
   2752 	if (r != 0) {
   2753 		aprint_error_dev(dev,
   2754 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
   2755 		goto fail_0;
   2756 	}
   2757 
   2758 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2759 		size,
   2760 		dma->dma_tag->dt_alignment,
   2761 		dma->dma_tag->dt_boundary,
   2762 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2763 	if (r != 0) {
   2764 		aprint_error_dev(dev,
   2765 		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
   2766 		goto fail_1;
   2767 	}
   2768 
   2769 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2770 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2771 	if (r != 0) {
   2772 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2773 		    __func__, r);
   2774 		goto fail_2;
   2775 	}
   2776 
   2777 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2778 	if (r != 0) {
    2779 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
   2780 		    __func__, r);
   2781 		goto fail_3;
   2782 	}
   2783 
   2784 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2785 			    size,
   2786 			    NULL,
   2787 			    mapflags | BUS_DMA_NOWAIT);
   2788 	if (r != 0) {
   2789 		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
   2790 		    __func__, r);
   2791 		goto fail_4;
   2792 	}
   2793 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2794 	dma->dma_size = size;
   2795 	return 0;
   2796 fail_4:
   2797 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2798 fail_3:
   2799 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2800 fail_2:
   2801 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2802 fail_1:
   2803 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2804 fail_0:
   2805 	return r;
   2806 }
   2807 
   2808 static void
   2809 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
   2810 {
    2811 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
    2812 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    2813 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
         	/* Also destroy the map and unmap the VA, reversing
         	 * ixgbe_dma_malloc() completely so nothing is leaked. */
         	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
         	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr,
         	    dma->dma_size);
    2814 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
    2815 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2816 }
   2817 
   2818 
   2819 /*********************************************************************
   2820  *
   2821  *  Allocate memory for the transmit and receive rings, and then
   2822  *  the descriptors associated with each, called only once at attach.
   2823  *
   2824  **********************************************************************/
   2825 static int
   2826 ixgbe_allocate_queues(struct adapter *adapter)
   2827 {
   2828 	device_t	dev = adapter->dev;
   2829 	struct ix_queue	*que;
   2830 	struct tx_ring	*txr;
   2831 	struct rx_ring	*rxr;
   2832 	int rsize, tsize, error = IXGBE_SUCCESS;
   2833 	int txconf = 0, rxconf = 0;
   2834 
   2835         /* First allocate the top level queue structs */
   2836         if (!(adapter->queues =
   2837             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
   2838             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2839                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
   2840                 error = ENOMEM;
   2841                 goto fail;
   2842         }
   2843 
    2844 	/* Next allocate the TX ring struct memory */
   2845 	if (!(adapter->tx_rings =
   2846 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2847 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2848 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2849 		error = ENOMEM;
   2850 		goto tx_fail;
   2851 	}
   2852 
   2853 	/* Next allocate the RX */
   2854 	if (!(adapter->rx_rings =
   2855 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2856 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2857 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2858 		error = ENOMEM;
   2859 		goto rx_fail;
   2860 	}
   2861 
   2862 	/* For the ring itself */
   2863 	tsize = roundup2(adapter->num_tx_desc *
   2864 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2865 
   2866 	/*
   2867 	 * Now set up the TX queues, txconf is needed to handle the
   2868 	 * possibility that things fail midcourse and we need to
   2869 	 * undo memory gracefully
   2870 	 */
   2871 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2872 		/* Set up some basics */
   2873 		txr = &adapter->tx_rings[i];
   2874 		txr->adapter = adapter;
   2875 		txr->me = i;
   2876 
   2877 		/* Initialize the TX side lock */
   2878 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2879 		    device_xname(dev), txr->me);
   2880 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2881 
   2882 		if (ixgbe_dma_malloc(adapter, tsize,
   2883 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2884 			aprint_error_dev(dev,
   2885 			    "Unable to allocate TX Descriptor memory\n");
   2886 			error = ENOMEM;
   2887 			goto err_tx_desc;
   2888 		}
   2889 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2890 		bzero((void *)txr->tx_base, tsize);
   2891 
   2892         	/* Now allocate transmit buffers for the ring */
   2893         	if (ixgbe_allocate_transmit_buffers(txr)) {
   2894 			aprint_error_dev(dev,
   2895 			    "Critical Failure setting up transmit buffers\n");
   2896 			error = ENOMEM;
   2897 			goto err_tx_desc;
   2898         	}
   2899 #if __FreeBSD_version >= 800000
   2900 		/* Allocate a buf ring */
   2901 		txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
   2902 		    M_WAITOK, &txr->tx_mtx);
   2903 		if (txr->br == NULL) {
   2904 			aprint_error_dev(dev,
   2905 			    "Critical Failure setting up buf ring\n");
   2906 			error = ENOMEM;
   2907 			goto err_tx_desc;
   2908         	}
   2909 #endif
   2910 	}
   2911 
   2912 	/*
   2913 	 * Next the RX queues...
   2914 	 */
   2915 	rsize = roundup2(adapter->num_rx_desc *
   2916 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2917 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2918 		rxr = &adapter->rx_rings[i];
   2919 		/* Set up some basics */
   2920 		rxr->adapter = adapter;
   2921 		rxr->me = i;
   2922 
   2923 		/* Initialize the RX side lock */
   2924 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2925 		    device_xname(dev), rxr->me);
   2926 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2927 
   2928 		if (ixgbe_dma_malloc(adapter, rsize,
   2929 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2930 			aprint_error_dev(dev,
   2931 			    "Unable to allocate RxDescriptor memory\n");
   2932 			error = ENOMEM;
   2933 			goto err_rx_desc;
   2934 		}
   2935 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2936 		bzero((void *)rxr->rx_base, rsize);
   2937 
   2938         	/* Allocate receive buffers for the ring*/
   2939 		if (ixgbe_allocate_receive_buffers(rxr)) {
   2940 			aprint_error_dev(dev,
   2941 			    "Critical Failure setting up receive buffers\n");
   2942 			error = ENOMEM;
   2943 			goto err_rx_desc;
   2944 		}
   2945 	}
   2946 
   2947 	/*
   2948 	** Finally set up the queue holding structs
   2949 	*/
   2950 	for (int i = 0; i < adapter->num_queues; i++) {
   2951 		que = &adapter->queues[i];
   2952 		que->adapter = adapter;
   2953 		que->txr = &adapter->tx_rings[i];
   2954 		que->rxr = &adapter->rx_rings[i];
   2955 	}
   2956 
   2957 	return (0);
   2958 
   2959 err_rx_desc:
   2960 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2961 		ixgbe_dma_free(adapter, &rxr->rxdma);
   2962 err_tx_desc:
   2963 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2964 		ixgbe_dma_free(adapter, &txr->txdma);
   2965 	free(adapter->rx_rings, M_DEVBUF);
   2966 rx_fail:
   2967 	free(adapter->tx_rings, M_DEVBUF);
   2968 tx_fail:
   2969 	free(adapter->queues, M_DEVBUF);
   2970 fail:
   2971 	return (error);
   2972 }
   2973 
   2974 /*********************************************************************
   2975  *
   2976  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2977  *  the information needed to transmit a packet on the wire. This is
   2978  *  called only once at attach, setup is done every reset.
   2979  *
   2980  **********************************************************************/
   2981 static int
   2982 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
   2983 {
   2984 	struct adapter *adapter = txr->adapter;
   2985 	device_t dev = adapter->dev;
   2986 	struct ixgbe_tx_buf *txbuf;
   2987 	int error, i;
   2988 
   2989 	/*
   2990 	 * Setup DMA descriptor areas.
   2991 	 */
   2992 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2993 			       1, 0,		/* alignment, bounds */
   2994 			       IXGBE_TSO_SIZE,		/* maxsize */
   2995 			       adapter->num_segs,	/* nsegments */
   2996 			       PAGE_SIZE,		/* maxsegsize */
   2997 			       0,			/* flags */
   2998 			       &txr->txtag))) {
   2999 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
   3000 		goto fail;
   3001 	}
   3002 
   3003 	if (!(txr->tx_buffers =
   3004 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
   3005 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   3006 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
   3007 		error = ENOMEM;
   3008 		goto fail;
   3009 	}
   3010 
   3011         /* Create the descriptor buffer dma maps */
   3012 	txbuf = txr->tx_buffers;
   3013 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   3014 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
   3015 		if (error != 0) {
   3016 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
   3017 			goto fail;
   3018 		}
   3019 	}
   3020 
   3021 	return 0;
   3022 fail:
   3023 	/* We free all, it handles case where we are in the middle */
   3024 	ixgbe_free_transmit_structures(adapter);
   3025 	return (error);
   3026 }
   3027 
   3028 /*********************************************************************
   3029  *
   3030  *  Initialize a transmit ring.
   3031  *
   3032  **********************************************************************/
   3033 static void
   3034 ixgbe_setup_transmit_ring(struct tx_ring *txr)
   3035 {
   3036 	struct adapter *adapter = txr->adapter;
   3037 	struct ixgbe_tx_buf *txbuf;
   3038 	int i;
   3039 
   3040 	/* Clear the old ring contents */
   3041 	IXGBE_TX_LOCK(txr);
   3042 	bzero((void *)txr->tx_base,
   3043 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
   3044 	/* Reset indices */
   3045 	txr->next_avail_desc = 0;
   3046 	txr->next_to_clean = 0;
   3047 
   3048 	/* Free any existing tx buffers. */
   3049         txbuf = txr->tx_buffers;
   3050 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
   3051 		if (txbuf->m_head != NULL) {
   3052 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
   3053 			    0, txbuf->m_head->m_pkthdr.len,
   3054 			    BUS_DMASYNC_POSTWRITE);
   3055 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
   3056 			m_freem(txbuf->m_head);
   3057 			txbuf->m_head = NULL;
   3058 		}
   3059 		/* Clear the EOP index */
   3060 		txbuf->eop_index = -1;
   3061         }
   3062 
   3063 #ifdef IXGBE_FDIR
   3064 	/* Set the rate at which we sample packets */
   3065 	if (adapter->hw.mac.type != ixgbe_mac_82598EB)
   3066 		txr->atr_sample = atr_sample_rate;
   3067 #endif
   3068 
   3069 	/* Set number of descriptors available */
   3070 	txr->tx_avail = adapter->num_tx_desc;
   3071 
   3072 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3073 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3074 	IXGBE_TX_UNLOCK(txr);
   3075 }
   3076 
   3077 /*********************************************************************
   3078  *
   3079  *  Initialize all transmit rings.
   3080  *
   3081  **********************************************************************/
   3082 static int
   3083 ixgbe_setup_transmit_structures(struct adapter *adapter)
   3084 {
   3085 	struct tx_ring *txr = adapter->tx_rings;
   3086 
   3087 	for (int i = 0; i < adapter->num_queues; i++, txr++)
   3088 		ixgbe_setup_transmit_ring(txr);
   3089 
   3090 	return (0);
   3091 }
   3092 
   3093 /*********************************************************************
   3094  *
   3095  *  Enable transmit unit.
   3096  *
   3097  **********************************************************************/
   3098 static void
   3099 ixgbe_initialize_transmit_units(struct adapter *adapter)
   3100 {
   3101 	struct tx_ring	*txr = adapter->tx_rings;
   3102 	struct ixgbe_hw	*hw = &adapter->hw;
   3103 
   3104 	/* Setup the Base and Length of the Tx Descriptor Ring */
   3105 
   3106 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   3107 		u64	tdba = txr->txdma.dma_paddr;
   3108 		u32	txctrl;
   3109 
   3110 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
   3111 		       (tdba & 0x00000000ffffffffULL));
   3112 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
   3113 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
   3114 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   3115 
   3116 		/* Setup the HW Tx Head and Tail descriptor pointers */
   3117 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
   3118 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
   3119 
   3120 		/* Setup Transmit Descriptor Cmd Settings */
   3121 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
   3122 		txr->queue_status = IXGBE_QUEUE_IDLE;
   3123 
   3124 		/* Disable Head Writeback */
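        		/*
        		 * XXX Note: the bit cleared below is the relaxed-
        		 * ordering (WB_RO) enable for descriptor writeback,
        		 * which keeps the DD status write ordered behind the
        		 * packet data DMA that txeof relies on.
        		 */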
   3125 		switch (hw->mac.type) {
   3126 		case ixgbe_mac_82598EB:
   3127 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
   3128 			break;
   3129 		case ixgbe_mac_82599EB:
   3130 		default:
   3131 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
   3132 			break;
   3133 		}
   3134 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
   3135 		switch (hw->mac.type) {
   3136 		case ixgbe_mac_82598EB:
   3137 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
   3138 			break;
   3139 		case ixgbe_mac_82599EB:
   3140 		default:
   3141 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
   3142 			break;
   3143 		}
   3144 
   3145 	}
   3146 
   3147 	if (hw->mac.type != ixgbe_mac_82598EB) {
   3148 		u32 dmatxctl, rttdcs;
   3149 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
   3150 		dmatxctl |= IXGBE_DMATXCTL_TE;
   3151 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
   3152 		/* Disable arbiter to set MTQC */
   3153 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
   3154 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
   3155 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
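        		/* 64Q_1PB: 64 Tx queues sharing a single packet
        		 * buffer, i.e. no DCB and no virtualization */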
   3156 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
   3157 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
   3158 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
   3159 	}
   3160 
   3161 	return;
   3162 }
   3163 
   3164 /*********************************************************************
   3165  *
   3166  *  Free all transmit rings.
   3167  *
   3168  **********************************************************************/
   3169 static void
   3170 ixgbe_free_transmit_structures(struct adapter *adapter)
   3171 {
   3172 	struct tx_ring *txr = adapter->tx_rings;
   3173 
   3174 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   3175 		IXGBE_TX_LOCK(txr);
   3176 		ixgbe_free_transmit_buffers(txr);
   3177 		ixgbe_dma_free(adapter, &txr->txdma);
   3178 		IXGBE_TX_UNLOCK(txr);
   3179 		IXGBE_TX_LOCK_DESTROY(txr);
   3180 	}
   3181 	free(adapter->tx_rings, M_DEVBUF);
   3182 }
   3183 
   3184 /*********************************************************************
   3185  *
   3186  *  Free transmit ring related data structures.
   3187  *
   3188  **********************************************************************/
   3189 static void
   3190 ixgbe_free_transmit_buffers(struct tx_ring *txr)
   3191 {
   3192 	struct adapter *adapter = txr->adapter;
   3193 	struct ixgbe_tx_buf *tx_buffer;
   3194 	int             i;
   3195 
   3196 	INIT_DEBUGOUT("free_transmit_ring: begin");
   3197 
   3198 	if (txr->tx_buffers == NULL)
   3199 		return;
   3200 
   3201 	tx_buffer = txr->tx_buffers;
   3202 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
   3203 		if (tx_buffer->m_head != NULL) {
   3204 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
   3205 			    0, tx_buffer->m_head->m_pkthdr.len,
   3206 			    BUS_DMASYNC_POSTWRITE);
   3207 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3208 			m_freem(tx_buffer->m_head);
   3209 			tx_buffer->m_head = NULL;
   3210 			if (tx_buffer->map != NULL) {
   3211 				ixgbe_dmamap_destroy(txr->txtag,
   3212 				    tx_buffer->map);
   3213 				tx_buffer->map = NULL;
   3214 			}
   3215 		} else if (tx_buffer->map != NULL) {
   3216 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3217 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
   3218 			tx_buffer->map = NULL;
   3219 		}
   3220 	}
   3221 #if __FreeBSD_version >= 800000
   3222 	if (txr->br != NULL)
   3223 		buf_ring_free(txr->br, M_DEVBUF);
   3224 #endif
   3225 	if (txr->tx_buffers != NULL) {
   3226 		free(txr->tx_buffers, M_DEVBUF);
   3227 		txr->tx_buffers = NULL;
   3228 	}
   3229 	if (txr->txtag != NULL) {
   3230 		ixgbe_dma_tag_destroy(txr->txtag);
   3231 		txr->txtag = NULL;
   3232 	}
   3233 	return;
   3234 }
   3235 
   3236 /*********************************************************************
   3237  *
   3238  *  Advanced Context Descriptor setup for VLAN or L4 CSUM
   3239  *
   3240  **********************************************************************/
   3241 
   3242 static u32
   3243 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
   3244 {
   3245 	struct m_tag *mtag;
   3246 	struct adapter *adapter = txr->adapter;
   3247 	struct ethercom *ec = &adapter->osdep.ec;
   3248 	struct ixgbe_adv_tx_context_desc *TXD;
   3249 	struct ixgbe_tx_buf        *tx_buffer;
   3250 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   3251 	struct ether_vlan_header *eh;
   3252 	struct ip ip;
   3253 	struct ip6_hdr ip6;
   3254 	int  ehdrlen, ip_hlen = 0;
   3255 	u16	etype;
   3256 	u8	ipproto = 0;
   3257 	bool	offload;
   3258 	int ctxd = txr->next_avail_desc;
   3259 	u16 vtag = 0;
   3260 
   3261 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
   3262 
   3263 	tx_buffer = &txr->tx_buffers[ctxd];
   3264 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   3265 
   3266 	/*
   3267 	** In advanced descriptors the vlan tag must
   3268 	** be placed into the descriptor itself.
   3269 	*/
   3270 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   3271 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   3272 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   3273 	} else if (!offload)
   3274 		return 0;
   3275 
   3276 	/*
   3277 	 * Determine where frame payload starts.
   3278 	 * Jump over vlan headers if already present,
   3279 	 * helpful for QinQ too.
   3280 	 */
   3281 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
   3282 	eh = mtod(mp, struct ether_vlan_header *);
   3283 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   3284 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   3285 		etype = ntohs(eh->evl_proto);
   3286 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3287 	} else {
   3288 		etype = ntohs(eh->evl_encap_proto);
   3289 		ehdrlen = ETHER_HDR_LEN;
   3290 	}
   3291 
   3292 	/* Set the ether header length */
   3293 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   3294 
   3295 	switch (etype) {
   3296 	case ETHERTYPE_IP:
   3297 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
   3298 		ip_hlen = ip.ip_hl << 2;
   3299 		ipproto = ip.ip_p;
   3300 #if 0
   3301 		ip.ip_sum = 0;
   3302 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
   3303 #else
   3304 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
   3305 		    ip.ip_sum == 0);
   3306 #endif
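        		/*
        		 * With M_CSUM_IPv4 the stack leaves ip_sum zeroed;
        		 * the IXSM bit set below has the hardware insert
        		 * the IPv4 header checksum itself.
        		 */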
   3307 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   3308 		break;
   3309 	case ETHERTYPE_IPV6:
   3310 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
   3311 		ip_hlen = sizeof(ip6);
   3312 		ipproto = ip6.ip6_nxt;
   3313 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   3314 		break;
   3315 	default:
   3316 		break;
   3317 	}
   3318 
   3319 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
   3320 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
   3321 
   3322 	vlan_macip_lens |= ip_hlen;
   3323 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   3324 
   3325 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
   3326 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   3327 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   3328 		KASSERT(ipproto == IPPROTO_TCP);
   3329 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
   3330 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
   3331 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
   3332 		KASSERT(ipproto == IPPROTO_UDP);
   3333 	}
   3334 
   3335 	/* Now copy bits into descriptor */
   3336 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   3337 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   3338 	TXD->seqnum_seed = htole32(0);
   3339 	TXD->mss_l4len_idx = htole32(0);
   3340 
   3341 	tx_buffer->m_head = NULL;
   3342 	tx_buffer->eop_index = -1;
   3343 
   3344 	/* We've consumed the first desc, adjust counters */
   3345 	if (++ctxd == adapter->num_tx_desc)
   3346 		ctxd = 0;
   3347 	txr->next_avail_desc = ctxd;
   3348 	--txr->tx_avail;
   3349 
   3350 	return olinfo;
   3351 }
   3352 
   3353 /**********************************************************************
   3354  *
   3355  *  Setup work for hardware segmentation offload (TSO) on
   3356  *  adapters using advanced tx descriptors
   3357  *
   3358  **********************************************************************/
   3359 static bool
   3360 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
   3361 {
   3362 	struct m_tag *mtag;
   3363 	struct adapter *adapter = txr->adapter;
   3364 	struct ethercom *ec = &adapter->osdep.ec;
   3365 	struct ixgbe_adv_tx_context_desc *TXD;
   3366 	struct ixgbe_tx_buf        *tx_buffer;
   3367 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
   3368 	u32 mss_l4len_idx = 0;
   3369 	u16 vtag = 0;
   3370 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
   3371 	struct ether_vlan_header *eh;
   3372 	struct ip *ip;
   3373 	struct tcphdr *th;
   3374 
   3375 
   3376 	/*
   3377 	 * Determine where frame payload starts.
   3378 	 * Jump over vlan headers if already present
   3379 	 */
   3380 	eh = mtod(mp, struct ether_vlan_header *);
   3381 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
   3382 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3383 	else
   3384 		ehdrlen = ETHER_HDR_LEN;
   3385 
   3386 	/* Ensure we have at least the IP+TCP header in the first mbuf. */
   3387 	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
   3388 		return FALSE;
   3389 
   3390 	ctxd = txr->next_avail_desc;
   3391 	tx_buffer = &txr->tx_buffers[ctxd];
   3392 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
   3393 
   3394 	ip = (struct ip *)(mp->m_data + ehdrlen);
   3395 	if (ip->ip_p != IPPROTO_TCP)
   3396 		return FALSE;   /* 0 */
   3397 	ip->ip_sum = 0;
   3398 	ip_hlen = ip->ip_hl << 2;
   3399 	th = (struct tcphdr *)((char *)ip + ip_hlen);
   3400 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
   3401 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   3402 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
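        	/*
        	 * The pseudo-header seed deliberately omits the TCP
        	 * length; the hardware recomputes it for each segment
        	 * it generates during TSO.
        	 */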
   3403 	tcp_hlen = th->th_off << 2;
   3404 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
   3405 
   3406 	/* This is used in the transmit desc in encap */
   3407 	*paylen = mp->m_pkthdr.len - hdrlen;
   3408 
   3409 	/* VLAN MACLEN IPLEN */
   3410 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
   3411 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   3412 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   3413 	}
   3414 
   3415 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   3416 	vlan_macip_lens |= ip_hlen;
   3417 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
   3418 
   3419 	/* ADV DTYPE TUCMD */
   3420 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   3421 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   3422 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   3423 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
   3424 
   3425 
   3426 	/* MSS L4LEN IDX */
   3427 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   3428 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   3429 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   3430 
   3431 	TXD->seqnum_seed = htole32(0);
   3432 	tx_buffer->m_head = NULL;
   3433 	tx_buffer->eop_index = -1;
   3434 
   3435 	if (++ctxd == adapter->num_tx_desc)
   3436 		ctxd = 0;
   3437 
   3438 	txr->tx_avail--;
   3439 	txr->next_avail_desc = ctxd;
   3440 	return TRUE;
   3441 }
   3442 
   3443 #ifdef IXGBE_FDIR
   3444 /*
   3445 ** This routine parses packet headers so that Flow
   3446 ** Director can make a hashed filter table entry
   3447 ** allowing traffic flows to be identified and kept
   3448 ** on the same CPU.  Parsing every packet would be a
   3449 ** performance hit, so we only sample one in every
   3450 ** IXGBE_FDIR_RATE packets.
   3451 */
   3452 static void
   3453 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
   3454 {
   3455 	struct adapter			*adapter = txr->adapter;
   3456 	struct ix_queue			*que;
   3457 	struct ip			*ip;
   3458 	struct tcphdr			*th;
   3459 	struct udphdr			*uh;
   3460 	struct ether_vlan_header	*eh;
   3461 	union ixgbe_atr_hash_dword	input = {.dword = 0};
   3462 	union ixgbe_atr_hash_dword	common = {.dword = 0};
   3463 	int  				ehdrlen, ip_hlen;
   3464 	u16				etype;
   3465 
   3466 	eh = mtod(mp, struct ether_vlan_header *);
   3467 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   3468 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3469 		etype = eh->evl_proto;
   3470 	} else {
   3471 		ehdrlen = ETHER_HDR_LEN;
   3472 		etype = eh->evl_encap_proto;
   3473 	}
   3474 
   3475 	/* Only handling IPv4 */
   3476 	if (etype != htons(ETHERTYPE_IP))
   3477 		return;
   3478 
   3479 	ip = (struct ip *)(mp->m_data + ehdrlen);
   3480 	ip_hlen = ip->ip_hl << 2;
   3481 
   3482 	/* check if we're UDP or TCP */
   3483 	switch (ip->ip_p) {
   3484 	case IPPROTO_TCP:
   3485 		th = (struct tcphdr *)((char *)ip + ip_hlen);
   3486 		/* src and dst are inverted */
   3487 		common.port.dst ^= th->th_sport;
   3488 		common.port.src ^= th->th_dport;
   3489 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
   3490 		break;
   3491 	case IPPROTO_UDP:
   3492 		uh = (struct udphdr *)((char *)ip + ip_hlen);
   3493 		/* src and dst are inverted */
   3494 		common.port.dst ^= uh->uh_sport;
   3495 		common.port.src ^= uh->uh_dport;
   3496 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
   3497 		break;
   3498 	default:
   3499 		return;
   3500 	}
   3501 
   3502 	input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
   3503 	if (mp->m_pkthdr.ether_vtag)
   3504 		common.flex_bytes ^= htons(ETHERTYPE_VLAN);
   3505 	else
   3506 		common.flex_bytes ^= etype;
   3507 	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
   3508 
   3509 	que = &adapter->queues[txr->me];
   3510 	/*
   3511 	** This assumes the Rx queue and Tx
   3512 	** queue are bound to the same CPU
   3513 	*/
   3514 	ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
   3515 	    input, common, que->msix);
   3516 }
   3517 #endif /* IXGBE_FDIR */
   3518 
   3519 /**********************************************************************
   3520  *
   3521  *  Examine each tx_buffer in the used queue. If the hardware is done
   3522  *  processing the packet then free associated resources. The
   3523  *  tx_buffer is put back on the free queue.
   3524  *
   3525  **********************************************************************/
   3526 static bool
   3527 ixgbe_txeof(struct tx_ring *txr)
   3528 {
   3529 	struct adapter	*adapter = txr->adapter;
   3530 	struct ifnet	*ifp = adapter->ifp;
   3531 	u32	first, last, done, processed;
   3532 	struct ixgbe_tx_buf *tx_buffer;
   3533 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
   3534 	struct timeval now, elapsed;
   3535 
   3536 	KASSERT(mutex_owned(&txr->tx_mtx));
   3537 
   3538 	if (txr->tx_avail == adapter->num_tx_desc) {
   3539 		txr->queue_status = IXGBE_QUEUE_IDLE;
   3540 		return false;
   3541 	}
   3542 
   3543 	processed = 0;
   3544 	first = txr->next_to_clean;
   3545 	tx_buffer = &txr->tx_buffers[first];
   3546 	/* For cleanup we just use legacy struct */
   3547 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   3548 	last = tx_buffer->eop_index;
   3549 	if (last == -1)
   3550 		return false;
   3551 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   3552 
   3553 	/*
   3554 	** Get the index of the first descriptor
   3555 	** BEYOND the EOP and call that 'done'.
   3556 	** I do this so the comparison in the
   3557 	** inner while loop below can be simple
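        	** (e.g. EOP in the last slot makes 'done' wrap to 0)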
   3558 	*/
   3559 	if (++last == adapter->num_tx_desc) last = 0;
   3560 	done = last;
   3561 
   3562 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3563 	    BUS_DMASYNC_POSTREAD);
   3564 	/*
   3565 	** Only the EOP descriptor of a packet now has the DD
   3566 	** bit set, this is what we look for...
   3567 	*/
   3568 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
   3569 		/* We clean the range of the packet */
   3570 		while (first != done) {
   3571 			tx_desc->upper.data = 0;
   3572 			tx_desc->lower.data = 0;
   3573 			tx_desc->buffer_addr = 0;
   3574 			++txr->tx_avail;
   3575 			++processed;
   3576 
   3577 			if (tx_buffer->m_head) {
   3578 				txr->bytes +=
   3579 				    tx_buffer->m_head->m_pkthdr.len;
   3580 				bus_dmamap_sync(txr->txtag->dt_dmat,
   3581 				    tx_buffer->map,
   3582 				    0, tx_buffer->m_head->m_pkthdr.len,
   3583 				    BUS_DMASYNC_POSTWRITE);
   3584 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
   3585 				m_freem(tx_buffer->m_head);
   3586 				tx_buffer->m_head = NULL;
   3587 			}
   3588 			tx_buffer->eop_index = -1;
   3589 			getmicrotime(&txr->watchdog_time);
   3590 
   3591 			if (++first == adapter->num_tx_desc)
   3592 				first = 0;
   3593 
   3594 			tx_buffer = &txr->tx_buffers[first];
   3595 			tx_desc =
   3596 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
   3597 		}
   3598 		++txr->packets;
   3599 		++ifp->if_opackets;
   3600 		/* See if there is more work now */
   3601 		last = tx_buffer->eop_index;
   3602 		if (last != -1) {
   3603 			eop_desc =
   3604 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
   3605 			/* Get next done point */
   3606 			if (++last == adapter->num_tx_desc) last = 0;
   3607 			done = last;
   3608 		} else
   3609 			break;
   3610 	}
   3611 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   3612 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3613 
   3614 	txr->next_to_clean = first;
   3615 
   3616 	/*
   3617 	** Watchdog calculation: we know there's work
   3618 	** outstanding or the first return above would
   3619 	** have been taken, so if nothing has been
   3620 	** processed for too long the queue is hung.
   3621 	*/
   3622 	getmicrotime(&now);
   3623 	timersub(&now, &txr->watchdog_time, &elapsed);
   3624 	if (!processed && tvtohz(&elapsed) > IXGBE_WATCHDOG)
   3625 		txr->queue_status = IXGBE_QUEUE_HUNG;
   3626 
   3627 	/*
   3628 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
   3629 	 * it is OK to send packets. If there are no pending descriptors,
   3630 	 * clear the timeout. Otherwise, if some descriptors have been freed,
   3631 	 * restart the timeout.
   3632 	 */
   3633 	if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
   3634 		ifp->if_flags &= ~IFF_OACTIVE;
   3635 		if (txr->tx_avail == adapter->num_tx_desc) {
   3636 			txr->queue_status = IXGBE_QUEUE_IDLE;
   3637 			return false;
   3638 		}
   3639 	}
   3640 
   3641 	return true;
   3642 }
   3643 
   3644 /*********************************************************************
   3645  *
   3646  *  Refresh mbuf buffers for RX descriptor rings
   3647  *   - now keeps its own state so discards due to resource
   3648  *     exhaustion are unnecessary; if an mbuf cannot be obtained
   3649  *     it just returns, keeping its placeholder, so it can simply
   3650  *     be called again later to retry.
   3651  *
   3652  **********************************************************************/
   3653 static void
   3654 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   3655 {
   3656 	struct adapter		*adapter = rxr->adapter;
   3657 	struct ixgbe_rx_buf	*rxbuf;
   3658 	struct mbuf		*mh, *mp;
   3659 	int			i, j, error;
   3660 	bool			refreshed = false;
   3661 
   3662 	i = j = rxr->next_to_refresh;
   3663 	/* Control the loop with one beyond */
   3664 	if (++j == adapter->num_rx_desc)
   3665 		j = 0;
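        	/*
        	** 'j' runs one slot ahead of 'i'; stopping when j hits
        	** 'limit' leaves next_to_refresh on the last refreshed slot.
        	*/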
   3666 
   3667 	while (j != limit) {
   3668 		rxbuf = &rxr->rx_buffers[i];
   3669 		if (rxr->hdr_split == FALSE)
   3670 			goto no_split;
   3671 
   3672 		if (rxbuf->m_head == NULL) {
   3673 			mh = m_gethdr(M_DONTWAIT, MT_DATA);
   3674 			if (mh == NULL)
   3675 				goto update;
   3676 		} else
   3677 			mh = rxbuf->m_head;
   3678 
   3679 		mh->m_pkthdr.len = mh->m_len = MHLEN;
   3681 		mh->m_flags |= M_PKTHDR;
   3682 		/* Get the memory mapping */
   3683 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3684 		    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
   3685 		if (error != 0) {
   3686 			printf("Refresh mbufs: hdr dmamap load"
   3687 			    " failure - %d\n", error);
   3688 			m_free(mh);
   3689 			rxbuf->m_head = NULL;
   3690 			goto update;
   3691 		}
   3692 		rxbuf->m_head = mh;
   3693 		ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_PREREAD);
   3694 		rxr->rx_base[i].read.hdr_addr =
   3695 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3696 
   3697 no_split:
   3698 		if (rxbuf->m_pack == NULL) {
   3699 			mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3700 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3701 			if (mp == NULL) {
   3702 				rxr->no_jmbuf.ev_count++;
   3703 				goto update;
   3704 			}
   3705 		} else
   3706 			mp = rxbuf->m_pack;
   3707 
   3708 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3709 		/* Get the memory mapping */
   3710 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3711 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3712 		if (error != 0) {
   3713 			printf("Refresh mbufs: payload dmamap load"
   3714 			    " failure - %d\n", error);
   3715 			m_free(mp);
   3716 			rxbuf->m_pack = NULL;
   3717 			goto update;
   3718 		}
   3719 		rxbuf->m_pack = mp;
   3720 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3721 		    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3722 		rxr->rx_base[i].read.pkt_addr =
   3723 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3724 
   3725 		refreshed = true;
   3726 		/* Next is precalculated */
   3727 		i = j;
   3728 		rxr->next_to_refresh = i;
   3729 		if (++j == adapter->num_rx_desc)
   3730 			j = 0;
   3731 	}
   3732 update:
   3733 	if (refreshed) /* Update hardware tail index */
   3734 		IXGBE_WRITE_REG(&adapter->hw,
   3735 		    IXGBE_RDT(rxr->me), rxr->next_to_refresh);
   3736 	return;
   3737 }
   3738 
   3739 /*********************************************************************
   3740  *
   3741  *  Allocate memory for rx_buffer structures. Since we use one
   3742  *  rx_buffer per received packet, the maximum number of rx_buffer's
   3743  *  that we'll need is equal to the number of receive descriptors
   3744  *  that we've allocated.
   3745  *
   3746  **********************************************************************/
   3747 static int
   3748 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   3749 {
   3750 	struct	adapter 	*adapter = rxr->adapter;
   3751 	device_t 		dev = adapter->dev;
   3752 	struct ixgbe_rx_buf 	*rxbuf;
   3753 	int             	i, bsize, error;
   3754 
   3755 	bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
   3756 	if (!(rxr->rx_buffers =
   3757 	    (struct ixgbe_rx_buf *) malloc(bsize,
   3758 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   3759 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   3760 		error = ENOMEM;
   3761 		goto fail;
   3762 	}
   3763 
   3764 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3765 				   1, 0,	/* alignment, bounds */
   3766 				   MSIZE,		/* maxsize */
   3767 				   1,			/* nsegments */
   3768 				   MSIZE,		/* maxsegsize */
   3769 				   0,			/* flags */
   3770 				   &rxr->htag))) {
   3771 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   3772 		goto fail;
   3773 	}
   3774 
   3775 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   3776 				   1, 0,	/* alignment, bounds */
   3777 				   MJUM16BYTES,		/* maxsize */
   3778 				   1,			/* nsegments */
   3779 				   MJUM16BYTES,		/* maxsegsize */
   3780 				   0,			/* flags */
   3781 				   &rxr->ptag))) {
   3782 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   3783 		goto fail;
   3784 	}
   3785 
   3786 	for (i = 0; i < adapter->num_rx_desc; i++) {
   3787 		rxbuf = &rxr->rx_buffers[i];
   3788 		error = ixgbe_dmamap_create(rxr->htag,
   3789 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
   3790 		if (error) {
   3791 			aprint_error_dev(dev, "Unable to create RX head map\n");
   3792 			goto fail;
   3793 		}
   3794 		error = ixgbe_dmamap_create(rxr->ptag,
   3795 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   3796 		if (error) {
   3797 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
   3798 			goto fail;
   3799 		}
   3800 	}
   3801 
   3802 	return (0);
   3803 
   3804 fail:
   3805 	/* Frees all, but can handle partial completion */
   3806 	ixgbe_free_receive_structures(adapter);
   3807 	return (error);
   3808 }
   3809 
   3810 /*
   3811 ** Used to detect a descriptor that has
   3812 ** been merged by Hardware RSC.
   3813 */
   3814 static inline u32
   3815 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   3816 {
   3817 	return (le32toh(rx->wb.lower.lo_dword.data) &
   3818 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   3819 }
   3820 
   3821 /*********************************************************************
   3822  *
   3823  *  Initialize the hardware RSC (LRO) feature on the 82599
   3824  *  for an RX ring; it is toggled by the LRO capability
   3825  *  even though it is transparent to the stack.
   3826  *
   3827  **********************************************************************/
   3828 static void
   3829 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   3830 {
   3831 	struct	adapter 	*adapter = rxr->adapter;
   3832 	struct	ixgbe_hw	*hw = &adapter->hw;
   3833 	u32			rscctrl, rdrxctl;
   3834 
   3835 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   3836 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   3837 	rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   3838 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   3839 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   3840 
   3841 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   3842 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   3843 	/*
   3844 	** Limit the total number of descriptors that
   3845 	** can be combined, so it does not exceed 64K
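        	** (e.g. 16 x 2KB clusters, 8 x 4KB, 4 x 9KB, 1 x 16KB)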
   3846 	*/
   3847 	if (adapter->rx_mbuf_sz == MCLBYTES)
   3848 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   3849 	else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
   3850 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   3851 	else if (adapter->rx_mbuf_sz == MJUM9BYTES)
   3852 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   3853 	else  /* Using 16K cluster */
   3854 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
   3855 
   3856 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   3857 
   3858 	/* Enable TCP header recognition */
   3859 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   3860 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
   3861 	    IXGBE_PSRTYPE_TCPHDR));
   3862 
   3863 	/* Disable RSC for ACK packets */
   3864 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   3865 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   3866 
   3867 	rxr->hw_rsc = TRUE;
   3868 }
   3869 
   3870 
   3871 static void
   3872 ixgbe_free_receive_ring(struct rx_ring *rxr)
   3873 {
   3874 	struct  adapter         *adapter;
   3875 	struct ixgbe_rx_buf       *rxbuf;
   3876 	int i;
   3877 
   3878 	adapter = rxr->adapter;
   3879 	for (i = 0; i < adapter->num_rx_desc; i++) {
   3880 		rxbuf = &rxr->rx_buffers[i];
   3881 		if (rxbuf->m_head != NULL) {
   3882 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   3883 			    BUS_DMASYNC_POSTREAD);
   3884 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   3885 			rxbuf->m_head->m_flags |= M_PKTHDR;
   3886 			m_freem(rxbuf->m_head);
   3887 		}
   3888 		if (rxbuf->m_pack != NULL) {
   3889 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3890 			    0, rxbuf->m_pack->m_pkthdr.len,
   3891 			    BUS_DMASYNC_POSTREAD);
   3892 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   3893 			rxbuf->m_pack->m_flags |= M_PKTHDR;
   3894 			m_freem(rxbuf->m_pack);
   3895 		}
   3896 		rxbuf->m_head = NULL;
   3897 		rxbuf->m_pack = NULL;
   3898 	}
   3899 }
   3900 
   3901 
   3902 /*********************************************************************
   3903  *
   3904  *  Initialize a receive ring and its buffers.
   3905  *
   3906  **********************************************************************/
   3907 static int
   3908 ixgbe_setup_receive_ring(struct rx_ring *rxr)
   3909 {
   3910 	struct	adapter 	*adapter;
   3911 	struct ifnet		*ifp;
   3912 	device_t		dev;
   3913 	struct ixgbe_rx_buf	*rxbuf;
   3914 #ifdef LRO
   3915 	struct lro_ctrl		*lro = &rxr->lro;
   3916 #endif /* LRO */
   3917 	int			rsize, error = 0;
   3918 
   3919 	adapter = rxr->adapter;
   3920 	ifp = adapter->ifp;
   3921 	dev = adapter->dev;
   3922 
   3923 	/* Clear the ring contents */
   3924 	IXGBE_RX_LOCK(rxr);
   3925 	rsize = roundup2(adapter->num_rx_desc *
   3926 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   3927 	bzero((void *)rxr->rx_base, rsize);
   3928 
   3929 	/* Free current RX buffer structs and their mbufs */
   3930 	ixgbe_free_receive_ring(rxr);
   3931 
   3932 	/* Now reinitialize our supply of jumbo mbufs.  The number
   3933 	 * or size of jumbo mbufs may have changed.
   3934 	 */
   3935 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   3936 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
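        	/*
        	 * XXX 2 * num_rx_desc clusters presumably covers the ring
        	 * itself plus buffers still loaned up to the stack.
        	 */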
   3937 
   3938 	/* Configure header split? */
   3939 	if (ixgbe_header_split)
   3940 		rxr->hdr_split = TRUE;
   3941 
   3942 	/* Now replenish the mbufs */
   3943 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
   3944 		struct mbuf	*mh, *mp;
   3945 
   3946 		rxbuf = &rxr->rx_buffers[j];
   3947 		/*
   3948 		** Don't allocate mbufs if not
   3949 		** doing header split, it's wasteful
   3950 		*/
   3951 		if (rxr->hdr_split == FALSE)
   3952 			goto skip_head;
   3953 
   3954 		/* First the header */
   3955 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
   3956 		if (rxbuf->m_head == NULL) {
   3957 			error = ENOBUFS;
   3958 			goto fail;
   3959 		}
   3960 		m_adj(rxbuf->m_head, ETHER_ALIGN);
   3961 		mh = rxbuf->m_head;
   3962 		mh->m_len = mh->m_pkthdr.len = MHLEN;
   3963 		mh->m_flags |= M_PKTHDR;
   3964 		/* Get the memory mapping */
   3965 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
   3966 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
   3967 		if (error != 0) /* Nothing elegant to do here */
   3968 			goto fail;
   3969 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
   3970 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   3971 		/* Update descriptor */
   3972 		rxr->rx_base[j].read.hdr_addr =
   3973 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
   3974 
   3975 skip_head:
   3976 		/* Now the payload cluster */
   3977 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
   3978 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   3979 		if (rxbuf->m_pack == NULL) {
   3980 			error = ENOBUFS;
   3981 			goto fail;
   3982 		}
   3983 		mp = rxbuf->m_pack;
   3984 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
   3985 		/* Get the memory mapping */
   3986 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   3987 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   3988 		if (error != 0)
   3989 			goto fail;
   3990 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   3991 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   3992 		/* Update descriptor */
   3993 		rxr->rx_base[j].read.pkt_addr =
   3994 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   3995 	}
   3996 
   3997 
   3998 	/* Setup our descriptor indices */
   3999 	rxr->next_to_check = 0;
   4000 	rxr->next_to_refresh = 0;
   4001 	rxr->lro_enabled = FALSE;
   4002 	rxr->rx_split_packets.ev_count = 0;
   4003 	rxr->rx_bytes.ev_count = 0;
   4004 	rxr->discard = FALSE;
   4005 
   4006 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4007 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   4008 
   4009 	/*
   4010 	** Now set up the LRO interface:
   4011 	** 82598 uses software LRO, the
   4012 	** 82599 uses a hardware assist.
   4013 	*/
   4014 	if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
   4015 	    (ifp->if_capenable & IFCAP_RXCSUM) &&
   4016 	    (ifp->if_capenable & IFCAP_LRO))
   4017 		ixgbe_setup_hw_rsc(rxr);
   4018 #ifdef LRO
   4019 	else if (ifp->if_capenable & IFCAP_LRO) {
   4020 		int err = tcp_lro_init(lro);
   4021 		if (err) {
   4022 			device_printf(dev, "LRO Initialization failed!\n");
   4023 			goto fail;
   4024 		}
   4025 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   4026 		rxr->lro_enabled = TRUE;
   4027 		lro->ifp = adapter->ifp;
   4028 	}
   4029 #endif /* LRO */
   4030 
   4031 	IXGBE_RX_UNLOCK(rxr);
   4032 	return (0);
   4033 
   4034 fail:
   4035 	ixgbe_free_receive_ring(rxr);
   4036 	IXGBE_RX_UNLOCK(rxr);
   4037 	return (error);
   4038 }
   4039 
   4040 /*********************************************************************
   4041  *
   4042  *  Initialize all receive rings.
   4043  *
   4044  **********************************************************************/
   4045 static int
   4046 ixgbe_setup_receive_structures(struct adapter *adapter)
   4047 {
   4048 	struct rx_ring *rxr = adapter->rx_rings;
   4049 	int j;
   4050 
   4051 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   4052 		if (ixgbe_setup_receive_ring(rxr))
   4053 			goto fail;
   4054 
   4055 	return (0);
   4056 fail:
   4057 	/*
   4058 	 * Free the RX buffers allocated so far. We only need to handle
   4059 	 * the rings that completed; the failing ring will have
   4060 	 * cleaned up after itself. Ring 'j' failed, so it is the terminus.
   4061 	 */
   4062 	for (int i = 0; i < j; ++i) {
   4063 		rxr = &adapter->rx_rings[i];
   4064 		ixgbe_free_receive_ring(rxr);
   4065 	}
   4066 
   4067 	return (ENOBUFS);
   4068 }
   4069 
   4070 /*********************************************************************
   4071  *
   4072  *  Setup receive registers and features.
   4073  *
   4074  **********************************************************************/
   4075 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   4076 
   4077 static void
   4078 ixgbe_initialize_receive_units(struct adapter *adapter)
   4079 {
   4080 	int i;
   4081 	struct	rx_ring	*rxr = adapter->rx_rings;
   4082 	struct ixgbe_hw	*hw = &adapter->hw;
   4083 	struct ifnet   *ifp = adapter->ifp;
   4084 	u32		bufsz, rxctrl, fctrl, srrctl, rxcsum;
   4085 	u32		reta, mrqc = 0, hlreg, r[10];
   4086 
   4087 
   4088 	/*
   4089 	 * Make sure receives are disabled while
   4090 	 * setting up the descriptor ring
   4091 	 */
   4092 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4093 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
   4094 	    rxctrl & ~IXGBE_RXCTRL_RXEN);
   4095 
   4096 	/* Enable broadcasts */
   4097 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   4098 	fctrl |= IXGBE_FCTRL_BAM;
   4099 	fctrl |= IXGBE_FCTRL_DPF;
   4100 	fctrl |= IXGBE_FCTRL_PMCF;
   4101 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   4102 
   4103 	/* Set for Jumbo Frames? */
   4104 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   4105 	if (ifp->if_mtu > ETHERMTU)
   4106 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   4107 	else
   4108 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   4109 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   4110 
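        	/* SRRCTL takes the packet buffer size in 1KB units */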
   4111 	bufsz = adapter->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   4112 
   4113 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   4114 		u64 rdba = rxr->rxdma.dma_paddr;
   4115 
   4116 		/* Setup the Base and Length of the Rx Descriptor Ring */
   4117 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
   4118 			       (rdba & 0x00000000ffffffffULL));
   4119 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
   4120 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
   4121 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   4122 
   4123 		/* Set up the SRRCTL register */
   4124 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
   4125 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   4126 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   4127 		srrctl |= bufsz;
   4128 		if (rxr->hdr_split) {
   4129 			/* Use a standard mbuf for the header */
   4130 			srrctl |= ((IXGBE_RX_HDR <<
   4131 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
   4132 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
   4133 			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
   4134 		} else
   4135 			srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   4136 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
   4137 
   4138 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   4139 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
   4140 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
   4141 	}
   4142 
   4143 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   4144 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
   4145 			      IXGBE_PSRTYPE_UDPHDR |
   4146 			      IXGBE_PSRTYPE_IPV4HDR |
   4147 			      IXGBE_PSRTYPE_IPV6HDR;
   4148 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
   4149 	}
   4150 
   4151 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   4152 
   4153 	/* Setup RSS */
   4154 	if (adapter->num_queues > 1) {
   4155 		int j;
   4156 		reta = 0;
   4157 
   4158 		/* set up random bits */
   4159 		cprng_fast(&r, sizeof(r));
   4160 
   4161 		/* Set up the redirection table */
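        		/*
        		 * 128 one-byte entries, packed four per RETA
        		 * register; j * 0x11 replicates the queue number
        		 * into both nibbles of an entry.
        		 */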
   4162 		for (i = 0, j = 0; i < 128; i++, j++) {
   4163 			if (j == adapter->num_queues) j = 0;
   4164 			reta = (reta << 8) | (j * 0x11);
   4165 			if ((i & 3) == 3)
   4166 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
   4167 		}
   4168 
   4169 		/* Now fill our hash function seeds */
   4170 		for (i = 0; i < 10; i++)
   4171 			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), r[i]);
   4172 
   4173 		/* Perform hash on these packet types */
   4174 		mrqc = IXGBE_MRQC_RSSEN
   4175 		     | IXGBE_MRQC_RSS_FIELD_IPV4
   4176 		     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
   4177 		     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
   4178 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
   4179 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
   4180 		     | IXGBE_MRQC_RSS_FIELD_IPV6
   4181 		     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
   4182 		     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
   4183 		     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
   4184 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
   4185 
   4186 		/* RSS and RX IPP Checksum are mutually exclusive */
   4187 		rxcsum |= IXGBE_RXCSUM_PCSD;
   4188 	}
   4189 
   4190 	if (ifp->if_capenable & IFCAP_RXCSUM)
   4191 		rxcsum |= IXGBE_RXCSUM_PCSD;
   4192 
   4193 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   4194 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   4195 
   4196 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   4197 
   4198 	return;
   4199 }
   4200 
   4201 /*********************************************************************
   4202  *
   4203  *  Free all receive rings.
   4204  *
   4205  **********************************************************************/
   4206 static void
   4207 ixgbe_free_receive_structures(struct adapter *adapter)
   4208 {
   4209 	struct rx_ring *rxr = adapter->rx_rings;
   4210 
   4211 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   4212 #ifdef LRO
   4213 		struct lro_ctrl		*lro = &rxr->lro;
   4214 #endif /* LRO */
   4215 		ixgbe_free_receive_buffers(rxr);
   4216 #ifdef LRO
   4217 		/* Free LRO memory */
   4218 		tcp_lro_free(lro);
   4219 #endif /* LRO */
   4220 		/* Free the ring memory as well */
   4221 		ixgbe_dma_free(adapter, &rxr->rxdma);
   4222 	}
   4223 
   4224 	free(adapter->rx_rings, M_DEVBUF);
   4225 }
   4226 
   4227 
   4228 /*********************************************************************
   4229  *
   4230  *  Free receive ring data structures
   4231  *
   4232  **********************************************************************/
   4233 static void
   4234 ixgbe_free_receive_buffers(struct rx_ring *rxr)
   4235 {
   4236 	struct adapter		*adapter = rxr->adapter;
   4237 	struct ixgbe_rx_buf	*rxbuf;
   4238 
   4239 	INIT_DEBUGOUT("free_receive_structures: begin");
   4240 
   4241 	/* Cleanup any existing buffers */
   4242 	if (rxr->rx_buffers != NULL) {
   4243 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   4244 			rxbuf = &rxr->rx_buffers[i];
   4245 			if (rxbuf->m_head != NULL) {
   4246 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
   4247 				    BUS_DMASYNC_POSTREAD);
   4248 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
   4249 				rxbuf->m_head->m_flags |= M_PKTHDR;
   4250 				m_freem(rxbuf->m_head);
   4251 			}
   4252 			if (rxbuf->m_pack != NULL) {
   4253 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   4254 				    0, rxbuf->m_pack->m_pkthdr.len,
   4255 				    BUS_DMASYNC_POSTREAD);
   4256 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   4257 				rxbuf->m_pack->m_flags |= M_PKTHDR;
   4258 				m_freem(rxbuf->m_pack);
   4259 			}
   4260 			rxbuf->m_head = NULL;
   4261 			rxbuf->m_pack = NULL;
   4262 			if (rxbuf->hmap != NULL) {
   4263 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
   4264 				rxbuf->hmap = NULL;
   4265 			}
   4266 			if (rxbuf->pmap != NULL) {
   4267 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   4268 				rxbuf->pmap = NULL;
   4269 			}
   4270 		}
   4271 		if (rxr->rx_buffers != NULL) {
   4272 			free(rxr->rx_buffers, M_DEVBUF);
   4273 			rxr->rx_buffers = NULL;
   4274 		}
   4275 	}
   4276 
   4277 	if (rxr->htag != NULL) {
   4278 		ixgbe_dma_tag_destroy(rxr->htag);
   4279 		rxr->htag = NULL;
   4280 	}
   4281 	if (rxr->ptag != NULL) {
   4282 		ixgbe_dma_tag_destroy(rxr->ptag);
   4283 		rxr->ptag = NULL;
   4284 	}
   4285 
   4286 	return;
   4287 }
   4288 
   4289 static __inline void
   4290 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   4291 {
   4292 	struct ethercom *ec;
   4293 	struct adapter	*adapter = ifp->if_softc;
   4294 	int s;
   4295 
   4296 	ec = &adapter->osdep.ec;
   4297 
   4298 #ifdef LRO
   4299 	/*
   4300 	 * At the moment LRO is only for IPv4/TCP packets whose TCP
   4301 	 * checksum has been verified by hardware, and which carry no
   4302 	 * VLAN tag in their ethernet header.
   4303 	 */
   4304 	if (rxr->lro_enabled &&
   4305 	    (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   4306 	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   4307 	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   4308 	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
   4309 	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   4310 	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   4311 		/*
   4312 		 * Send to the stack if:
   4313 		 *  - LRO not enabled, or
   4314 		 *  - no LRO resources, or
   4315 		 *  - lro enqueue fails
   4316 		 */
   4317 		if (rxr->lro.lro_cnt != 0)
   4318 			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   4319 				return;
   4320 	}
   4321 #endif /* LRO */
   4322 
   4323 	IXGBE_RX_UNLOCK(rxr);
   4324 
   4325 	s = splnet();
   4326 	/* Pass this up to any BPF listeners. */
   4327 	bpf_mtap(ifp, m);
   4328 	(*ifp->if_input)(ifp, m);
   4329 	splx(s);
   4330 
   4331 	IXGBE_RX_LOCK(rxr);
   4332 }
   4333 
   4334 static __inline void
   4335 ixgbe_rx_discard(struct rx_ring *rxr, int i)
   4336 {
   4337 	struct ixgbe_rx_buf	*rbuf;
   4338 
   4339 	rbuf = &rxr->rx_buffers[i];
   4340 
   4341 	if (rbuf->fmp != NULL) {	/* Partial chain? */
   4342 		rbuf->fmp->m_flags |= M_PKTHDR;
   4343 		m_freem(rbuf->fmp);
   4344 		rbuf->fmp = NULL;
   4345 	}
   4346 
   4347 	/*
   4348 	** With advanced descriptors the writeback
   4349 	** clobbers the buffer addrs, so it's easier
   4350 	** to just free the existing mbufs and take
   4351 	** the normal refresh path to get new buffers
   4352 	** and mapping.
   4353 	*/
   4354 	if (rbuf->m_head) {
   4355 		m_free(rbuf->m_head);
   4356 		rbuf->m_head = NULL;
   4357 	}
   4358 
   4359 	if (rbuf->m_pack) {
   4360 		m_free(rbuf->m_pack);
   4361 		rbuf->m_pack = NULL;
   4362 	}
   4363 
   4364 	return;
   4365 }
   4366 
   4367 
   4368 /*********************************************************************
   4369  *
   4370  *  This routine executes in interrupt context. It replenishes
   4371  *  the mbufs in the descriptor ring and passes data that has
   4372  *  been DMA'd into host memory up to the upper layer.
   4373  *
   4374  *  We loop at most count times if count is > 0, or until done if
   4375  *  count < 0.
   4376  *
   4377  *  Return TRUE for more work, FALSE for all clean.
   4378  *********************************************************************/
   4379 static bool
   4380 ixgbe_rxeof(struct ix_queue *que, int count)
   4381 {
   4382 	struct adapter		*adapter = que->adapter;
   4383 	struct rx_ring		*rxr = que->rxr;
   4384 	struct ifnet		*ifp = adapter->ifp;
   4385 #ifdef LRO
   4386 	struct lro_ctrl		*lro = &rxr->lro;
   4387 	struct lro_entry	*queued;
   4388 #endif /* LRO */
   4389 	int			i, nextp, processed = 0;
   4390 	u32			staterr = 0;
   4391 	union ixgbe_adv_rx_desc	*cur;
   4392 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   4393 
   4394 	IXGBE_RX_LOCK(rxr);
   4395 
   4396 	for (i = rxr->next_to_check; count != 0;) {
   4397 		struct mbuf	*sendmp, *mh, *mp;
   4398 		u32		rsc, ptype;
   4399 		u16		hlen, plen, hdr, vtag;
   4400 		bool		eop;
   4401 
   4402 		/* Sync the ring. */
   4403 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4404 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   4405 
   4406 		cur = &rxr->rx_base[i];
   4407 		staterr = le32toh(cur->wb.upper.status_error);
   4408 
   4409 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   4410 			break;
   4411 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   4412 			break;
   4413 
   4414 		count--;
   4415 		sendmp = NULL;
   4416 		nbuf = NULL;
   4417 		rsc = 0;
   4418 		cur->wb.upper.status_error = 0;
   4419 		rbuf = &rxr->rx_buffers[i];
   4420 		mh = rbuf->m_head;
   4421 		mp = rbuf->m_pack;
   4422 
   4423 		plen = le16toh(cur->wb.upper.length);
   4424 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   4425 		    IXGBE_RXDADV_PKTTYPE_MASK;
   4426 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
   4427 		vtag = le16toh(cur->wb.upper.vlan);
   4428 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   4429 
   4430 		/* Make sure bad packets are discarded */
   4431 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
   4432 		    (rxr->discard)) {
   4433 			ifp->if_ierrors++;
   4434 			rxr->rx_discarded.ev_count++;
   4435 			if (eop)
   4436 				rxr->discard = FALSE;
   4437 			else
   4438 				rxr->discard = TRUE;
   4439 			ixgbe_rx_discard(rxr, i);
   4440 			goto next_desc;
   4441 		}
   4442 
   4443 		/*
   4444 		** On the 82599, which supports hardware
   4445 		** LRO (called HW RSC), packets need not
   4446 		** be fragmented across sequential
   4447 		** descriptors; instead the next descriptor
   4448 		** is indicated in bits of this descriptor.
   4449 		** This also means that we might process
   4450 		** more than one packet at a time, something
   4451 		** that has never been true before; it
   4452 		** required eliminating global chain pointers
   4453 		** in favor of what we are doing here.  -jfv
   4454 		*/
   4455 		if (!eop) {
   4456 			/*
   4457 			** Figure out the next descriptor
   4458 			** of this frame.
   4459 			*/
   4460 			if (rxr->hw_rsc == TRUE) {
   4461 				rsc = ixgbe_rsc_count(cur);
   4462 				rxr->rsc_num += (rsc - 1);
   4463 			}
   4464 			if (rsc) { /* Get hardware index */
   4465 				nextp = ((staterr &
   4466 				    IXGBE_RXDADV_NEXTP_MASK) >>
   4467 				    IXGBE_RXDADV_NEXTP_SHIFT);
   4468 			} else { /* Just sequential */
   4469 				nextp = i + 1;
   4470 				if (nextp == adapter->num_rx_desc)
   4471 					nextp = 0;
   4472 			}
   4473 			nbuf = &rxr->rx_buffers[nextp];
   4474 			prefetch(nbuf);
   4475 		}
   4476 		/*
   4477 		** The header mbuf is ONLY used when header
   4478 		** split is enabled, otherwise we get normal
   4479 		** behavior, ie, both header and payload
   4480 		** are DMA'd into the payload buffer.
   4481 		**
   4482 		** Rather than using the fmp/lmp global pointers
   4483 		** we now keep the head of a packet chain in the
   4484 		** buffer struct and pass this along from one
   4485 		** descriptor to the next, until we get EOP.
   4486 		*/
   4487 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
   4488 			/* This must be an initial descriptor */
   4489 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
   4490 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
   4491 			if (hlen > IXGBE_RX_HDR)
   4492 				hlen = IXGBE_RX_HDR;
   4493 			mh->m_len = hlen;
   4494 			mh->m_flags |= M_PKTHDR;
   4495 			mh->m_next = NULL;
   4496 			mh->m_pkthdr.len = mh->m_len;
   4497 			/* Null buf pointer so it is refreshed */
   4498 			rbuf->m_head = NULL;
   4499 			/*
   4500 			** Check the payload length, this
   4501 			** could be zero if its a small
   4502 			** packet.
   4503 			*/
   4504 			if (plen > 0) {
   4505 				mp->m_len = plen;
   4506 				mp->m_next = NULL;
   4507 				mp->m_flags &= ~M_PKTHDR;
   4508 				mh->m_next = mp;
   4509 				mh->m_pkthdr.len += mp->m_len;
   4510 				/* Null buf pointer so it is refreshed */
   4511 				rbuf->m_pack = NULL;
   4512 				rxr->rx_split_packets.ev_count++;
   4513 			}
   4514 			/*
   4515 			** Now create the forward
   4516 			** chain so when complete
   4517 			** we won't have to.
   4518 			*/
   4519 			if (eop == 0) {
   4520 				/* stash the chain head */
   4521 				nbuf->fmp = mh;
   4522 				/* Make forward chain */
   4523 				if (plen)
   4524 					mp->m_next = nbuf->m_pack;
   4525 				else
   4526 					mh->m_next = nbuf->m_pack;
   4527 			} else {
   4528 				/* Singlet, prepare to send */
   4529 				sendmp = mh;
   4530 				if (VLAN_ATTACHED(&adapter->osdep.ec) &&
   4531 				  (staterr & IXGBE_RXD_STAT_VP)) {
   4532 					/* XXX Do something reasonable on
   4533 					 * error.
   4534 					 */
   4535 #if 0
   4536 					printf("%s.%d: VLAN_INPUT_TAG\n",
   4537 					    __func__, __LINE__);
   4538 					Debugger();
   4539 #endif
   4540 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   4541 					    printf("%s: could not apply VLAN "
   4542 					        "tag", __func__));
   4543 				}
   4544 			}
   4545 		} else {
   4546 			/*
   4547 			** Either no header split, or a
   4548 			** secondary piece of a fragmented
   4549 			** split packet.
   4550 			*/
   4551 			mp->m_len = plen;
   4552 			/*
   4553 			** See if there is a stored head
   4554 			** that determines what we are
   4555 			*/
   4556 			sendmp = rbuf->fmp;
   4557 			rbuf->m_pack = rbuf->fmp = NULL;
   4558 
   4559 			if (sendmp != NULL) /* secondary frag */
   4560 				sendmp->m_pkthdr.len += mp->m_len;
   4561 			else {
   4562 				/* first desc of a non-ps chain */
   4563 				sendmp = mp;
   4564 				sendmp->m_flags |= M_PKTHDR;
   4565 				sendmp->m_pkthdr.len = mp->m_len;
   4566 				if (staterr & IXGBE_RXD_STAT_VP) {
   4567 					/* XXX Do something reasonable on
   4568 					 * error.
   4569 					 */
   4570 #if 0
   4571 					printf("%s.%d: VLAN_INPUT_TAG\n",
   4572 					    __func__, __LINE__);
   4573 					Debugger();
   4574 #endif
   4575 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
   4576 					    printf("%s: could not apply VLAN "
   4577 					        "tag", __func__));
   4578 				}
   4579 			}
   4580 			/* Pass the head pointer on */
   4581 			if (eop == 0) {
   4582 				nbuf->fmp = sendmp;
   4583 				sendmp = NULL;
   4584 				mp->m_next = nbuf->m_pack;
   4585 			}
   4586 		}
   4587 		++processed;
   4588 		/* Sending this frame? */
   4589 		if (eop) {
   4590 			sendmp->m_pkthdr.rcvif = ifp;
   4591 			ifp->if_ipackets++;
   4592 			rxr->rx_packets.ev_count++;
   4593 			/* capture data for AIM */
   4594 			rxr->bytes += sendmp->m_pkthdr.len;
   4595 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   4596 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   4597 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   4598 				   &adapter->stats);
   4599 			}
   4600 #if __FreeBSD_version >= 800000
   4601 			sendmp->m_pkthdr.flowid = que->msix;
   4602 			sendmp->m_flags |= M_FLOWID;
   4603 #endif
   4604 		}
   4605 next_desc:
   4606 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   4607 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   4608 
   4609 		/* Advance our pointers to the next descriptor. */
   4610 		if (++i == adapter->num_rx_desc)
   4611 			i = 0;
   4612 
   4613 		/* Now send to the stack or do LRO */
   4614 		if (sendmp != NULL) {
   4615 			rxr->next_to_check = i;
   4616 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   4617 			i = rxr->next_to_check;
   4618 		}
   4619 
   4620 		/* Every 8 descriptors we refresh the mbuf supply */
   4621 		if (processed == 8) {
   4622 			ixgbe_refresh_mbufs(rxr, i);
   4623 			processed = 0;
   4624 		}
   4625 	}
   4626 
   4627 	/* Refresh any remaining buf structs */
   4628 	if (ixgbe_rx_unrefreshed(rxr))
   4629 		ixgbe_refresh_mbufs(rxr, i);
   4630 
   4631 	rxr->next_to_check = i;
   4632 
   4633 #ifdef LRO
   4634 	/*
   4635 	 * Flush any outstanding LRO work
   4636 	 */
   4637 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   4638 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   4639 		tcp_lro_flush(lro, queued);
   4640 	}
   4641 #endif /* LRO */
   4642 
   4643 	IXGBE_RX_UNLOCK(rxr);
   4644 
   4645 	/*
   4646 	** We still have cleaning to do?
   4647 	** Schedule another interrupt if so.
   4648 	*/
   4649 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
   4650 		ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
   4651 		return true;
   4652 	}
   4653 
   4654 	return false;
   4655 }
   4656 
   4657 
   4658 /*********************************************************************
   4659  *
   4660  *  Verify that the hardware indicated that the checksum is valid.
   4661  *  Inform the stack of the checksum status so that it
   4662  *  doesn't spend time verifying the checksum again.
   4663  *
   4664  *********************************************************************/
   4665 static void
   4666 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   4667     struct ixgbe_hw_stats *stats)
   4668 {
   4669 	u16	status = (u16) staterr;
   4670 	u8	errors = (u8) (staterr >> 24);
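        	/* status in the low word, error bits in the top byte */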
   4671 	bool	sctp = FALSE;
   4672 
   4673 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   4674 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   4675 		sctp = TRUE;
   4676 
   4677 	if (status & IXGBE_RXD_STAT_IPCS) {
   4678 		stats->ipcs.ev_count++;
   4679 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   4680 			/* IP Checksum Good */
   4681 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   4682 
   4683 		} else {
   4684 			stats->ipcs_bad.ev_count++;
   4685 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   4686 		}
   4687 	}
   4688 	if (status & IXGBE_RXD_STAT_L4CS) {
   4689 		stats->l4cs.ev_count++;
   4690 		u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   4691 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   4692 			mp->m_pkthdr.csum_flags |= type;
   4693 		} else {
   4694 			stats->l4cs_bad.ev_count++;
   4695 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   4696 		}
   4697 	}
   4698 	return;
   4699 }
   4700 
   4701 
   4702 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   4703 /*
   4704 ** This routine is run via a vlan config EVENT;
   4705 ** it enables us to use the HW Filter table since
   4706 ** we can get the vlan id. This just creates the
   4707 ** entry in the soft version of the VFTA, init will
   4708 ** repopulate the real table.
   4709 */
   4710 static void
   4711 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   4712 {
   4713 	struct adapter	*adapter = ifp->if_softc;
   4714 	u16		index, bit;
   4715 
   4716 	if (ifp->if_softc != arg)	/* Not our event */
   4717 		return;
   4718 
   4719 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   4720 		return;
   4721 
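        	/*
        	** The VFTA is 128 32-bit words with one bit per VLAN ID,
        	** so the word index is vtag >> 5 and the bit is vtag & 0x1F;
        	** e.g. vtag 100 lands in word 3, bit 4.
        	*/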
   4722 	IXGBE_CORE_LOCK(adapter);
   4723 	index = (vtag >> 5) & 0x7F;
   4724 	bit = vtag & 0x1F;
   4725 	adapter->shadow_vfta[index] |= (1 << bit);
   4726 	ixgbe_init_locked(adapter);
   4727 	IXGBE_CORE_UNLOCK(adapter);
   4728 }
   4729 
   4730 /*
   4731 ** This routine is run via a vlan
   4732 ** unconfig EVENT; it removes our entry
   4733 ** from the soft vfta.
   4734 */
   4735 static void
   4736 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   4737 {
   4738 	struct adapter	*adapter = ifp->if_softc;
   4739 	u16		index, bit;
   4740 
   4741 	if (ifp->if_softc != arg)
   4742 		return;
   4743 
   4744 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   4745 		return;
   4746 
   4747 	IXGBE_CORE_LOCK(adapter);
   4748 	index = (vtag >> 5) & 0x7F;
   4749 	bit = vtag & 0x1F;
   4750 	adapter->shadow_vfta[index] &= ~(1 << bit);
   4751 	/* Re-init to load the changes */
   4752 	ixgbe_init_locked(adapter);
   4753 	IXGBE_CORE_UNLOCK(adapter);
   4754 }
   4755 #endif
   4756 
   4757 static void
   4758 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   4759 {
   4760 	struct ethercom *ec = &adapter->osdep.ec;
   4761 	struct ixgbe_hw *hw = &adapter->hw;
   4762 	u32		ctrl;
   4763 
   4764 	/*
   4765 	** We get here thru init_locked, meaning
   4766 	** a soft reset; this has already cleared
   4767 	** the VFTA and other state, so if no
   4768 	** vlans have been registered, do nothing.
   4769 	*/
   4770 	if (!VLAN_ATTACHED(&adapter->osdep.ec)) {
   4771 		return;
   4772 	}
   4773 
   4774 	/*
   4775 	** A soft reset zero's out the VFTA, so
   4776 	** we need to repopulate it now.
   4777 	*/
   4778 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
   4779 		if (adapter->shadow_vfta[i] != 0)
   4780 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   4781 			    adapter->shadow_vfta[i]);
   4782 
   4783 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   4784 	/* Enable the Filter Table if enabled */
   4785 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   4786 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   4787 		ctrl |= IXGBE_VLNCTRL_VFE;
   4788 	}
   4789 	if (hw->mac.type == ixgbe_mac_82598EB)
   4790 		ctrl |= IXGBE_VLNCTRL_VME;
   4791 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   4792 
   4793 	/* On 82599 the VLAN enable is per-queue in RXDCTL */
   4794 	if (hw->mac.type != ixgbe_mac_82598EB)
   4795 		for (int i = 0; i < adapter->num_queues; i++) {
   4796 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
   4797 			ctrl |= IXGBE_RXDCTL_VME;
   4798 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
   4799 		}
   4800 }
   4801 
   4802 static void
   4803 ixgbe_enable_intr(struct adapter *adapter)
   4804 {
   4805 	struct ixgbe_hw *hw = &adapter->hw;
   4806 	struct ix_queue *que = adapter->queues;
   4807 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4808 
   4809 
   4810 	/* Enable Fan Failure detection */
   4811 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
   4812 		mask |= IXGBE_EIMS_GPI_SDP1;
   4813 	else {
   4814 		mask |= IXGBE_EIMS_ECC;
   4815 		mask |= IXGBE_EIMS_GPI_SDP1;
   4816 		mask |= IXGBE_EIMS_GPI_SDP2;
   4817 #ifdef IXGBE_FDIR
   4818 		mask |= IXGBE_EIMS_FLOW_DIR;
   4819 #endif
   4820 	}
   4821 
   4822 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4823 
   4824 	/* With RSS we use auto clear */
   4825 	if (adapter->msix_mem) {
   4826 		mask = IXGBE_EIMS_ENABLE_MASK;
   4827 		/* Don't autoclear Link */
   4828 		mask &= ~IXGBE_EIMS_OTHER;
   4829 		mask &= ~IXGBE_EIMS_LSC;
   4830 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4831 	}
   4832 
   4833 	/*
   4834 	** Now enable all queues, this is done separately to
   4835 	** allow for handling the extended (beyond 32) MSIX
   4836 	** vectors that can be used by 82599
   4837 	*/
   4838 	for (int i = 0; i < adapter->num_queues; i++, que++)
   4839 		ixgbe_enable_queue(adapter, que->msix);
   4840 
   4841 	IXGBE_WRITE_FLUSH(hw);
   4842 
   4843 	return;
   4844 }
   4845 
   4846 static void
   4847 ixgbe_disable_intr(struct adapter *adapter)
   4848 {
   4849 	if (adapter->msix_mem)
   4850 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
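        	/*
        	** 82598 keeps every cause in its one 32-bit EIMC; later MACs
        	** keep the misc causes in the high half of EIMC and spread
        	** the (up to 64) queue vectors over two EIMC_EX registers.
        	*/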
   4851 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4852 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4853 	} else {
   4854 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4855 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4856 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4857 	}
   4858 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4859 	return;
   4860 }
   4861 
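        /*
        ** NetBSD's pci_conf_read()/pci_conf_write() operate on aligned
        ** 32-bit words, so these 16-bit accessors read (or read-modify-
        ** write) the containing dword and select the proper half; e.g.
        ** an offset of 0xA2 is served by bits 31:16 of the dword at 0xA0.
        */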
   4862 u16
   4863 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
   4864 {
   4865 	switch (reg % 4) {
   4866 	case 0:
   4867 		return pci_conf_read(hw->back->pc, hw->back->tag, reg) &
   4868 		    __BITS(15, 0);
   4869 	case 2:
   4870 		return __SHIFTOUT(pci_conf_read(hw->back->pc, hw->back->tag,
   4871 		    reg - 2), __BITS(31, 16));
   4872 	default:
   4873 		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
   4874 		break;
   4875 	}
   4876 }
   4877 
   4878 void
   4879 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
   4880 {
   4881 	pcireg_t old;
   4882 
   4883 	switch (reg % 4) {
   4884 	case 0:
   4885 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg) &
   4886 		    __BITS(31, 16);
   4887 		pci_conf_write(hw->back->pc, hw->back->tag, reg, value | old);
   4888 		break;
   4889 	case 2:
   4890 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg - 2) &
   4891 		    __BITS(15, 0);
   4892 		pci_conf_write(hw->back->pc, hw->back->tag, reg - 2,
   4893 		    __SHIFTIN(value, __BITS(31, 16)) | old);
   4894 		break;
   4895 	default:
   4896 		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
   4897 		break;
   4898 	}
   4899 
   4900 	return;
   4901 }
   4902 
   4903 /*
   4904 ** Setup the correct IVAR register for a particular MSIX interrupt
   4905 **   (yes this is all very magic and confusing :)
   4906 **  - entry is the register array entry
   4907 **  - vector is the MSIX vector for this queue
   4908 **  - type is RX/TX/MISC
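        **
        **  e.g. on 82599, queue entry 5 with type 1 (TX) selects
        **  IVAR(5 >> 1) = IVAR(2) at bit offset (16 * (5 & 1)) +
        **  (8 * 1) = 24, i.e. the top byte of that register.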
   4909 */
   4910 static void
   4911 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   4912 {
   4913 	struct ixgbe_hw *hw = &adapter->hw;
   4914 	u32 ivar, index;
   4915 
   4916 	vector |= IXGBE_IVAR_ALLOC_VAL;
   4917 
   4918 	switch (hw->mac.type) {
   4919 
   4920 	case ixgbe_mac_82598EB:
   4921 		if (type == -1)
   4922 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   4923 		else
   4924 			entry += (type * 64);
   4925 		index = (entry >> 2) & 0x1F;
   4926 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   4927 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   4928 		ivar |= (vector << (8 * (entry & 0x3)));
   4929 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   4930 		break;
   4931 
   4932 	case ixgbe_mac_82599EB:
   4933 		if (type == -1) { /* MISC IVAR */
   4934 			index = (entry & 1) * 8;
   4935 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   4936 			ivar &= ~(0xFF << index);
   4937 			ivar |= (vector << index);
   4938 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   4939 		} else {	/* RX/TX IVARS */
   4940 			index = (16 * (entry & 1)) + (8 * type);
   4941 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   4942 			ivar &= ~(0xFF << index);
   4943 			ivar |= (vector << index);
   4944 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   4945 		}
   4946 		break;

   4947 	default:
   4948 		break;
   4949 	}
   4950 }
   4951 
   4952 static void
   4953 ixgbe_configure_ivars(struct adapter *adapter)
   4954 {
   4955 	struct  ix_queue *que = adapter->queues;
   4956 	u32 newitr;
   4957 
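        	/*
        	** EITR holds its interval in bits 11:3, so 8000000 / rate,
        	** masked to 0x0FF8, is the register value directly; e.g. a
        	** cap of 31250 interrupts/s encodes as 0x100.
        	*/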
   4958 	if (ixgbe_max_interrupt_rate > 0)
   4959 		newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4960 	else
   4961 		newitr = 0;
   4962 
   4963 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4964 		/* First the RX queue entry */
   4965 		ixgbe_set_ivar(adapter, i, que->msix, 0);
   4966 		/* ... and the TX */
   4967 		ixgbe_set_ivar(adapter, i, que->msix, 1);
   4968 		/* Set an Initial EITR value */
   4969 		IXGBE_WRITE_REG(&adapter->hw,
   4970 		    IXGBE_EITR(que->msix), newitr);
   4971 	}
   4972 
   4973 	/* For the Link interrupt */
   4974 	ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
   4975 }
   4976 
   4977 /*
   4978 ** ixgbe_sfp_probe - called in the local timer to
   4979 ** determine if a port had optics inserted.
   4980 */
   4981 static bool
        ixgbe_sfp_probe(struct adapter *adapter)
   4982 {
   4983 	struct ixgbe_hw	*hw = &adapter->hw;
   4984 	device_t	dev = adapter->dev;
   4985 	bool		result = FALSE;
   4986 
   4987 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4988 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4989 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4990 		if (ret)
   4991 			goto out;
   4992 		ret = hw->phy.ops.reset(hw);
   4993 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4994 			device_printf(dev, "Unsupported SFP+ module detected!\n");
   4995 			device_printf(dev, "Reload driver with supported module.\n");
   4996 			adapter->sfp_probe = FALSE;
   4997 			goto out;
   4998 		} else
   4999 			device_printf(dev, "SFP+ module detected!\n");
   5000 		/* We now have supported optics */
   5001 		adapter->sfp_probe = FALSE;
   5002 		/* Set the optics type so system reports correctly */
   5003 		ixgbe_setup_optics(adapter);
   5004 		result = TRUE;
   5005 	}
   5006 out:
   5007 	return (result);
   5008 }
   5009 
   5010 /*
   5011 ** Tasklet handler for MSIX Link interrupts
   5012 **  - do outside interrupt since it might sleep
   5013 */
   5014 static void
   5015 ixgbe_handle_link(void *context)
   5016 {
   5017 	struct adapter  *adapter = context;
   5018 
   5019 	ixgbe_check_link(&adapter->hw,
   5020 	    &adapter->link_speed, &adapter->link_up, 0);
   5021 	ixgbe_update_link_status(adapter);
   5022 }
   5023 
   5024 /*
   5025 ** Tasklet for handling SFP module interrupts
   5026 */
   5027 static void
   5028 ixgbe_handle_mod(void *context)
   5029 {
   5030 	struct adapter  *adapter = context;
   5031 	struct ixgbe_hw *hw = &adapter->hw;
   5032 	device_t	dev = adapter->dev;
   5033 	u32 err;
   5034 
   5035 	err = hw->phy.ops.identify_sfp(hw);
   5036 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   5037 		device_printf(dev,
   5038 		    "Unsupported SFP+ module type was detected.\n");
   5039 		return;
   5040 	}
   5041 	err = hw->mac.ops.setup_sfp(hw);
   5042 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   5043 		device_printf(dev,
   5044 		    "Setup failure - unsupported SFP+ module type.\n");
   5045 		return;
   5046 	}
   5047 	softint_schedule(adapter->msf_si);
   5048 	return;
   5049 }
   5050 
   5051 
   5052 /*
   5053 ** Tasklet for handling MSF (multispeed fiber) interrupts
   5054 */
   5055 static void
   5056 ixgbe_handle_msf(void *context)
   5057 {
   5058 	struct adapter  *adapter = context;
   5059 	struct ixgbe_hw *hw = &adapter->hw;
   5060 	u32 autoneg;
   5061 	bool negotiate;
   5062 
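        	/* If nothing is being advertised, fall back to the PHY's
        	   full capability list before (re)running link setup. */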
   5063 	autoneg = hw->phy.autoneg_advertised;
   5064 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   5065 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   5066 	if (hw->mac.ops.setup_link)
   5067 		hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
   5068 	return;
   5069 }
   5070 
   5071 #ifdef IXGBE_FDIR
   5072 /*
   5073 ** Tasklet for reinitializing the Flow Director filter table
   5074 */
   5075 static void
   5076 ixgbe_reinit_fdir(void *context)
   5077 {
   5078 	struct adapter  *adapter = context;
   5079 	struct ifnet   *ifp = adapter->ifp;
   5080 
   5081 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
   5082 		return;
   5083 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
   5084 	adapter->fdir_reinit = 0;
   5085 	/* Restart the interface */
   5086 	ifp->if_flags |= IFF_RUNNING;
   5087 	return;
   5088 }
   5089 #endif
   5090 
   5091 /**********************************************************************
   5092  *
   5093  *  Update the board statistics counters.
   5094  *
   5095  **********************************************************************/
   5096 static void
   5097 ixgbe_update_stats_counters(struct adapter *adapter)
   5098 {
   5099 	struct ifnet   *ifp = adapter->ifp;
   5100 	struct ixgbe_hw *hw = &adapter->hw;
   5101 	u32  missed_rx = 0, bprc, lxon, lxoff, total;
   5102 	u64  total_missed_rx = 0;
   5103 
   5104 	adapter->stats.crcerrs.ev_count += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   5105 	adapter->stats.illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   5106 	adapter->stats.errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   5107 	adapter->stats.mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   5108 
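        	/*
        	** The MPC and PXON/PXOFF counters are kept per packet
        	** buffer (8 of them) rather than per queue, so fold buffer
        	** i onto queue i % num_queues for the event counters.
        	*/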
   5109 	for (int i = 0; i < __arraycount(adapter->stats.mpc); i++) {
   5110 		int j = i % adapter->num_queues;
   5111 		u32 mp;
   5112 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   5113 		/* missed_rx tallies misses for the gprc workaround */
   5114 		missed_rx += mp;
   5115 		/* global total per queue */
   5116 		adapter->stats.mpc[j].ev_count += mp;
   5117 		/* Running comprehensive total for stats display */
   5118 		total_missed_rx += adapter->stats.mpc[j].ev_count;
   5119 		if (hw->mac.type == ixgbe_mac_82598EB)
   5120 			adapter->stats.rnbc[j] +=
   5121 			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   5122 		adapter->stats.pxontxc[j].ev_count +=
   5123 		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   5124 		adapter->stats.pxonrxc[j].ev_count +=
   5125 		    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   5126 		adapter->stats.pxofftxc[j].ev_count +=
   5127 		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   5128 		adapter->stats.pxoffrxc[j].ev_count +=
   5129 		    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   5130 		adapter->stats.pxon2offc[j].ev_count +=
   5131 		    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   5132 	}
   5133 	for (int i = 0; i < __arraycount(adapter->stats.qprc); i++) {
   5134 		int j = i % adapter->num_queues;
   5135 		adapter->stats.qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   5136 		adapter->stats.qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   5137 		adapter->stats.qbrc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
   5138 		adapter->stats.qbrc[j].ev_count +=
   5139 		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
   5140 		adapter->stats.qbtc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
   5141 		adapter->stats.qbtc[j].ev_count +=
   5142 		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
   5143 		adapter->stats.qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   5144 	}
   5145 	adapter->stats.mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   5146 	adapter->stats.mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   5147 	adapter->stats.rlec.ev_count += IXGBE_READ_REG(hw, IXGBE_RLEC);
   5148 
   5149 	/* Hardware workaround, gprc counts missed packets */
   5150 	adapter->stats.gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   5151 
   5152 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   5153 	adapter->stats.lxontxc.ev_count += lxon;
   5154 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   5155 	adapter->stats.lxofftxc.ev_count += lxoff;
   5156 	total = lxon + lxoff;
   5157 
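        	/*
        	** GPTC and GOTC also count the XON/XOFF pause frames the
        	** MAC itself sent, so the good-transmit updates below
        	** subtract total frames (and total * ETHER_MIN_LEN octets).
        	*/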
   5158 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5159 		adapter->stats.gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   5160 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   5161 		adapter->stats.gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   5162 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   5163 		adapter->stats.tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   5164 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   5165 		adapter->stats.lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   5166 		adapter->stats.lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   5167 	} else {
   5168 		adapter->stats.lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   5169 		adapter->stats.lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   5170 		/* 82598 only has a counter in the high register */
   5171 		adapter->stats.gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   5172 		adapter->stats.gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   5173 		adapter->stats.tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   5174 	}
   5175 
   5176 	/*
   5177 	 * Workaround: mprc hardware is incorrectly counting
   5178 	 * broadcasts, so for now we subtract those.
   5179 	 */
   5180 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   5181 	adapter->stats.bprc.ev_count += bprc;
   5182 	adapter->stats.mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) -
        	    ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   5183 
   5184 	adapter->stats.prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   5185 	adapter->stats.prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   5186 	adapter->stats.prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   5187 	adapter->stats.prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   5188 	adapter->stats.prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   5189 	adapter->stats.prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   5190 
   5191 	adapter->stats.gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   5192 	adapter->stats.mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   5193 	adapter->stats.ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   5194 
   5195 	adapter->stats.ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   5196 	adapter->stats.rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   5197 	adapter->stats.roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   5198 	adapter->stats.rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   5199 	adapter->stats.mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   5200 	adapter->stats.mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   5201 	adapter->stats.mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   5202 	adapter->stats.tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   5203 	adapter->stats.tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   5204 	adapter->stats.ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   5205 	adapter->stats.ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   5206 	adapter->stats.ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   5207 	adapter->stats.ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   5208 	adapter->stats.ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   5209 	adapter->stats.bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   5210 	adapter->stats.xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   5211 	adapter->stats.fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   5212 	adapter->stats.fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   5213 
   5214 	/* Only read FCOE on 82599 */
   5215 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5216 		adapter->stats.fcoerpdc.ev_count +=
   5217 		    IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   5218 		adapter->stats.fcoeprc.ev_count +=
   5219 		    IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   5220 		adapter->stats.fcoeptc.ev_count +=
   5221 		    IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   5222 		adapter->stats.fcoedwrc.ev_count +=
   5223 		    IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   5224 		adapter->stats.fcoedwtc.ev_count +=
   5225 		    IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   5226 	}
   5227 
   5228 	/* Fill out the OS statistics structure */
   5229 	ifp->if_ipackets = adapter->stats.gprc.ev_count;
   5230 	ifp->if_opackets = adapter->stats.gptc.ev_count;
   5231 	ifp->if_ibytes = adapter->stats.gorc.ev_count;
   5232 	ifp->if_obytes = adapter->stats.gotc.ev_count;
   5233 	ifp->if_imcasts = adapter->stats.mprc.ev_count;
   5234 	ifp->if_collisions = 0;
   5235 
   5236 	/* Rx Errors */
   5237 	ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs.ev_count +
   5238 		adapter->stats.rlec.ev_count;
   5239 }
   5240 
   5241 /** ixgbe_sysctl_tdh_handler - Handler function
   5242  *  Retrieves the TDH value from the hardware
   5243  */
   5244 static int
   5245 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   5246 {
   5247 	struct sysctlnode node;
   5248 	uint32_t val;
   5249 	struct tx_ring *txr;
   5250 
   5251 	node = *rnode;
   5252 	txr = (struct tx_ring *)node.sysctl_data;
   5253 	if (txr == NULL)
   5254 		return 0;
   5255 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   5256 	node.sysctl_data = &val;
   5257 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5258 }
   5259 
   5260 /** ixgbe_sysctl_tdt_handler - Handler function
   5261  *  Retrieves the TDT value from the hardware
   5262  */
   5263 static int
   5264 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   5265 {
   5266 	struct sysctlnode node;
   5267 	uint32_t val;
   5268 	struct tx_ring *txr;
   5269 
   5270 	node = *rnode;
   5271 	txr = (struct tx_ring *)node.sysctl_data;
   5272 	if (txr == NULL)
   5273 		return 0;
   5274 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   5275 	node.sysctl_data = &val;
   5276 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5277 }
   5278 
   5279 /** ixgbe_sysctl_rdh_handler - Handler function
   5280  *  Retrieves the RDH value from the hardware
   5281  */
   5282 static int
   5283 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   5284 {
   5285 	struct sysctlnode node;
   5286 	uint32_t val;
   5287 	struct rx_ring *rxr;
   5288 
   5289 	node = *rnode;
   5290 	rxr = (struct rx_ring *)node.sysctl_data;
   5291 	if (rxr == NULL)
   5292 		return 0;
   5293 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   5294 	node.sysctl_data = &val;
   5295 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5296 }
   5297 
   5298 /** ixgbe_sysctl_rdt_handler - Handler function
   5299  *  Retrieves the RDT value from the hardware
   5300  */
   5301 static int
   5302 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   5303 {
   5304 	struct sysctlnode node;
   5305 	uint32_t val;
   5306 	struct rx_ring *rxr;
   5307 
   5308 	node = *rnode;
   5309 	rxr = (struct rx_ring *)node.sysctl_data;
   5310 	if (rxr == NULL)
   5311 		return 0;
   5312 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   5313 	node.sysctl_data = &val;
   5314 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5315 }
   5316 
   5317 static int
   5318 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   5319 {
   5320 	struct sysctlnode node;
   5321 	struct ix_queue *que;
   5322 	uint32_t reg, usec, rate;
   5323 
   5324 	node = *rnode;
   5325 	que = (struct ix_queue *)node.sysctl_data;
   5326 	if (que == NULL)
   5327 		return 0;
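        	/*
        	** Decode the EITR interval field back into a rate; this is
        	** the inverse of the encoding in ixgbe_configure_ivars(),
        	** e.g. 0x100 reads back as 1000000 / 32 = 31250 interrupts/s.
        	*/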
   5328 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   5329 	usec = ((reg & 0x0FF8) >> 3);
   5330 	if (usec > 0)
   5331 		rate = 1000000 / usec;
   5332 	else
   5333 		rate = 0;
   5334 	node.sysctl_data = &rate;
   5335 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   5336 }
   5337 
   5338 const struct sysctlnode *
   5339 ixgbe_sysctl_instance(struct adapter *adapter)
   5340 {
   5341 	const char *dvname;
   5342 	struct sysctllog **log;
   5343 	int rc;
   5344 	const struct sysctlnode *rnode;
   5345 
   5346 	log = &adapter->sysctllog;
   5347 	dvname = device_xname(adapter->dev);
   5348 
   5349 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   5350 	    0, CTLTYPE_NODE, dvname,
   5351 	    SYSCTL_DESCR("ixgbe information and settings"),
   5352 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   5353 		goto err;
   5354 
   5355 	return rnode;
   5356 err:
   5357 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   5358 	return NULL;
   5359 }
   5360 
   5361 /*
   5362  * Add sysctl variables, one per statistic, to the system.
   5363  */
   5364 static void
   5365 ixgbe_add_hw_stats(struct adapter *adapter)
   5366 {
   5367 	device_t dev = adapter->dev;
   5368 	const struct sysctlnode *rnode, *cnode;
   5369 	struct sysctllog **log = &adapter->sysctllog;
   5370 	struct tx_ring *txr = adapter->tx_rings;
   5371 	struct rx_ring *rxr = adapter->rx_rings;
   5372 	struct ixgbe_hw	 *hw = &adapter->hw;
   5373 
   5374 	struct ixgbe_hw_stats *stats = &adapter->stats;
   5375 
   5376 	/* Driver Statistics */
   5377 #if 0
   5378 	/* These counters are not updated by the software */
   5379 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
   5380 			CTLFLAG_RD, &adapter->dropped_pkts,
   5381 			"Driver dropped packets");
   5382 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_header_failed",
   5383 			CTLFLAG_RD, &adapter->mbuf_header_failed,
   5384 			"???");
   5385 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_packet_failed",
   5386 			CTLFLAG_RD, &adapter->mbuf_packet_failed,
   5387 			"???");
   5388 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_map_avail",
   5389 			CTLFLAG_RD, &adapter->no_tx_map_avail,
   5390 			"???");
   5391 #endif
   5392 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   5393 	    NULL, device_xname(dev), "Handled queue in softint");
   5394 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   5395 	    NULL, device_xname(dev), "Requeued in softint");
   5396 	evcnt_attach_dynamic(&adapter->morerx, EVCNT_TYPE_MISC,
   5397 	    NULL, device_xname(dev), "Interrupt handler more rx");
   5398 	evcnt_attach_dynamic(&adapter->moretx, EVCNT_TYPE_MISC,
   5399 	    NULL, device_xname(dev), "Interrupt handler more tx");
   5400 	evcnt_attach_dynamic(&adapter->txloops, EVCNT_TYPE_MISC,
   5401 	    NULL, device_xname(dev), "Interrupt handler tx loops");
   5402 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   5403 	    NULL, device_xname(dev), "Driver tx dma soft fail EFBIG");
   5404 	evcnt_attach_dynamic(&adapter->m_defrag_failed, EVCNT_TYPE_MISC,
   5405 	    NULL, device_xname(dev), "m_defrag() failed");
   5406 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   5407 	    NULL, device_xname(dev), "Driver tx dma hard fail EFBIG");
   5408 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   5409 	    NULL, device_xname(dev), "Driver tx dma hard fail EINVAL");
   5410 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   5411 	    NULL, device_xname(dev), "Driver tx dma hard fail other");
   5412 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   5413 	    NULL, device_xname(dev), "Driver tx dma soft fail EAGAIN");
   5414 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   5415 	    NULL, device_xname(dev), "Driver tx dma soft fail ENOMEM");
   5416 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   5417 	    NULL, device_xname(dev), "Watchdog timeouts");
   5418 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   5419 	    NULL, device_xname(dev), "TSO errors");
   5420 	evcnt_attach_dynamic(&adapter->tso_tx, EVCNT_TYPE_MISC,
   5421 	    NULL, device_xname(dev), "TSO");
   5422 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_MISC,
   5423 	    NULL, device_xname(dev), "Link MSIX IRQ Handled");
   5424 
   5425 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   5426 		snprintf(adapter->queues[i].evnamebuf,
   5427 		    sizeof(adapter->queues[i].evnamebuf), "%s queue%d",
   5428 		    device_xname(dev), i);
   5429 		snprintf(adapter->queues[i].namebuf,
   5430 		    sizeof(adapter->queues[i].namebuf), "queue%d", i);
   5431 
   5432 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   5433 			aprint_error_dev(dev, "could not create sysctl root\n");
   5434 			break;
   5435 		}
   5436 
   5437 		if (sysctl_createv(log, 0, &rnode, &rnode,
   5438 		    0, CTLTYPE_NODE,
   5439 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   5440 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5441 			break;
   5442 
   5443 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5444 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5445 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   5446 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   5447 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   5448 			break;
   5449 
   5450 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5451 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5452 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   5453 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   5454 		    0, CTL_CREATE, CTL_EOL) != 0)
   5455 			break;
   5456 
   5457 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5458 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5459 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   5460 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   5461 		    0, CTL_CREATE, CTL_EOL) != 0)
   5462 			break;
   5463 
   5464 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   5465 		    NULL, adapter->queues[i].evnamebuf,
   5466 		    "Queue No Descriptor Available");
   5467 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   5468 		    NULL, adapter->queues[i].evnamebuf,
   5469 		    "Queue Packets Transmitted");
   5470 
   5471 #ifdef LRO
   5472 		struct lro_ctrl *lro = &rxr->lro;
   5473 #endif /* LRO */
   5474 
   5475 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5476 		    CTLFLAG_READONLY,
   5477 		    CTLTYPE_INT,
   5478 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   5479 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   5480 		    CTL_CREATE, CTL_EOL) != 0)
   5481 			break;
   5482 
   5483 		if (sysctl_createv(log, 0, &rnode, &cnode,
   5484 		    CTLFLAG_READONLY,
   5485 		    CTLTYPE_INT,
   5486 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   5487 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   5488 		    CTL_CREATE, CTL_EOL) != 0)
   5489 			break;
   5490 
   5491 		if (i < __arraycount(adapter->stats.mpc)) {
   5492 			evcnt_attach_dynamic(&adapter->stats.mpc[i],
   5493 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5494 			    "Missed Packet Count");
   5495 		}
   5496 		if (i < __arraycount(adapter->stats.pxontxc)) {
   5497 			evcnt_attach_dynamic(&adapter->stats.pxontxc[i],
   5498 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5499 			    "pxontxc");
   5500 			evcnt_attach_dynamic(&adapter->stats.pxonrxc[i],
   5501 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5502 			    "pxonrxc");
   5503 			evcnt_attach_dynamic(&adapter->stats.pxofftxc[i],
   5504 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5505 			    "pxofftxc");
   5506 			evcnt_attach_dynamic(&adapter->stats.pxoffrxc[i],
   5507 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5508 			    "pxoffrxc");
   5509 			evcnt_attach_dynamic(&adapter->stats.pxon2offc[i],
   5510 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5511 			    "pxon2offc");
   5512 		}
   5513 		if (i < __arraycount(adapter->stats.qprc)) {
   5514 			evcnt_attach_dynamic(&adapter->stats.qprc[i],
   5515 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5516 			    "qprc");
   5517 			evcnt_attach_dynamic(&adapter->stats.qptc[i],
   5518 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5519 			    "qptc");
   5520 			evcnt_attach_dynamic(&adapter->stats.qbrc[i],
   5521 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5522 			    "qbrc");
   5523 			evcnt_attach_dynamic(&adapter->stats.qbtc[i],
   5524 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5525 			    "qbtc");
   5526 			evcnt_attach_dynamic(&adapter->stats.qprdc[i],
   5527 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   5528 			    "qprdc");
   5529 		}
   5530 
   5531 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   5532 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   5533 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   5534 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   5535 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   5536 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   5537 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   5538 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   5539 		evcnt_attach_dynamic(&rxr->rx_split_packets, EVCNT_TYPE_MISC,
   5540 		    NULL, adapter->queues[i].evnamebuf, "Rx split packets");
   5541 		evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_MISC,
   5542 		    NULL, adapter->queues[i].evnamebuf, "Rx interrupts");
   5543 #ifdef LRO
   5544 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   5545 				CTLFLAG_RD, &lro->lro_queued, 0,
   5546 				"LRO Queued");
   5547 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   5548 				CTLFLAG_RD, &lro->lro_flushed, 0,
   5549 				"LRO Flushed");
   5550 #endif /* LRO */
   5551 	}
   5552 
   5553 	/* MAC stats get their own sub node */
   5554 
   5556 	snprintf(stats->namebuf,
   5557 	    sizeof(stats->namebuf), "%s MAC Statistics", device_xname(dev));
   5558 
   5559 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   5560 	    stats->namebuf, "rx csum offload - IP");
   5561 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   5562 	    stats->namebuf, "rx csum offload - L4");
   5563 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   5564 	    stats->namebuf, "rx csum offload - IP bad");
   5565 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   5566 	    stats->namebuf, "rx csum offload - L4 bad");
   5567 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   5568 	    stats->namebuf, "Interrupt conditions zero");
   5569 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   5570 	    stats->namebuf, "Legacy interrupts");
   5571 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   5572 	    stats->namebuf, "CRC Errors");
   5573 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   5574 	    stats->namebuf, "Illegal Byte Errors");
   5575 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   5576 	    stats->namebuf, "Byte Errors");
   5577 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   5578 	    stats->namebuf, "MAC Short Packets Discarded");
   5579 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   5580 	    stats->namebuf, "MAC Local Faults");
   5581 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   5582 	    stats->namebuf, "MAC Remote Faults");
   5583 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   5584 	    stats->namebuf, "Receive Length Errors");
   5585 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   5586 	    stats->namebuf, "Link XON Transmitted");
   5587 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   5588 	    stats->namebuf, "Link XON Received");
   5589 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   5590 	    stats->namebuf, "Link XOFF Transmitted");
   5591 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   5592 	    stats->namebuf, "Link XOFF Received");
   5593 
   5594 	/* Packet Reception Stats */
   5595 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   5596 	    stats->namebuf, "Total Octets Received");
   5597 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   5598 	    stats->namebuf, "Good Octets Received");
   5599 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   5600 	    stats->namebuf, "Total Packets Received");
   5601 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   5602 	    stats->namebuf, "Good Packets Received");
   5603 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   5604 	    stats->namebuf, "Multicast Packets Received");
   5605 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   5606 	    stats->namebuf, "Broadcast Packets Received");
   5607 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   5608 	    stats->namebuf, "64 byte frames received");
   5609 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   5610 	    stats->namebuf, "65-127 byte frames received");
   5611 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   5612 	    stats->namebuf, "128-255 byte frames received");
   5613 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   5614 	    stats->namebuf, "256-511 byte frames received");
   5615 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   5616 	    stats->namebuf, "512-1023 byte frames received");
   5617 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   5618 	    stats->namebuf, "1024-1522 byte frames received");
   5619 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   5620 	    stats->namebuf, "Receive Undersized");
   5621 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   5622 	    stats->namebuf, "Fragmented Packets Received");
   5623 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   5624 	    stats->namebuf, "Oversized Packets Received");
   5625 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   5626 	    stats->namebuf, "Received Jabber");
   5627 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   5628 	    stats->namebuf, "Management Packets Received");
   5629 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   5630 	    stats->namebuf, "Management Packets Dropped");
   5631 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   5632 	    stats->namebuf, "Checksum Errors");
   5633 
   5634 	/* Packet Transmission Stats */
   5635 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   5636 	    stats->namebuf, "Good Octets Transmitted");
   5637 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   5638 	    stats->namebuf, "Total Packets Transmitted");
   5639 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   5640 	    stats->namebuf, "Good Packets Transmitted");
   5641 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   5642 	    stats->namebuf, "Broadcast Packets Transmitted");
   5643 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   5644 	    stats->namebuf, "Multicast Packets Transmitted");
   5645 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   5646 	    stats->namebuf, "Management Packets Transmitted");
   5647 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   5648 	    stats->namebuf, "64 byte frames transmitted");
   5649 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   5650 	    stats->namebuf, "65-127 byte frames transmitted");
   5651 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   5652 	    stats->namebuf, "128-255 byte frames transmitted");
   5653 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   5654 	    stats->namebuf, "256-511 byte frames transmitted");
   5655 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   5656 	    stats->namebuf, "512-1023 byte frames transmitted");
   5657 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   5658 	    stats->namebuf, "1024-1522 byte frames transmitted");
   5659 
   5660 	/* FC Stats */
   5661 	evcnt_attach_dynamic(&stats->fccrc, EVCNT_TYPE_MISC, NULL,
   5662 	    stats->namebuf, "FC CRC Errors");
   5663 	evcnt_attach_dynamic(&stats->fclast, EVCNT_TYPE_MISC, NULL,
   5664 	    stats->namebuf, "FC Last Error");
   5665 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5666 		evcnt_attach_dynamic(&stats->fcoerpdc, EVCNT_TYPE_MISC, NULL,
   5667 		    stats->namebuf, "FCoE Packets Dropped");
   5668 		evcnt_attach_dynamic(&stats->fcoeprc, EVCNT_TYPE_MISC, NULL,
   5669 		    stats->namebuf, "FCoE Packets Received");
   5670 		evcnt_attach_dynamic(&stats->fcoeptc, EVCNT_TYPE_MISC, NULL,
   5671 		    stats->namebuf, "FCoE Packets Transmitted");
   5672 		evcnt_attach_dynamic(&stats->fcoedwrc, EVCNT_TYPE_MISC, NULL,
   5673 		    stats->namebuf, "FCoE DWords Received");
   5674 		evcnt_attach_dynamic(&stats->fcoedwtc, EVCNT_TYPE_MISC, NULL,
   5675 		    stats->namebuf, "FCoE DWords Transmitted");
   5676 	}
   5677 }
   5678 
   5679 /*
   5680 ** Set flow control using sysctl:
   5681 ** Flow control values:
   5682 ** 	0 - off
   5683 **	1 - rx pause
   5684 **	2 - tx pause
   5685 **	3 - full
   5686 */
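        /*
        ** e.g. writing 3 through whatever sysctl node is attached to this
        ** handler (that node is created elsewhere) requests full rx/tx
        ** pause via hw.fc.requested_mode.
        */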
   5687 static int
   5688 ixgbe_set_flowcntl(SYSCTLFN_ARGS)
   5689 {
   5690 	struct sysctlnode node;
   5691 	int error;
   5692 	int last = ixgbe_flow_control;
   5693 	struct adapter *adapter;
   5694 
   5695 	node = *rnode;
   5696 	adapter = (struct adapter *)node.sysctl_data;
   5697 	node.sysctl_data = &ixgbe_flow_control;
   5698 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5699 	if (error != 0 || newp == NULL)
   5700 		return error;
   5701 
   5702 	/* Don't bother if it's not changed */
   5703 	if (ixgbe_flow_control == last)
   5704 		return (0);
   5705 
   5706 	switch (ixgbe_flow_control) {
   5707 		case ixgbe_fc_rx_pause:
   5708 		case ixgbe_fc_tx_pause:
   5709 		case ixgbe_fc_full:
   5710 			adapter->hw.fc.requested_mode = ixgbe_flow_control;
   5711 			break;
   5712 		case ixgbe_fc_none:
   5713 		default:
   5714 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5715 	}
   5716 
   5717 	ixgbe_fc_enable(&adapter->hw, 0);
   5718 	return 0;
   5719 }
   5720 
   5721 static void
   5722 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
   5723         const char *description, int *limit, int value)
   5724 {
   5725 	const struct sysctlnode *rnode, *cnode;
   5726 	struct sysctllog **log = &adapter->sysctllog;
   5727 
   5728 	*limit = value;
   5729 
   5730 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL)
   5731 		aprint_error_dev(adapter->dev,
   5732 		    "could not create sysctl root\n");
   5733 	else if (sysctl_createv(log, 0, &rnode, &cnode,
   5734 	    CTLFLAG_READWRITE,
   5735 	    CTLTYPE_INT,
   5736 	    name, SYSCTL_DESCR(description),
   5737 	    NULL, 0, limit, 0,
   5738 	    CTL_CREATE, CTL_EOL) != 0) {
   5739 		aprint_error_dev(adapter->dev, "%s: could not create sysctl\n",
   5740 		    __func__);
   5741 	}
   5742 }
   5743 
   5744 /*
   5745 ** Control link advertise speed:
   5746 ** 	0 - normal
   5747 **	1 - advertise only 1G
   5748 */
   5749 static int
   5750 ixgbe_set_advertise(SYSCTLFN_ARGS)
   5751 {
   5752 	struct sysctlnode	node;
   5753 	int			t, error;
   5754 	struct adapter		*adapter;
   5755 	struct ixgbe_hw		*hw;
   5756 	ixgbe_link_speed	speed, last;
   5757 
   5758 	node = *rnode;
   5759 	adapter = (struct adapter *)node.sysctl_data;
   5760 	t = adapter->advertise;
   5761 	node.sysctl_data = &t;
   5762 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5763 	if (error != 0 || newp == NULL)
   5764 		return error;
   5765 
   5766 	if (t == -1)
   5767 		return 0;
   5768 
   5769 	adapter->advertise = t;
   5770 
   5771 	hw = &adapter->hw;
   5772 	last = hw->phy.autoneg_advertised;
   5773 
   5774 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5775 	    (hw->phy.multispeed_fiber)))
   5776 		return 0;
   5777 
   5778 	if (adapter->advertise == 1)
   5779 		speed = IXGBE_LINK_SPEED_1GB_FULL;
   5780 	else
   5781 		speed = IXGBE_LINK_SPEED_1GB_FULL |
   5782 		    IXGBE_LINK_SPEED_10GB_FULL;
   5783 
   5784 	if (speed == last) /* no change */
   5785 		return 0;
   5786 
   5787 	hw->mac.autotry_restart = TRUE;
   5788 	hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
   5789 
   5790 	return 0;
   5791 }
   5792