Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe_82599.c revision 1.27
      1 /* $NetBSD: ixgbe_82599.c,v 1.27 2021/12/14 05:26:08 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4   SPDX-License-Identifier: BSD-3-Clause
      5 
      6   Copyright (c) 2001-2017, Intel Corporation
      7   All rights reserved.
      8 
      9   Redistribution and use in source and binary forms, with or without
     10   modification, are permitted provided that the following conditions are met:
     11 
     12    1. Redistributions of source code must retain the above copyright notice,
     13       this list of conditions and the following disclaimer.
     14 
     15    2. Redistributions in binary form must reproduce the above copyright
     16       notice, this list of conditions and the following disclaimer in the
     17       documentation and/or other materials provided with the distribution.
     18 
     19    3. Neither the name of the Intel Corporation nor the names of its
     20       contributors may be used to endorse or promote products derived from
     21       this software without specific prior written permission.
     22 
     23   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     24   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     27   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     28   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     29   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     30   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     31   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     32   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     33   POSSIBILITY OF SUCH DAMAGE.
     34 
     35 ******************************************************************************/
     36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 331224 2018-03-19 20:55:05Z erj $*/
     37 
     38 #include <sys/cdefs.h>
     39 __KERNEL_RCSID(0, "$NetBSD: ixgbe_82599.c,v 1.27 2021/12/14 05:26:08 msaitoh Exp $");
     40 
     41 #include "ixgbe_type.h"
     42 #include "ixgbe_82599.h"
     43 #include "ixgbe_api.h"
     44 #include "ixgbe_common.h"
     45 #include "ixgbe_phy.h"
     46 
     47 #define IXGBE_82599_MAX_TX_QUEUES 128
     48 #define IXGBE_82599_MAX_RX_QUEUES 128
     49 #define IXGBE_82599_RAR_ENTRIES   128
     50 #define IXGBE_82599_MC_TBL_SIZE   128
     51 #define IXGBE_82599_VFT_TBL_SIZE  128
     52 #define IXGBE_82599_RX_PB_SIZE	  512
     53 
     54 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
     55 					 ixgbe_link_speed speed,
     56 					 bool autoneg_wait_to_complete);
     57 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
     58 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
     59 				   u16 offset, u16 *data);
     60 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
     61 					  u16 words, u16 *data);
     62 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
     63 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     64 					u8 dev_addr, u8 *data);
     65 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     66 					u8 dev_addr, u8 data);
     67 
     68 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
     69 {
     70 	struct ixgbe_mac_info *mac = &hw->mac;
     71 
     72 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
     73 
     74 	/*
     75 	 * enable the laser control functions for SFP+ fiber
     76 	 * and MNG not enabled
     77 	 */
     78 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
     79 	    !ixgbe_mng_enabled(hw)) {
     80 		mac->ops.disable_tx_laser =
     81 				       ixgbe_disable_tx_laser_multispeed_fiber;
     82 		mac->ops.enable_tx_laser =
     83 					ixgbe_enable_tx_laser_multispeed_fiber;
     84 		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
     85 
     86 	} else {
     87 		mac->ops.disable_tx_laser = NULL;
     88 		mac->ops.enable_tx_laser = NULL;
     89 		mac->ops.flap_tx_laser = NULL;
     90 	}
     91 
     92 	if (hw->phy.multispeed_fiber) {
     93 		/* Set up dual speed SFP+ support */
     94 		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
     95 		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
     96 		mac->ops.set_rate_select_speed =
     97 					       ixgbe_set_hard_rate_select_speed;
     98 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
     99 			mac->ops.set_rate_select_speed =
    100 					       ixgbe_set_soft_rate_select_speed;
    101 	} else {
    102 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
    103 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
    104 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
    105 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    106 			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
    107 		} else {
    108 			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
    109 		}
    110 	}
    111 }
    112 
    113 /**
    114  * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
    115  * @hw: pointer to hardware structure
    116  *
    117  * Initialize any function pointers that were not able to be
    118  * set during init_shared_code because the PHY/SFP type was
    119  * not known.  Perform the SFP init if necessary.
    120  *
    121  **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus: make SDP0 an output
		 * driven low, SDP1 an input, and take both pins out of
		 * native mode so software controls them.
		 */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		/* Use the 82599-specific accessors that arbitrate the
		 * shared QSFP+ I2C bus.
		 */
		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	/* A known SFP module does not need a PHY reset */
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
				  ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX PHYs use their own link setup/check helpers */
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			     ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}
    179 
/**
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * Replays the SFP init sequence stored in the EEPROM (a list of
 * CORECTL register values terminated by 0xffff) and then restarts the
 * DSP in SFI mode via prot_autoc_write.  The MAC CSR SW/FW semaphore
 * is held while the sequence is written so firmware cannot interleave
 * its own accesses.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		/* SFP modules do not need a PHY reset */
		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Write each EEPROM word to CORECTL until the 0xffff
		 * end-of-list marker is reached.
		 */
		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* EEPROM read failed mid-sequence: release the semaphore before
	 * reporting the error.
	 */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}
    246 
    247 /**
    248  * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
    249  * @hw: pointer to hardware structure
    250  * @locked: Return the if we locked for this read.
    251  * @reg_val: Value we read from AUTOC
    252  *
    253  * For this part (82599) we need to wrap read-modify-writes with a possible
    254  * FW/SW lock.  It is assumed this lock will be freed with the next
    255  * prot_autoc_write_82599().
    256  */
    257 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
    258 {
    259 	s32 ret_val;
    260 
    261 	*locked = FALSE;
    262 	 /* If LESM is on then we need to hold the SW/FW semaphore. */
    263 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    264 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
    265 					IXGBE_GSSR_MAC_CSR_SM);
    266 		if (ret_val != IXGBE_SUCCESS)
    267 			return IXGBE_ERR_SWFW_SYNC;
    268 
    269 		*locked = TRUE;
    270 	}
    271 
    272 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    273 	return IXGBE_SUCCESS;
    274 }
    275 
    276 /**
    277  * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
    278  * @hw: pointer to hardware structure
    279  * @autoc: value to write to AUTOC
    280  * @locked: bool to indicate whether the SW/FW lock was already taken by
    281  *          previous proc_autoc_read_82599.
    282  *
    283  * This part (82599) may need to hold the SW/FW lock around all writes to
    284  * AUTOC. Likewise after a write we need to do a pipeline reset.
    285  */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail; still release the lock if the
	 * caller's prot_autoc_read already took it.
	 */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 *  - We didn't do it already (in the read part of a read-modify-write)
	 *  - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	/* Pipeline reset is needed for the new AUTOC value to take effect */
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}
    319 
    320 /**
    321  * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
    322  * @hw: pointer to hardware structure
    323  *
    324  * Initialize the function pointers and assign the MAC type for 82599.
    325  * Does not touch the hardware.
    326  **/
    327 
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* Start from the generic function tables, then override the
	 * entries that have 82599-specific implementations below.
	 */
	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link (requires get_media_type above to be set first) */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware limits */
	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* Manageability (ARC) firmware presence is indicated by a
	 * non-zero mode field in FWSM.
	 */
	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	/* Propagate the status of ixgbe_init_ops_generic() */
	return ret_val;
}
    413 
    414 /**
    415  * ixgbe_get_link_capabilities_82599 - Determines link capabilities
    416  * @hw: pointer to hardware structure
    417  * @speed: pointer to link speed
    418  * @autoneg: TRUE when autoneg or autotry is enabled
    419  *
    420  * Determines the link capabilities by reading the AUTOC register.
    421  **/
    422 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
    423 				      ixgbe_link_speed *speed,
    424 				      bool *autoneg)
    425 {
    426 	s32 status = IXGBE_SUCCESS;
    427 	u32 autoc = 0;
    428 
    429 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
    430 
    431 
    432 	/* Check if 1G SFP module. */
    433 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
    434 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
    435 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
    436 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
    437 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
    438 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
    439 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    440 		*autoneg = TRUE;
    441 		goto out;
    442 	}
    443 
    444 	/*
    445 	 * Determine link capabilities based on the stored value of AUTOC,
    446 	 * which represents EEPROM defaults.  If AUTOC value has not
    447 	 * been stored, use the current register values.
    448 	 */
    449 	if (hw->mac.orig_link_settings_stored)
    450 		autoc = hw->mac.orig_autoc;
    451 	else
    452 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    453 
    454 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
    455 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
    456 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    457 		*autoneg = FALSE;
    458 		break;
    459 
    460 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
    461 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    462 		*autoneg = FALSE;
    463 		break;
    464 
    465 	case IXGBE_AUTOC_LMS_1G_AN:
    466 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    467 		*autoneg = TRUE;
    468 		break;
    469 
    470 	case IXGBE_AUTOC_LMS_10G_SERIAL:
    471 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    472 		*autoneg = FALSE;
    473 		break;
    474 
    475 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
    476 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
    477 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
    478 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    479 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    480 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    481 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    482 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    483 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    484 		*autoneg = TRUE;
    485 		break;
    486 
    487 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
    488 		*speed = IXGBE_LINK_SPEED_100_FULL;
    489 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    490 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    491 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    492 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    493 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    494 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    495 		*autoneg = TRUE;
    496 		break;
    497 
    498 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
    499 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
    500 		*autoneg = FALSE;
    501 		break;
    502 
    503 	default:
    504 		status = IXGBE_ERR_LINK_SETUP;
    505 		goto out;
    506 		break;
    507 	}
    508 
    509 	if (hw->phy.multispeed_fiber) {
    510 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
    511 			  IXGBE_LINK_SPEED_1GB_FULL;
    512 
    513 		/* QSFP must not enable full auto-negotiation
    514 		 * Limited autoneg is enabled at 1G
    515 		 */
    516 		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
    517 			*autoneg = FALSE;
    518 		else
    519 			*autoneg = TRUE;
    520 	}
    521 
    522 out:
    523 	return status;
    524 }
    525 
    526 /**
    527  * ixgbe_get_media_type_82599 - Get media type
    528  * @hw: pointer to hardware structure
    529  *
    530  * Returns the media type (fiber, copper, backplane)
    531  **/
    532 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
    533 {
    534 	enum ixgbe_media_type media_type;
    535 
    536 	DEBUGFUNC("ixgbe_get_media_type_82599");
    537 
    538 	/* Detect if there is a copper PHY attached. */
    539 	switch (hw->phy.type) {
    540 	case ixgbe_phy_cu_unknown:
    541 	case ixgbe_phy_tn:
    542 		media_type = ixgbe_media_type_copper;
    543 		goto out;
    544 	default:
    545 		break;
    546 	}
    547 
    548 	switch (hw->device_id) {
    549 	case IXGBE_DEV_ID_82599_KX4:
    550 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
    551 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
    552 	case IXGBE_DEV_ID_82599_KR:
    553 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
    554 	case IXGBE_DEV_ID_82599_XAUI_LOM:
    555 		/* Default device ID is mezzanine card KX/KX4 */
    556 		media_type = ixgbe_media_type_backplane;
    557 		break;
    558 	case IXGBE_DEV_ID_82599_SFP:
    559 	case IXGBE_DEV_ID_82599_SFP_FCOE:
    560 	case IXGBE_DEV_ID_82599_SFP_EM:
    561 	case IXGBE_DEV_ID_82599_SFP_SF2:
    562 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
    563 	case IXGBE_DEV_ID_82599EN_SFP:
    564 		media_type = ixgbe_media_type_fiber;
    565 		break;
    566 	case IXGBE_DEV_ID_82599_CX4:
    567 		media_type = ixgbe_media_type_cx4;
    568 		break;
    569 	case IXGBE_DEV_ID_82599_T3_LOM:
    570 		media_type = ixgbe_media_type_copper;
    571 		break;
    572 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
    573 		media_type = ixgbe_media_type_fiber_qsfp;
    574 		break;
    575 	case IXGBE_DEV_ID_82599_BYPASS:
    576 		media_type = ixgbe_media_type_fiber_fixed;
    577 		hw->phy.multispeed_fiber = TRUE;
    578 		break;
    579 	default:
    580 		media_type = ixgbe_media_type_unknown;
    581 		break;
    582 	}
    583 out:
    584 	return media_type;
    585 }
    586 
    587 /**
    588  * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
    589  * @hw: pointer to hardware structure
    590  *
    591  * Disables link during D3 power down sequence.
    592  *
    593  **/
    594 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
    595 {
    596 	u32 autoc2_reg;
    597 	u16 ee_ctrl_2 = 0;
    598 
    599 	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
    600 	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
    601 
    602 	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
    603 	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
    604 		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
    605 		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
    606 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
    607 	}
    608 }
    609 
    610 /**
    611  * ixgbe_start_mac_link_82599 - Setup MAC link settings
    612  * @hw: pointer to hardware structure
    613  * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    614  *
    615  * Configures link settings based on values in the ixgbe_hw struct.
    616  * Restarts the link.  Performs autonegotiation if needed.
    617  **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/*  reset_pipeline requires us to hold this lock as it writes to
	 *  AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so.
	 * Completion is only reported (via LINKS.KX_AN_COMP) in the
	 * KX4/KX/KR link modes.
	 */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

out:
	return status;
}
    677 
    678 /**
    679  * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
    680  * @hw: pointer to hardware structure
    681  *
    682  * The base drivers may require better control over SFP+ module
    683  * PHY states.  This includes selectively shutting down the Tx
    684  * laser on the PHY, effectively halting physical link.
    685  **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable Tx laser by driving SDP3 high; allow 100us to go
	 * dark per spec.
	 */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}
    700 
    701 /**
    702  * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
    703  * @hw: pointer to hardware structure
    704  *
    705  * The base drivers may require better control over SFP+ module
    706  * PHY states.  This includes selectively turning on the Tx
    707  * laser on the PHY, effectively starting physical link.
    708  **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable Tx laser by clearing SDP3; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}
    719 
    720 /**
    721  * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
    722  * @hw: pointer to hardware structure
    723  *
    724  * When the driver changes the link speeds that it can support,
    725  * it sets autotry_restart to TRUE to indicate that we need to
    726  * initiate a new autotry session with the link partner.  To do
    727  * so, we set the speed then disable and re-enable the Tx laser, to
    728  * alert the link partner that it also needs to restart autotry on its
    729  * end.  This is consistent with TRUE clause 37 autoneg, which also
    730  * involves a loss of signal.
    731  **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Cycle the laser off then on only when a new autotry session
	 * was requested; the induced loss of signal tells the link
	 * partner to restart autotry as well.
	 */
	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}
    746 
    747 /**
    748  * ixgbe_set_hard_rate_select_speed - Set module link speed
    749  * @hw: pointer to hardware structure
    750  * @speed: link speed to set
    751  *
    752  * Set module link speed via RS0/RS1 rate select pins.
    753  */
    754 void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
    755 					ixgbe_link_speed speed)
    756 {
    757 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    758 
    759 	switch (speed) {
    760 	case IXGBE_LINK_SPEED_10GB_FULL:
    761 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
    762 		break;
    763 	case IXGBE_LINK_SPEED_1GB_FULL:
    764 		esdp_reg &= ~IXGBE_ESDP_SDP5;
    765 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
    766 		break;
    767 	default:
    768 		DEBUGOUT("Invalid fixed module speed\n");
    769 		return;
    770 	}
    771 
    772 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    773 	IXGBE_WRITE_FLUSH(hw);
    774 }
    775 
    776 /**
/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm: first try to bring the link
 * up with the full advertised speed set; if that fails and both KR and a
 * KX4/KX mode were advertised, suppress KR (via phy.smart_speed_active)
 * and retry, so a marginal physical connection can still link at a lower
 * rate.  SmartSpeed is switched back off before returning if the retry
 * also fails.  Returns IXGBE_SUCCESS or the first setup/check error.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	/* Note when SmartSpeed settled for less than the maximum advertised */
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
    887 
/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.  The new
 * AUTOC value is computed from the requested @speed and the EEPROM-default
 * KR/KX4/KX capabilities; it is only written back (via the protected
 * prot_autoc_write op) when it differs from the current register value.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	/* AUTOC register value at entry */
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 orig_autoc = 0; /* cached (EEPROM default) value of AUTOC */
	u32 autoc = current_autoc; /* working copy, compared before writing */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	speed &= link_capabilities;

	if (speed == 0) {
		ixgbe_disable_tx_laser(hw); /* For fiber */
		ixgbe_set_phy_power(hw, false); /* For copper */
	} else {
		/* In case previous media setting was none(down) */
		ixgbe_enable_tx_laser(hw); /* for Fiber */
		ixgbe_set_phy_power(hw, true); /* For copper */
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* KR stays disabled while SmartSpeed is active */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}
   1009 
/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.  The PHY is
 * programmed first, then the MAC link is (re)started; the MAC call's
 * result is intentionally not checked — the PHY setup status is what is
 * returned to the caller.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}
   1034 
/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.  A second MAC reset is issued when the double-reset flag is set
 * (recovery from certain error conditions).  Original AUTOC/AUTOC2 link
 * settings are preserved across the reset, and the permanent MAC, SAN MAC
 * and WWN addresses are re-read afterwards.  A PHY init/setup failure is
 * reported even if the MAC reset itself succeeded.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	phy_status = hw->phy.ops.init(hw);

	/* An unsupported SFP skips PHY/SFP setup but still resets the MAC */
	if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto mac_reset_top;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		phy_status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto mac_reset_top;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_retry:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear meaning reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_retry;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg with out driver support we need to
		 * leave LMS in the state it was before we MAC reset.
		 * Likewise if we support WoL we don't want change the
		 * LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							FALSE);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		/* Restore only the upper (manageability) bits of AUTOC2 */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	/* A PHY failure (e.g. unsupported SFP) takes precedence */
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}
   1221 
   1222 /**
   1223  * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
   1224  * @hw: pointer to hardware structure
   1225  * @fdircmd: current value of FDIRCMD register
   1226  */
   1227 static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
   1228 {
   1229 	int i;
   1230 
   1231 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
   1232 		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
   1233 		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
   1234 			return IXGBE_SUCCESS;
   1235 		usec_delay(10);
   1236 	}
   1237 
   1238 	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
   1239 }
   1240 
/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 *
 * Re-runs the Flow Director init flow with the current FDIRCTRL settings,
 * working around an 82599 silicon erratum by toggling FDIRCMD.CLEARHT
 * before rewriting FDIRCTRL.  Returns IXGBE_SUCCESS, or an error if a
 * previous FDIRCMD command never completed or init-done never asserts.
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}
   1313 
/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 *
 * Programs the bucket and signature hash keys, writes FDIRCTRL, and polls
 * for the INIT_DONE bit.  A poll timeout is only logged — the caller gets
 * no error indication.
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiple these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}
   1354 
   1355 /**
   1356  * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
   1357  * @hw: pointer to hardware structure
   1358  * @fdirctrl: value to write to flow director control register, initially
   1359  *	     contains just the value of the Rx packet buffer allocation
   1360  **/
   1361 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1362 {
   1363 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
   1364 
   1365 	/*
   1366 	 * Continue setup of fdirctrl register bits:
   1367 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1368 	 *  Set the maximum length per hash bucket to 0xA filters
   1369 	 *  Send interrupt when 64 filters are left
   1370 	 */
   1371 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1372 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1373 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1374 
   1375 	/* write hashes and fdirctrl register, poll for completion */
   1376 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1377 
   1378 	return IXGBE_SUCCESS;
   1379 }
   1380 
   1381 /**
   1382  * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
   1383  * @hw: pointer to hardware structure
   1384  * @fdirctrl: value to write to flow director control register, initially
   1385  *	     contains just the value of the Rx packet buffer allocation
   1386  * @cloud_mode: TRUE - cloud mode, FALSE - other mode
   1387  **/
   1388 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
   1389 			bool cloud_mode)
   1390 {
   1391 	UNREFERENCED_1PARAMETER(cloud_mode);
   1392 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
   1393 
   1394 	/*
   1395 	 * Continue setup of fdirctrl register bits:
   1396 	 *  Turn perfect match filtering on
   1397 	 *  Report hash in RSS field of Rx wb descriptor
   1398 	 *  Initialize the drop queue to queue 127
   1399 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1400 	 *  Set the maximum length per hash bucket to 0xA filters
   1401 	 *  Send interrupt when 64 (0x4 * 16) filters are left
   1402 	 */
   1403 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
   1404 		    IXGBE_FDIRCTRL_REPORT_STATUS |
   1405 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
   1406 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1407 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1408 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1409 
   1410 	if (cloud_mode)
   1411 		fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
   1412 					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
   1413 
   1414 	/* write hashes and fdirctrl register, poll for completion */
   1415 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1416 
   1417 	return IXGBE_SUCCESS;
   1418 }
   1419 
/**
 * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
 * @hw: pointer to hardware structure
 * @dropqueue: Rx queue index used for the dropped packets
 *
 * Reprograms the FDIRCTRL drop-queue field and re-runs the Flow Director
 * enable sequence.  The hash table is cleared (CLEARHT toggle) before the
 * control register is rewritten.
 **/
void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
{
	u32 fdirctrl;

	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
	/* Clear init done bit and drop queue field */
	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);

	/* Set drop queue */
	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
	/* X550 family additionally drops packets matching no filter */
	if ((hw->mac.type == ixgbe_mac_X550) ||
	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
	    (hw->mac.type == ixgbe_mac_X550EM_a))
		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;

	/* Toggle CLEARHT to flush the hash table (see silicon errata) */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);
}
   1453 
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
/*
 * One round of the signature hash: test bit n (lower half) and bit n + 16
 * (upper half) of each key, folding the matching input bits into the
 * common/bucket/signature accumulators.  The bit probe uses an unsigned
 * literal (0x01u) so that testing bit 31 (n == 15 in the upper half) does
 * not left-shift into the sign bit of a signed int, which is undefined
 * behavior per C11 6.5.7; the selected bits are unchanged.
 */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01u << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01u << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01u << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01u << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01u << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01u << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
   1477 
/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 *
 * Returns the combined value: the 15-bit signature hash in the upper
 * 16 bits XORed with the 15-bit bucket hash in the lower 16 bits.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}
   1544 
/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Computes the signature hash for the flow and programs it, together with
 * the FDIRCMD add-flow command, via a single 64-bit write to FDIRHASH.
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 **/
void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					   union ixgbe_atr_hash_dword input,
					   union ixgbe_atr_hash_dword common,
					   u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
	 * fifth is FDIRCMD.TUNNEL_FILTER
	 */
	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
	flow_type = input.formatted.flow_type &
		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
	switch (flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		/* Unknown flow type: nothing is programmed */
		DEBUGOUT(" Error on flow type input\n");
		return;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	if (tunnel)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= (u64)ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return;
}
   1608 
/*
 * One round of the bucket hash: test bit n (lower half) and bit n + 16
 * (upper half) of the bucket key and fold the matching input bits into
 * bucket_hash.  The bit probe uses an unsigned literal (0x01u) so that
 * testing bit 31 (n == 15 in the upper half) does not left-shift into the
 * sign bit of a signed int, which is undefined behavior per C11 6.5.7;
 * the selected bits are unchanged.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01u << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01u << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
   1617 
/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes.  First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream.  This way it will be available for
 * future use without needing to recompute the hash.
 *
 * Note: @input is modified in place (masked, then bkt_hash written).
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	u32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data */
	for (i = 0; i < 14; i++)
		input->dword_stream[i]  &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = IXGBE_NTOHL(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}
   1676 
   1677 /**
   1678  * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
   1679  * @input_mask: mask to be bit swapped
   1680  *
   1681  * The source and destination port masks for flow director are bit swapped
   1682  * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
   1683  * generate a correctly swapped value we need to bit swap the mask and that
   1684  * is what is accomplished by this function.
   1685  **/
   1686 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
   1687 {
   1688 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
   1689 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
   1690 	mask |= (u32)IXGBE_NTOHS(input_mask->formatted.src_port);
   1691 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
   1692 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
   1693 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
   1694 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
   1695 }
   1696 
   1697 /*
   1698  * These two macros are meant to address the fact that we have registers
   1699  * that are either all or in part big-endian.  As a result on big-endian
   1700  * systems we will end up byte swapping the value to little-endian before
   1701  * it is byte swapped again and written to the hardware in the original
   1702  * big-endian format.
   1703  */
/* Byte-swap a 32-bit host value so it lands big-endian in a register. */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* Write a network-order 32-bit value to a register kept big-endian. */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* Byte-swap a 16-bit value into big-endian register layout. */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
   1713 
   1714 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
   1715 				    union ixgbe_atr_input *input_mask, bool cloud_mode)
   1716 {
   1717 	/* mask IPv6 since it is currently not supported */
   1718 	u32 fdirm = IXGBE_FDIRM_DIPv6;
   1719 	u32 fdirtcpm;
   1720 	u32 fdirip6m;
   1721 	UNREFERENCED_1PARAMETER(cloud_mode);
   1722 	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
   1723 
   1724 	/*
   1725 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
   1726 	 * are zero, then assume a full mask for that field.  Also assume that
   1727 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
   1728 	 * cannot be masked out in this implementation.
   1729 	 *
   1730 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
   1731 	 * point in time.
   1732 	 */
   1733 
   1734 	/* verify bucket hash is cleared on hash generation */
   1735 	if (input_mask->formatted.bkt_hash)
   1736 		DEBUGOUT(" bucket hash should always be 0 in mask\n");
   1737 
   1738 	/* Program FDIRM and verify partial masks */
   1739 	switch (input_mask->formatted.vm_pool & 0x7F) {
   1740 	case 0x0:
   1741 		fdirm |= IXGBE_FDIRM_POOL;
   1742 	case 0x7F:
   1743 		break;
   1744 	default:
   1745 		DEBUGOUT(" Error on vm pool mask\n");
   1746 		return IXGBE_ERR_CONFIG;
   1747 	}
   1748 
   1749 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
   1750 	case 0x0:
   1751 		fdirm |= IXGBE_FDIRM_L4P;
   1752 		if (input_mask->formatted.dst_port ||
   1753 		    input_mask->formatted.src_port) {
   1754 			DEBUGOUT(" Error on src/dst port mask\n");
   1755 			return IXGBE_ERR_CONFIG;
   1756 		}
   1757 	case IXGBE_ATR_L4TYPE_MASK:
   1758 		break;
   1759 	default:
   1760 		DEBUGOUT(" Error on flow type mask\n");
   1761 		return IXGBE_ERR_CONFIG;
   1762 	}
   1763 
   1764 	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
   1765 	case 0x0000:
   1766 		/* mask VLAN ID */
   1767 		fdirm |= IXGBE_FDIRM_VLANID;
   1768 		/* fall through */
   1769 	case 0x0FFF:
   1770 		/* mask VLAN priority */
   1771 		fdirm |= IXGBE_FDIRM_VLANP;
   1772 		break;
   1773 	case 0xE000:
   1774 		/* mask VLAN ID only */
   1775 		fdirm |= IXGBE_FDIRM_VLANID;
   1776 		/* fall through */
   1777 	case 0xEFFF:
   1778 		/* no VLAN fields masked */
   1779 		break;
   1780 	default:
   1781 		DEBUGOUT(" Error on VLAN mask\n");
   1782 		return IXGBE_ERR_CONFIG;
   1783 	}
   1784 
   1785 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
   1786 	case 0x0000:
   1787 		/* Mask Flex Bytes */
   1788 		fdirm |= IXGBE_FDIRM_FLEX;
   1789 		/* fall through */
   1790 	case 0xFFFF:
   1791 		break;
   1792 	default:
   1793 		DEBUGOUT(" Error on flexible byte mask\n");
   1794 		return IXGBE_ERR_CONFIG;
   1795 	}
   1796 
   1797 	if (cloud_mode) {
   1798 		fdirm |= IXGBE_FDIRM_L3P;
   1799 		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
   1800 		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
   1801 
   1802 		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
   1803 		case 0x00:
   1804 			/* Mask inner MAC, fall through */
   1805 			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
   1806 		case 0xFF:
   1807 			break;
   1808 		default:
   1809 			DEBUGOUT(" Error on inner_mac byte mask\n");
   1810 			return IXGBE_ERR_CONFIG;
   1811 		}
   1812 
   1813 		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
   1814 		case 0x0:
   1815 			/* Mask vxlan id */
   1816 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
   1817 			break;
   1818 		case 0x00FFFFFF:
   1819 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
   1820 			break;
   1821 		case 0xFFFFFFFF:
   1822 			break;
   1823 		default:
   1824 			DEBUGOUT(" Error on TNI/VNI byte mask\n");
   1825 			return IXGBE_ERR_CONFIG;
   1826 		}
   1827 
   1828 		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
   1829 		case 0x0:
   1830 			/* Mask turnnel type, fall through */
   1831 			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
   1832 		case 0xFFFF:
   1833 			break;
   1834 		default:
   1835 			DEBUGOUT(" Error on tunnel type byte mask\n");
   1836 			return IXGBE_ERR_CONFIG;
   1837 		}
   1838 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
   1839 
   1840 		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
   1841 		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
   1842 		 * L3/L3 packets to tunnel.
   1843 		 */
   1844 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
   1845 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
   1846 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
   1847 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
   1848 		switch (hw->mac.type) {
   1849 		case ixgbe_mac_X550:
   1850 		case ixgbe_mac_X550EM_x:
   1851 		case ixgbe_mac_X550EM_a:
   1852 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
   1853 			break;
   1854 		default:
   1855 			break;
   1856 		}
   1857 	}
   1858 
   1859 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
   1860 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
   1861 
   1862 	if (!cloud_mode) {
   1863 		/* store the TCP/UDP port masks, bit reversed from port
   1864 		 * layout */
   1865 		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
   1866 
   1867 		/* write both the same so that UDP and TCP use the same mask */
   1868 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
   1869 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
   1870 		/* also use it for SCTP */
   1871 		switch (hw->mac.type) {
   1872 		case ixgbe_mac_X550:
   1873 		case ixgbe_mac_X550EM_x:
   1874 		case ixgbe_mac_X550EM_a:
   1875 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
   1876 			break;
   1877 		default:
   1878 			break;
   1879 		}
   1880 
   1881 		/* store source and destination IP masks (big-enian) */
   1882 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
   1883 				     ~input_mask->formatted.src_ip[0]);
   1884 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
   1885 				     ~input_mask->formatted.dst_ip[0]);
   1886 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xFFFFFFFF);
   1887 	}
   1888 	return IXGBE_SUCCESS;
   1889 }
   1890 
/**
 * ixgbe_fdir_write_perfect_filter_82599 - Write a perfect filter to HW
 * @hw: pointer to hardware structure
 * @input: filter contents, already masked and with bkt_hash computed
 * @soft_id: software index stored in the FDIRHASH sig/sw index field
 * @queue: Rx queue to steer matching packets to
 * @cloud_mode: TRUE to program a tunneled (cloud) filter
 *
 * Programs the filter field registers, then issues an add-flow command
 * and polls for its completion.  The caller must hold the appropriate
 * lock so these register writes do not interleave with other commands.
 */
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue, bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;
	UNREFERENCED_1PARAMETER(cloud_mode);

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
			input->formatted.src_ip[0]);

		/* record the first 32 bits of the destination address
		 * (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
			input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= (u32)IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= (u32)IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		/* NOTE(review): the high bit written to FDIRSIPv6(1) appears
		 * to flag a non-default tunnel type -- confirm in datasheet */
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000;

		/* inner MAC goes into FDIRSIPv6(0)/(1), TNI/VNI into (2) */
		addr_low = ((u32)input->formatted.inner_mac[0] |
				((u32)input->formatted.inner_mac[1] << 8) |
				((u32)input->formatted.inner_mac[2] << 16) |
				((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
				((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
	}

	/* configure FDIRHASH register: bucket hash + software index */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}
   1980 
/**
 * ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter from HW
 * @hw: pointer to hardware structure
 * @input: filter to remove; bkt_hash must match the programmed filter
 * @soft_id: software index the filter was added with
 *
 * Queries the hardware for the filter identified by FDIRHASH and, if it
 * is present, issues a remove-flow command.  Returns IXGBE_SUCCESS even
 * when no matching filter was found.
 */
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	/* configure FDIRHASH register: bucket hash + software index */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	/* wait for the query to complete; result is returned in fdircmd */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return IXGBE_SUCCESS;
}
   2016 
   2017 /**
   2018  * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
   2019  * @hw: pointer to hardware structure
   2020  * @input: input bitstream
   2021  * @input_mask: mask for the input bitstream
   2022  * @soft_id: software index for the filters
   2023  * @queue: queue index to direct traffic to
   2024  * @cloud_mode: unused
   2025  *
   2026  * Note that the caller to this function must lock before calling, since the
   2027  * hardware writes must be protected from one another.
   2028  **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue, bool cloud_mode)
{
	s32 err = IXGBE_ERR_CONFIG;
	UNREFERENCED_1PARAMETER(cloud_mode);

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue.  Note that this writes the
	 * required flow-type mask bits into @input_mask.
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		/* plain IPv4 filters cannot match on L4 ports */
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
		/* SCTP filters cannot match on L4 ports either */
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
	if (err)
		return err;

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue, cloud_mode);
}
   2083 
   2084 /**
   2085  * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
   2086  * @hw: pointer to hardware structure
   2087  * @reg: analog register to read
   2088  * @val: read value
   2089  *
   2090  * Performs read operation to Omer analog register specified.
   2091  **/
   2092 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
   2093 {
   2094 	u32  core_ctl;
   2095 
   2096 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
   2097 
   2098 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
   2099 			(reg << 8));
   2100 	IXGBE_WRITE_FLUSH(hw);
   2101 	usec_delay(10);
   2102 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
   2103 	*val = (u8)core_ctl;
   2104 
   2105 	return IXGBE_SUCCESS;
   2106 }
   2107 
   2108 /**
   2109  * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
   2110  * @hw: pointer to hardware structure
   2111  * @reg: atlas register to write
   2112  * @val: value to write
   2113  *
   2114  * Performs write operation to Omer analog register specified.
   2115  **/
   2116 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
   2117 {
   2118 	u32  core_ctl;
   2119 
   2120 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
   2121 
   2122 	core_ctl = (reg << 8) | val;
   2123 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
   2124 	IXGBE_WRITE_FLUSH(hw);
   2125 	usec_delay(10);
   2126 
   2127 	return IXGBE_SUCCESS;
   2128 }
   2129 
   2130 /**
   2131  * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
   2132  * @hw: pointer to hardware structure
   2133  *
   2134  * Starts the hardware using the generic start_hw function
   2135  * and the generation start_hw function.
   2136  * Then performs revision-specific operations, if any.
   2137  **/
   2138 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
   2139 {
   2140 	s32 ret_val = IXGBE_SUCCESS;
   2141 
   2142 	DEBUGFUNC("ixgbe_start_hw_82599");
   2143 
   2144 	ret_val = ixgbe_start_hw_generic(hw);
   2145 	if (ret_val != IXGBE_SUCCESS)
   2146 		goto out;
   2147 
   2148 	ixgbe_start_hw_gen2(hw);
   2149 
   2150 	/* We need to run link autotry after the driver loads */
   2151 	hw->mac.autotry_restart = TRUE;
   2152 
   2153 	if (ret_val == IXGBE_SUCCESS)
   2154 		ret_val = ixgbe_verify_fw_version_82599(hw);
   2155 out:
   2156 	return ret_val;
   2157 }
   2158 
   2159 /**
   2160  * ixgbe_identify_phy_82599 - Get physical layer module
   2161  * @hw: pointer to hardware structure
   2162  *
   2163  * Determines the physical layer module found on the current adapter.
   2164  * If PHY already detected, maintains current PHY type in hw struct,
   2165  * otherwise executes the PHY detection routine.
   2166  **/
   2167 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
   2168 {
   2169 	s32 status;
   2170 
   2171 	DEBUGFUNC("ixgbe_identify_phy_82599");
   2172 
   2173 	/* Detect PHY if not unknown - returns success if already detected. */
   2174 	status = ixgbe_identify_phy_generic(hw);
   2175 	if (status != IXGBE_SUCCESS) {
   2176 		/* 82599 10GBASE-T requires an external PHY */
   2177 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
   2178 			return status;
   2179 		else
   2180 			status = ixgbe_identify_module_generic(hw);
   2181 	}
   2182 
   2183 	/* Set PHY type none if no PHY detected */
   2184 	if (hw->phy.type == ixgbe_phy_unknown) {
   2185 		hw->phy.type = ixgbe_phy_none;
   2186 		return IXGBE_SUCCESS;
   2187 	}
   2188 
   2189 	/* Return error if SFP module has been detected but is not supported */
   2190 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
   2191 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
   2192 
   2193 	return status;
   2194 }
   2195 
   2196 /**
   2197  * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
   2198  * @hw: pointer to hardware structure
   2199  *
   2200  * Determines physical layer capabilities of the current configuration.
   2201  **/
   2202 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
   2203 {
   2204 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   2205 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   2206 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   2207 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
   2208 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
   2209 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
   2210 	u16 ext_ability = 0;
   2211 
   2212 	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
   2213 
   2214 	hw->phy.ops.identify(hw);
   2215 
   2216 	switch (hw->phy.type) {
   2217 	case ixgbe_phy_tn:
   2218 	case ixgbe_phy_cu_unknown:
   2219 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
   2220 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
   2221 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
   2222 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
   2223 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
   2224 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
   2225 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
   2226 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
   2227 		goto out;
   2228 	default:
   2229 		break;
   2230 	}
   2231 
   2232 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
   2233 	case IXGBE_AUTOC_LMS_1G_AN:
   2234 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
   2235 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
   2236 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
   2237 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
   2238 			goto out;
   2239 		} else
   2240 			/* SFI mode so read SFP module */
   2241 			goto sfp_check;
   2242 		break;
   2243 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
   2244 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
   2245 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
   2246 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
   2247 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2248 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
   2249 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
   2250 		goto out;
   2251 		break;
   2252 	case IXGBE_AUTOC_LMS_10G_SERIAL:
   2253 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
   2254 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2255 			goto out;
   2256 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
   2257 			goto sfp_check;
   2258 		break;
   2259 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
   2260 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
   2261 		if (autoc & IXGBE_AUTOC_KX_SUPP)
   2262 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
   2263 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
   2264 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2265 		if (autoc & IXGBE_AUTOC_KR_SUPP)
   2266 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2267 		goto out;
   2268 		break;
   2269 	default:
   2270 		goto out;
   2271 		break;
   2272 	}
   2273 
   2274 sfp_check:
   2275 	/* SFP check must be done last since DA modules are sometimes used to
   2276 	 * test KR mode -  we need to id KR mode correctly before SFP module.
   2277 	 * Call identify_sfp because the pluggable module may have changed */
   2278 	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
   2279 out:
   2280 	return physical_layer;
   2281 }
   2282 
   2283 /**
   2284  * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
   2285  * @hw: pointer to hardware structure
   2286  * @regval: register value to write to RXCTRL
   2287  *
   2288  * Enables the Rx DMA unit for 82599
   2289  **/
   2290 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
   2291 {
   2292 
   2293 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
   2294 
   2295 	/*
   2296 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
   2297 	 * If traffic is incoming before we enable the Rx unit, it could hang
   2298 	 * the Rx DMA unit.  Therefore, make sure the security engine is
   2299 	 * completely disabled prior to enabling the Rx unit.
   2300 	 */
   2301 
   2302 	hw->mac.ops.disable_sec_rx_path(hw);
   2303 
   2304 	if (regval & IXGBE_RXCTRL_RXEN)
   2305 		ixgbe_enable_rx(hw);
   2306 	else
   2307 		ixgbe_disable_rx(hw);
   2308 
   2309 	hw->mac.ops.enable_sec_rx_path(hw);
   2310 
   2311 	return IXGBE_SUCCESS;
   2312 }
   2313 
   2314 /**
   2315  * ixgbe_verify_fw_version_82599 - verify FW version for 82599
   2316  * @hw: pointer to hardware structure
   2317  *
   2318  * Verifies that installed the firmware version is 0.6 or higher
   2319  * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
   2320  *
   2321  * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
   2322  * if the FW version is not supported.
   2323  **/
   2324 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
   2325 {
   2326 	s32 status = IXGBE_ERR_EEPROM_VERSION;
   2327 	u16 fw_offset, fw_ptp_cfg_offset;
   2328 	u16 fw_version;
   2329 
   2330 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
   2331 
   2332 	/* firmware check is only necessary for SFI devices */
   2333 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
   2334 		status = IXGBE_SUCCESS;
   2335 		goto fw_version_out;
   2336 	}
   2337 
   2338 	/* get the offset to the Firmware Module block */
   2339 	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
   2340 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2341 			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
   2342 		return IXGBE_ERR_EEPROM_VERSION;
   2343 	}
   2344 
   2345 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
   2346 		goto fw_version_out;
   2347 
   2348 	/* get the offset to the Pass Through Patch Configuration block */
   2349 	if (hw->eeprom.ops.read(hw, (fw_offset +
   2350 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
   2351 				 &fw_ptp_cfg_offset)) {
   2352 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2353 			      "eeprom read at offset %d failed",
   2354 			      fw_offset +
   2355 			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
   2356 		return IXGBE_ERR_EEPROM_VERSION;
   2357 	}
   2358 
   2359 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
   2360 		goto fw_version_out;
   2361 
   2362 	/* get the firmware version */
   2363 	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
   2364 			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
   2365 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2366 			      "eeprom read at offset %d failed",
   2367 			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
   2368 		return IXGBE_ERR_EEPROM_VERSION;
   2369 	}
   2370 
   2371 	if (fw_version > 0x5)
   2372 		status = IXGBE_SUCCESS;
   2373 
   2374 fw_version_out:
   2375 	return status;
   2376 }
   2377 
   2378 /**
   2379  * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
   2380  * @hw: pointer to hardware structure
   2381  *
   2382  * Returns TRUE if the LESM FW module is present and enabled. Otherwise
   2383  * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
   2384  **/
   2385 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
   2386 {
   2387 	bool lesm_enabled = FALSE;
   2388 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
   2389 	s32 status;
   2390 
   2391 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
   2392 
   2393 	/* get the offset to the Firmware Module block */
   2394 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
   2395 
   2396 	if ((status != IXGBE_SUCCESS) ||
   2397 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
   2398 		goto out;
   2399 
   2400 	/* get the offset to the LESM Parameters block */
   2401 	status = hw->eeprom.ops.read(hw, (fw_offset +
   2402 				     IXGBE_FW_LESM_PARAMETERS_PTR),
   2403 				     &fw_lesm_param_offset);
   2404 
   2405 	if ((status != IXGBE_SUCCESS) ||
   2406 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
   2407 		goto out;
   2408 
   2409 	/* get the LESM state word */
   2410 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
   2411 				     IXGBE_FW_LESM_STATE_1),
   2412 				     &fw_lesm_state);
   2413 
   2414 	if ((status == IXGBE_SUCCESS) &&
   2415 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
   2416 		lesm_enabled = TRUE;
   2417 
   2418 out:
   2419 	return lesm_enabled;
   2420 }
   2421 
   2422 /**
   2423  * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
   2424  * fastest available method
   2425  *
   2426  * @hw: pointer to hardware structure
   2427  * @offset: offset of  word in EEPROM to read
   2428  * @words: number of words
   2429  * @data: word(s) read from the EEPROM
   2430  *
   2431  * Retrieves 16 bit word(s) read from EEPROM
   2432  **/
   2433 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
   2434 					  u16 words, u16 *data)
   2435 {
   2436 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2437 	s32 ret_val = IXGBE_ERR_CONFIG;
   2438 
   2439 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
   2440 
   2441 	/*
   2442 	 * If EEPROM is detected and can be addressed using 14 bits,
   2443 	 * use EERD otherwise use bit bang
   2444 	 */
   2445 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2446 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
   2447 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
   2448 							 data);
   2449 	else
   2450 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
   2451 								    words,
   2452 								    data);
   2453 
   2454 	return ret_val;
   2455 }
   2456 
   2457 /**
   2458  * ixgbe_read_eeprom_82599 - Read EEPROM word using
   2459  * fastest available method
   2460  *
   2461  * @hw: pointer to hardware structure
   2462  * @offset: offset of  word in the EEPROM to read
   2463  * @data: word read from the EEPROM
   2464  *
   2465  * Reads a 16 bit word from the EEPROM
   2466  **/
   2467 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
   2468 				   u16 offset, u16 *data)
   2469 {
   2470 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2471 	s32 ret_val = IXGBE_ERR_CONFIG;
   2472 
   2473 	DEBUGFUNC("ixgbe_read_eeprom_82599");
   2474 
   2475 	/*
   2476 	 * If EEPROM is detected and can be addressed using 14 bits,
   2477 	 * use EERD otherwise use bit bang
   2478 	 */
   2479 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2480 	    (offset <= IXGBE_EERD_MAX_ADDR))
   2481 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
   2482 	else
   2483 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
   2484 
   2485 	return ret_val;
   2486 }
   2487 
   2488 /**
   2489  * ixgbe_reset_pipeline_82599 - perform pipeline reset
   2490  *
   2491  * @hw: pointer to hardware structure
   2492  *
   2493  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
   2494  * full pipeline reset.  This function assumes the SW/FW lock is held.
   2495  **/
   2496 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
   2497 {
   2498 	s32 ret_val;
   2499 	u32 anlp1_reg = 0;
   2500 	u32 i, autoc_reg, autoc2_reg;
   2501 
   2502 	/* Enable link if disabled in NVM */
   2503 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   2504 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
   2505 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
   2506 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
   2507 		IXGBE_WRITE_FLUSH(hw);
   2508 	}
   2509 
   2510 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   2511 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
   2512 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
   2513 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
   2514 			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
   2515 	/* Wait for AN to leave state 0 */
   2516 	for (i = 0; i < 10; i++) {
   2517 		msec_delay(4);
   2518 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
   2519 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
   2520 			break;
   2521 	}
   2522 
   2523 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
   2524 		DEBUGOUT("auto negotiation not completed\n");
   2525 		ret_val = IXGBE_ERR_RESET_FAILED;
   2526 		goto reset_pipeline_out;
   2527 	}
   2528 
   2529 	ret_val = IXGBE_SUCCESS;
   2530 
   2531 reset_pipeline_out:
   2532 	/* Write AUTOC register with original LMS field and Restart_AN */
   2533 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
   2534 	IXGBE_WRITE_FLUSH(hw);
   2535 
   2536 	return ret_val;
   2537 }
   2538 
   2539 /**
   2540  * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
   2541  * @hw: pointer to hardware structure
   2542  * @byte_offset: byte offset to read
   2543  * @dev_addr: address to read from
   2544  * @data: value read
   2545  *
   2546  * Performs byte read operation to SFP module's EEPROM over I2C interface at
   2547  * a specified device address.
   2548  **/
   2549 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
   2550 				u8 dev_addr, u8 *data)
   2551 {
   2552 	u32 esdp;
   2553 	s32 status;
   2554 	s32 timeout = 200;
   2555 
   2556 	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
   2557 
   2558 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2559 		/* Acquire I2C bus ownership. */
   2560 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2561 		esdp |= IXGBE_ESDP_SDP0;
   2562 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2563 		IXGBE_WRITE_FLUSH(hw);
   2564 
   2565 		while (timeout) {
   2566 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2567 			if (esdp & IXGBE_ESDP_SDP1)
   2568 				break;
   2569 
   2570 			msec_delay(5);
   2571 			timeout--;
   2572 		}
   2573 
   2574 		if (!timeout) {
   2575 			DEBUGOUT("Driver can't access resource,"
   2576 				 " acquiring I2C bus timeout.\n");
   2577 			status = IXGBE_ERR_I2C;
   2578 			goto release_i2c_access;
   2579 		}
   2580 	}
   2581 
   2582 	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
   2583 
   2584 release_i2c_access:
   2585 
   2586 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2587 		/* Release I2C bus ownership. */
   2588 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2589 		esdp &= ~IXGBE_ESDP_SDP0;
   2590 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2591 		IXGBE_WRITE_FLUSH(hw);
   2592 	}
   2593 
   2594 	return status;
   2595 }
   2596 
   2597 /**
   2598  * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
   2599  * @hw: pointer to hardware structure
   2600  * @byte_offset: byte offset to write
   2601  * @dev_addr: address to read from
   2602  * @data: value to write
   2603  *
   2604  * Performs byte write operation to SFP module's EEPROM over I2C interface at
   2605  * a specified device address.
   2606  **/
   2607 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
   2608 				 u8 dev_addr, u8 data)
   2609 {
   2610 	u32 esdp;
   2611 	s32 status;
   2612 	s32 timeout = 200;
   2613 
   2614 	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
   2615 
   2616 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2617 		/* Acquire I2C bus ownership. */
   2618 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2619 		esdp |= IXGBE_ESDP_SDP0;
   2620 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2621 		IXGBE_WRITE_FLUSH(hw);
   2622 
   2623 		while (timeout) {
   2624 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2625 			if (esdp & IXGBE_ESDP_SDP1)
   2626 				break;
   2627 
   2628 			msec_delay(5);
   2629 			timeout--;
   2630 		}
   2631 
   2632 		if (!timeout) {
   2633 			DEBUGOUT("Driver can't access resource,"
   2634 				 " acquiring I2C bus timeout.\n");
   2635 			status = IXGBE_ERR_I2C;
   2636 			goto release_i2c_access;
   2637 		}
   2638 	}
   2639 
   2640 	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
   2641 
   2642 release_i2c_access:
   2643 
   2644 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2645 		/* Release I2C bus ownership. */
   2646 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2647 		esdp &= ~IXGBE_ESDP_SDP0;
   2648 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2649 		IXGBE_WRITE_FLUSH(hw);
   2650 	}
   2651 
   2652 	return status;
   2653 }
   2654