/* ixgbe_82599.c revision 1.14 (NetBSD) */
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 292674 2015-12-23 22:45:17Z sbruno $*/
     34 /*$NetBSD: ixgbe_82599.c,v 1.14 2016/12/02 10:42:04 msaitoh Exp $*/
     35 
     36 #include "ixgbe_type.h"
     37 #include "ixgbe_82599.h"
     38 #include "ixgbe_api.h"
     39 #include "ixgbe_common.h"
     40 #include "ixgbe_phy.h"
     41 
     42 #define IXGBE_82599_MAX_TX_QUEUES 128
     43 #define IXGBE_82599_MAX_RX_QUEUES 128
     44 #define IXGBE_82599_RAR_ENTRIES   128
     45 #define IXGBE_82599_MC_TBL_SIZE   128
     46 #define IXGBE_82599_VFT_TBL_SIZE  128
     47 #define IXGBE_82599_RX_PB_SIZE	  512
     48 
     49 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
     50 					 ixgbe_link_speed speed,
     51 					 bool autoneg_wait_to_complete);
     52 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
     53 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
     54 				   u16 offset, u16 *data);
     55 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
     56 					  u16 words, u16 *data);
     57 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     58 					u8 dev_addr, u8 *data);
     59 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     60 					u8 dev_addr, u8 data);
     61 
     62 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
     63 {
     64 	struct ixgbe_mac_info *mac = &hw->mac;
     65 
     66 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
     67 
     68 	/*
     69 	 * enable the laser control functions for SFP+ fiber
     70 	 * and MNG not enabled
     71 	 */
     72 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
     73 	    !ixgbe_mng_enabled(hw)) {
     74 		mac->ops.disable_tx_laser =
     75 				       ixgbe_disable_tx_laser_multispeed_fiber;
     76 		mac->ops.enable_tx_laser =
     77 					ixgbe_enable_tx_laser_multispeed_fiber;
     78 		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
     79 
     80 	} else {
     81 		mac->ops.disable_tx_laser = NULL;
     82 		mac->ops.enable_tx_laser = NULL;
     83 		mac->ops.flap_tx_laser = NULL;
     84 	}
     85 
     86 	if (hw->phy.multispeed_fiber) {
     87 		/* Set up dual speed SFP+ support */
     88 		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
     89 		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
     90 		mac->ops.set_rate_select_speed =
     91 					       ixgbe_set_hard_rate_select_speed;
     92 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
     93 			mac->ops.set_rate_select_speed =
     94 					       ixgbe_set_soft_rate_select_speed;
     95 	} else {
     96 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
     97 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
     98 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
     99 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    100 			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
    101 		} else {
    102 			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
    103 		}
    104 	}
    105 }
    106 
/**
 *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 *  Returns IXGBE_SUCCESS, or IXGBE_ERR_SFP_NOT_SUPPORTED when an
 *  unsupported SFP module is detected by phy->ops.identify().
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus: SDP0 becomes a
		 * software-driven output held low, SDP1 an input, and both
		 * pins are taken out of native (hardware) mode. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		/* 82599-specific accessors arbitrate the shared QSFP+ bus */
		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
				  ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX PHYs get their own link setup/check routines */
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			     ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}
    173 
/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  For a known SFP type, replays the EEPROM-resident PHY init sequence
 *  into IXGBE_CORECTL while holding the MAC_CSR SW/FW semaphore, then
 *  restarts the DSP in 10G serial (SFI) mode via prot_autoc_write.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC if the semaphore cannot
 *  be taken, IXGBE_ERR_SFP_SETUP_NOT_COMPLETE if the AUTOC write fails,
 *  or IXGBE_ERR_PHY on an EEPROM read failure.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Write each word of the init sequence to CORECTL until the
		 * 0xffff terminator is read from the EEPROM. */
		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}
    240 
    241 /**
    242  *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
    243  *  @hw: pointer to hardware structure
    244  *  @locked: Return the if we locked for this read.
    245  *  @reg_val: Value we read from AUTOC
    246  *
    247  *  For this part (82599) we need to wrap read-modify-writes with a possible
    248  *  FW/SW lock.  It is assumed this lock will be freed with the next
    249  *  prot_autoc_write_82599().
    250  */
    251 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
    252 {
    253 	s32 ret_val;
    254 
    255 	*locked = FALSE;
    256 	 /* If LESM is on then we need to hold the SW/FW semaphore. */
    257 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    258 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
    259 					IXGBE_GSSR_MAC_CSR_SM);
    260 		if (ret_val != IXGBE_SUCCESS)
    261 			return IXGBE_ERR_SWFW_SYNC;
    262 
    263 		*locked = TRUE;
    264 	}
    265 
    266 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    267 	return IXGBE_SUCCESS;
    268 }
    269 
    270 /**
    271  * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
    272  * @hw: pointer to hardware structure
    273  * @reg_val: value to write to AUTOC
    274  * @locked: bool to indicate whether the SW/FW lock was already taken by
    275  *           previous proc_autoc_read_82599.
    276  *
    277  * This part (82599) may need to hold the SW/FW lock around all writes to
    278  * AUTOC. Likewise after a write we need to do a pipeline reset.
    279  */
    280 s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
    281 {
    282 	s32 ret_val = IXGBE_SUCCESS;
    283 
    284 	/* Blocked by MNG FW so bail */
    285 	if (ixgbe_check_reset_blocked(hw))
    286 		goto out;
    287 
    288 	/* We only need to get the lock if:
    289 	 *  - We didn't do it already (in the read part of a read-modify-write)
    290 	 *  - LESM is enabled.
    291 	 */
    292 	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    293 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
    294 					IXGBE_GSSR_MAC_CSR_SM);
    295 		if (ret_val != IXGBE_SUCCESS)
    296 			return IXGBE_ERR_SWFW_SYNC;
    297 
    298 		locked = TRUE;
    299 	}
    300 
    301 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
    302 	ret_val = ixgbe_reset_pipeline_82599(hw);
    303 
    304 out:
    305 	/* Free the SW/FW semaphore as we either grabbed it here or
    306 	 * already had it when this function was called.
    307 	 */
    308 	if (locked)
    309 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
    310 
    311 	return ret_val;
    312 }
    313 
/**
 *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82599.
 *  Does not touch the hardware.
 *
 *  Returns the status of ixgbe_init_ops_generic().
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* Start from the generic ops, then override 82599 specifics below */
	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware limits */
	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* ARC subsystem is valid when the FWSM mode field is non-zero */
	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;


	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}
    403 
    404 /**
    405  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
    406  *  @hw: pointer to hardware structure
    407  *  @speed: pointer to link speed
    408  *  @autoneg: TRUE when autoneg or autotry is enabled
    409  *
    410  *  Determines the link capabilities by reading the AUTOC register.
    411  **/
    412 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
    413 				      ixgbe_link_speed *speed,
    414 				      bool *autoneg)
    415 {
    416 	s32 status = IXGBE_SUCCESS;
    417 	u32 autoc = 0;
    418 
    419 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
    420 
    421 
    422 	/* Check if 1G SFP module. */
    423 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
    424 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
    425 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
    426 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
    427 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
    428 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
    429 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    430 		*autoneg = TRUE;
    431 		goto out;
    432 	}
    433 
    434 	/*
    435 	 * Determine link capabilities based on the stored value of AUTOC,
    436 	 * which represents EEPROM defaults.  If AUTOC value has not
    437 	 * been stored, use the current register values.
    438 	 */
    439 	if (hw->mac.orig_link_settings_stored)
    440 		autoc = hw->mac.orig_autoc;
    441 	else
    442 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    443 
    444 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
    445 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
    446 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    447 		*autoneg = FALSE;
    448 		break;
    449 
    450 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
    451 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    452 		*autoneg = FALSE;
    453 		break;
    454 
    455 	case IXGBE_AUTOC_LMS_1G_AN:
    456 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    457 		*autoneg = TRUE;
    458 		break;
    459 
    460 	case IXGBE_AUTOC_LMS_10G_SERIAL:
    461 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    462 		*autoneg = FALSE;
    463 		break;
    464 
    465 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
    466 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
    467 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
    468 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    469 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    470 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    471 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    472 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    473 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    474 		*autoneg = TRUE;
    475 		break;
    476 
    477 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
    478 		*speed = IXGBE_LINK_SPEED_100_FULL;
    479 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    480 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    481 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    482 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    483 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    484 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    485 		*autoneg = TRUE;
    486 		break;
    487 
    488 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
    489 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
    490 		*autoneg = FALSE;
    491 		break;
    492 
    493 	default:
    494 		status = IXGBE_ERR_LINK_SETUP;
    495 		goto out;
    496 		break;
    497 	}
    498 
    499 	if (hw->phy.multispeed_fiber) {
    500 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
    501 			  IXGBE_LINK_SPEED_1GB_FULL;
    502 
    503 		/* QSFP must not enable full auto-negotiation
    504 		 * Limited autoneg is enabled at 1G
    505 		 */
    506 		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
    507 			*autoneg = FALSE;
    508 		else
    509 			*autoneg = TRUE;
    510 	}
    511 
    512 out:
    513 	return status;
    514 }
    515 
    516 /**
    517  *  ixgbe_get_media_type_82599 - Get media type
    518  *  @hw: pointer to hardware structure
    519  *
    520  *  Returns the media type (fiber, copper, backplane)
    521  **/
    522 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
    523 {
    524 	enum ixgbe_media_type media_type;
    525 
    526 	DEBUGFUNC("ixgbe_get_media_type_82599");
    527 
    528 	/* Detect if there is a copper PHY attached. */
    529 	switch (hw->phy.type) {
    530 	case ixgbe_phy_cu_unknown:
    531 	case ixgbe_phy_tn:
    532 		media_type = ixgbe_media_type_copper;
    533 		goto out;
    534 	default:
    535 		break;
    536 	}
    537 
    538 	switch (hw->device_id) {
    539 	case IXGBE_DEV_ID_82599_KX4:
    540 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
    541 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
    542 	case IXGBE_DEV_ID_82599_KR:
    543 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
    544 	case IXGBE_DEV_ID_82599_XAUI_LOM:
    545 		/* Default device ID is mezzanine card KX/KX4 */
    546 		media_type = ixgbe_media_type_backplane;
    547 		break;
    548 	case IXGBE_DEV_ID_82599_SFP:
    549 	case IXGBE_DEV_ID_82599_SFP_FCOE:
    550 	case IXGBE_DEV_ID_82599_SFP_EM:
    551 	case IXGBE_DEV_ID_82599_SFP_SF2:
    552 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
    553 	case IXGBE_DEV_ID_82599EN_SFP:
    554 		media_type = ixgbe_media_type_fiber;
    555 		break;
    556 	case IXGBE_DEV_ID_82599_CX4:
    557 		media_type = ixgbe_media_type_cx4;
    558 		break;
    559 	case IXGBE_DEV_ID_82599_T3_LOM:
    560 		media_type = ixgbe_media_type_copper;
    561 		break;
    562 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
    563 		media_type = ixgbe_media_type_fiber_qsfp;
    564 		break;
    565 	case IXGBE_DEV_ID_82599_BYPASS:
    566 		media_type = ixgbe_media_type_fiber_fixed;
    567 		hw->phy.multispeed_fiber = TRUE;
    568 		break;
    569 	default:
    570 		media_type = ixgbe_media_type_unknown;
    571 		break;
    572 	}
    573 out:
    574 	return media_type;
    575 }
    576 
    577 /**
    578  *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
    579  *  @hw: pointer to hardware structure
    580  *
    581  *  Disables link during D3 power down sequence.
    582  *
    583  **/
    584 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
    585 {
    586 	u32 autoc2_reg;
    587 	u16 ee_ctrl_2 = 0;
    588 
    589 	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
    590 	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
    591 
    592 	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
    593 	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
    594 		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
    595 		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
    596 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
    597 	}
    598 }
    599 
/**
 *  ixgbe_start_mac_link_82599 - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC-class status from the
 *  semaphore acquire, or IXGBE_ERR_AUTONEG_NOT_COMPLETE on timeout.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/*  reset_pipeline requires us to hold this lock as it writes to
	 *  AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		/* Completion is only reported for the KX/KX4/KR backplane
		 * autonegotiation link modes. */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll LINKS up to IXGBE_AUTO_NEG_TIME iterations
			 * at 100 ms each. */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

out:
	return status;
}
    667 
    668 /**
    669  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
    670  *  @hw: pointer to hardware structure
    671  *
    672  *  The base drivers may require better control over SFP+ module
    673  *  PHY states.  This includes selectively shutting down the Tx
    674  *  laser on the PHY, effectively halting physical link.
    675  **/
    676 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    677 {
    678 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    679 
    680 	/* Blocked by MNG FW so bail */
    681 	if (ixgbe_check_reset_blocked(hw))
    682 		return;
    683 
    684 	/* Disable Tx laser; allow 100us to go dark per spec */
    685 	esdp_reg |= IXGBE_ESDP_SDP3;
    686 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    687 	IXGBE_WRITE_FLUSH(hw);
    688 	usec_delay(100);
    689 }
    690 
    691 /**
    692  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
    693  *  @hw: pointer to hardware structure
    694  *
    695  *  The base drivers may require better control over SFP+ module
    696  *  PHY states.  This includes selectively turning on the Tx
    697  *  laser on the PHY, effectively starting physical link.
    698  **/
    699 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    700 {
    701 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    702 
    703 	/* Enable Tx laser; allow 100ms to light up */
    704 	esdp_reg &= ~IXGBE_ESDP_SDP3;
    705 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    706 	IXGBE_WRITE_FLUSH(hw);
    707 	msec_delay(100);
    708 }
    709 
    710 /**
    711  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
    712  *  @hw: pointer to hardware structure
    713  *
    714  *  When the driver changes the link speeds that it can support,
    715  *  it sets autotry_restart to TRUE to indicate that we need to
    716  *  initiate a new autotry session with the link partner.  To do
    717  *  so, we set the speed then disable and re-enable the Tx laser, to
    718  *  alert the link partner that it also needs to restart autotry on its
    719  *  end.  This is consistent with TRUE clause 37 autoneg, which also
    720  *  involves a loss of signal.
    721  **/
    722 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    723 {
    724 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
    725 
    726 	/* Blocked by MNG FW so bail */
    727 	if (ixgbe_check_reset_blocked(hw))
    728 		return;
    729 
    730 	if (hw->mac.autotry_restart) {
    731 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
    732 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
    733 		hw->mac.autotry_restart = FALSE;
    734 	}
    735 }
    736 
    737 /**
    738  *  ixgbe_set_hard_rate_select_speed - Set module link speed
    739  *  @hw: pointer to hardware structure
    740  *  @speed: link speed to set
    741  *
    742  *  Set module link speed via RS0/RS1 rate select pins.
    743  */
    744 void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
    745 					ixgbe_link_speed speed)
    746 {
    747 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    748 
    749 	switch (speed) {
    750 	case IXGBE_LINK_SPEED_10GB_FULL:
    751 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
    752 		break;
    753 	case IXGBE_LINK_SPEED_1GB_FULL:
    754 		esdp_reg &= ~IXGBE_ESDP_SDP5;
    755 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
    756 		break;
    757 	default:
    758 		DEBUGOUT("Invalid fixed module speed\n");
    759 		return;
    760 	}
    761 
    762 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    763 	IXGBE_WRITE_FLUSH(hw);
    764 }
    765 
/**
 *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Implements the Intel SmartSpeed algorithm: first tries link with the
 *  full advertisement; if that fails and both KR and KX4/KX were
 *  advertised, retries with KR disabled (smart_speed_active), and
 *  finally restores the full advertisement.  Returns the status of the
 *  last setup/check call.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	 /* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
    877 
    878 /**
    879  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
    880  *  @hw: pointer to hardware structure
    881  *  @speed: new link speed
    882  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    883  *
    884  *  Set the link speed in the AUTOC register and restarts link.
    885  **/
    886 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
    887 			       ixgbe_link_speed speed,
    888 			       bool autoneg_wait_to_complete)
    889 {
    890 	bool autoneg = FALSE;
    891 	s32 status = IXGBE_SUCCESS;
    892 	u32 pma_pmd_1g, link_mode;
    893 	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
    894 	u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
    895 	u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
    896 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
    897 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
    898 	u32 links_reg;
    899 	u32 i;
    900 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
    901 
    902 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
    903 
    904 	/* Check to see if speed passed in is supported. */
    905 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
    906 	if (status)
    907 		goto out;
    908 
    909 	speed &= link_capabilities;
    910 
    911 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
    912 		status = IXGBE_ERR_LINK_SETUP;
    913 		goto out;
    914 	}
    915 
    916 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
    917 	if (hw->mac.orig_link_settings_stored)
    918 		orig_autoc = hw->mac.orig_autoc;
    919 	else
    920 		orig_autoc = autoc;
    921 
    922 	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
    923 	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
    924 
    925 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    926 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    927 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    928 		/* Set KX4/KX/KR support according to speed requested */
    929 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
    930 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
    931 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
    932 				autoc |= IXGBE_AUTOC_KX4_SUPP;
    933 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
    934 			    (hw->phy.smart_speed_active == FALSE))
    935 				autoc |= IXGBE_AUTOC_KR_SUPP;
    936 		}
    937 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    938 			autoc |= IXGBE_AUTOC_KX_SUPP;
    939 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
    940 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
    941 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
    942 		/* Switch from 1G SFI to 10G SFI if requested */
    943 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
    944 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
    945 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    946 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
    947 		}
    948 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
    949 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
    950 		/* Switch from 10G SFI to 1G SFI if requested */
    951 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
    952 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
    953 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    954 			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
    955 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
    956 			else
    957 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
    958 		}
    959 	}
    960 
    961 	if (autoc != current_autoc) {
    962 		/* Restart link */
    963 		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
    964 		if (status != IXGBE_SUCCESS)
    965 			goto out;
    966 
    967 		/* Only poll for autoneg to complete if specified to do so */
    968 		if (autoneg_wait_to_complete) {
    969 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    970 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    971 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    972 				links_reg = 0; /*Just in case Autoneg time=0*/
    973 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
    974 					links_reg =
    975 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
    976 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
    977 						break;
    978 					msec_delay(100);
    979 				}
    980 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
    981 					status =
    982 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
    983 					DEBUGOUT("Autoneg did not complete.\n");
    984 				}
    985 			}
    986 		}
    987 
    988 		/* Add delay to filter out noises during initial link setup */
    989 		msec_delay(50);
    990 	}
    991 
    992 out:
    993 	return status;
    994 }
    995 
    996 /**
    997  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
    998  *  @hw: pointer to hardware structure
    999  *  @speed: new link speed
   1000  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
   1001  *
   1002  *  Restarts link on PHY and MAC based on settings passed in.
   1003  **/
   1004 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
   1005 					 ixgbe_link_speed speed,
   1006 					 bool autoneg_wait_to_complete)
   1007 {
   1008 	s32 status;
   1009 
   1010 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
   1011 
   1012 	/* Setup the PHY according to input speed */
   1013 	status = hw->phy.ops.setup_link_speed(hw, speed,
   1014 					      autoneg_wait_to_complete);
   1015 	/* Set up MAC */
   1016 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
   1017 
   1018 	return status;
   1019 }
   1020 
   1021 /**
   1022  *  ixgbe_reset_hw_82599 - Perform hardware reset
   1023  *  @hw: pointer to hardware structure
   1024  *
   1025  *  Resets the hardware by resetting the transmit and receive units, masks
   1026  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
   1027  *  reset.
   1028  **/
   1029 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
   1030 {
   1031 	ixgbe_link_speed link_speed;
   1032 	s32 status;
   1033 	u32 ctrl = 0;
   1034 	u32 i, autoc, autoc2;
   1035 	u32 curr_lms;
   1036 	bool link_up = FALSE;
   1037 
   1038 	DEBUGFUNC("ixgbe_reset_hw_82599");
   1039 
   1040 	/* Call adapter stop to disable tx/rx and clear interrupts */
   1041 	status = hw->mac.ops.stop_adapter(hw);
   1042 	if (status != IXGBE_SUCCESS)
   1043 		goto reset_hw_out;
   1044 
   1045 	/* flush pending Tx transactions */
   1046 	ixgbe_clear_tx_pending(hw);
   1047 
   1048 	/* PHY ops must be identified and initialized prior to reset */
   1049 
   1050 	/* Identify PHY and related function pointers */
   1051 	status = hw->phy.ops.init(hw);
   1052 
   1053 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
   1054 		goto reset_hw_out;
   1055 
   1056 	/* Setup SFP module if there is one present. */
   1057 	if (hw->phy.sfp_setup_needed) {
   1058 		status = hw->mac.ops.setup_sfp(hw);
   1059 		hw->phy.sfp_setup_needed = FALSE;
   1060 	}
   1061 
   1062 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
   1063 		goto reset_hw_out;
   1064 
   1065 	/* Reset PHY */
   1066 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
   1067 		hw->phy.ops.reset(hw);
   1068 
   1069 	/* remember AUTOC from before we reset */
   1070 	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
   1071 
   1072 mac_reset_top:
   1073 	/*
   1074 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
   1075 	 * If link reset is used when link is up, it might reset the PHY when
   1076 	 * mng is using it.  If link is down or the flag to force full link
   1077 	 * reset is set, then perform link reset.
   1078 	 */
   1079 	ctrl = IXGBE_CTRL_LNK_RST;
   1080 	if (!hw->force_full_reset) {
   1081 		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
   1082 		if (link_up)
   1083 			ctrl = IXGBE_CTRL_RST;
   1084 	}
   1085 
   1086 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
   1087 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
   1088 	IXGBE_WRITE_FLUSH(hw);
   1089 
   1090 	/* Poll for reset bit to self-clear meaning reset is complete */
   1091 	for (i = 0; i < 10; i++) {
   1092 		usec_delay(1);
   1093 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
   1094 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
   1095 			break;
   1096 	}
   1097 
   1098 	if (ctrl & IXGBE_CTRL_RST_MASK) {
   1099 		status = IXGBE_ERR_RESET_FAILED;
   1100 		DEBUGOUT("Reset polling failed to complete.\n");
   1101 	}
   1102 
   1103 	msec_delay(50);
   1104 
   1105 	/*
   1106 	 * Double resets are required for recovery from certain error
   1107 	 * conditions.  Between resets, it is necessary to stall to
   1108 	 * allow time for any pending HW events to complete.
   1109 	 */
   1110 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
   1111 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
   1112 		goto mac_reset_top;
   1113 	}
   1114 
   1115 	/*
   1116 	 * Store the original AUTOC/AUTOC2 values if they have not been
   1117 	 * stored off yet.  Otherwise restore the stored original
   1118 	 * values since the reset operation sets back to defaults.
   1119 	 */
   1120 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   1121 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   1122 
   1123 	/* Enable link if disabled in NVM */
   1124 	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
   1125 		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
   1126 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
   1127 		IXGBE_WRITE_FLUSH(hw);
   1128 	}
   1129 
   1130 	if (hw->mac.orig_link_settings_stored == FALSE) {
   1131 		hw->mac.orig_autoc = autoc;
   1132 		hw->mac.orig_autoc2 = autoc2;
   1133 		hw->mac.orig_link_settings_stored = TRUE;
   1134 	} else {
   1135 
   1136 		/* If MNG FW is running on a multi-speed device that
   1137 		 * doesn't autoneg with out driver support we need to
   1138 		 * leave LMS in the state it was before we MAC reset.
   1139 		 * Likewise if we support WoL we don't want change the
   1140 		 * LMS state.
   1141 		 */
   1142 		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
   1143 		    hw->wol_enabled)
   1144 			hw->mac.orig_autoc =
   1145 				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
   1146 				curr_lms;
   1147 
   1148 		if (autoc != hw->mac.orig_autoc) {
   1149 			status = hw->mac.ops.prot_autoc_write(hw,
   1150 							hw->mac.orig_autoc,
   1151 							FALSE);
   1152 			if (status != IXGBE_SUCCESS)
   1153 				goto reset_hw_out;
   1154 		}
   1155 
   1156 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
   1157 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
   1158 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
   1159 			autoc2 |= (hw->mac.orig_autoc2 &
   1160 				   IXGBE_AUTOC2_UPPER_MASK);
   1161 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
   1162 		}
   1163 	}
   1164 
   1165 	/* Store the permanent mac address */
   1166 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
   1167 
   1168 	/*
   1169 	 * Store MAC address from RAR0, clear receive address registers, and
   1170 	 * clear the multicast table.  Also reset num_rar_entries to 128,
   1171 	 * since we modify this value when programming the SAN MAC address.
   1172 	 */
   1173 	hw->mac.num_rar_entries = 128;
   1174 	hw->mac.ops.init_rx_addrs(hw);
   1175 
   1176 	/* Store the permanent SAN mac address */
   1177 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
   1178 
   1179 	/* Add the SAN MAC address to the RAR only if it's a valid address */
   1180 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
   1181 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
   1182 				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
   1183 
   1184 		/* Save the SAN MAC RAR index */
   1185 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
   1186 
   1187 		/* Reserve the last RAR for the SAN MAC address */
   1188 		hw->mac.num_rar_entries--;
   1189 	}
   1190 
   1191 	/* Store the alternative WWNN/WWPN prefix */
   1192 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
   1193 				   &hw->mac.wwpn_prefix);
   1194 
   1195 reset_hw_out:
   1196 	return status;
   1197 }
   1198 
   1199 /**
   1200  * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
   1201  * @hw: pointer to hardware structure
   1202  * @fdircmd: current value of FDIRCMD register
   1203  */
   1204 static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
   1205 {
   1206 	int i;
   1207 
   1208 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
   1209 		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
   1210 		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
   1211 			return IXGBE_SUCCESS;
   1212 		usec_delay(10);
   1213 	}
   1214 
   1215 	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
   1216 }
   1217 
   1218 /**
   1219  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
   1220  *  @hw: pointer to hardware structure
   1221  **/
   1222 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
   1223 {
   1224 	s32 err;
   1225 	int i;
   1226 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
   1227 	u32 fdircmd;
   1228 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
   1229 
   1230 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
   1231 
   1232 	/*
   1233 	 * Before starting reinitialization process,
   1234 	 * FDIRCMD.CMD must be zero.
   1235 	 */
   1236 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
   1237 	if (err) {
   1238 		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
   1239 		return err;
   1240 	}
   1241 
   1242 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
   1243 	IXGBE_WRITE_FLUSH(hw);
   1244 	/*
   1245 	 * 82599 adapters flow director init flow cannot be restarted,
   1246 	 * Workaround 82599 silicon errata by performing the following steps
   1247 	 * before re-writing the FDIRCTRL control register with the same value.
   1248 	 * - write 1 to bit 8 of FDIRCMD register &
   1249 	 * - write 0 to bit 8 of FDIRCMD register
   1250 	 */
   1251 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1252 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
   1253 			 IXGBE_FDIRCMD_CLEARHT));
   1254 	IXGBE_WRITE_FLUSH(hw);
   1255 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1256 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1257 			 ~IXGBE_FDIRCMD_CLEARHT));
   1258 	IXGBE_WRITE_FLUSH(hw);
   1259 	/*
   1260 	 * Clear FDIR Hash register to clear any leftover hashes
   1261 	 * waiting to be programmed.
   1262 	 */
   1263 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
   1264 	IXGBE_WRITE_FLUSH(hw);
   1265 
   1266 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1267 	IXGBE_WRITE_FLUSH(hw);
   1268 
   1269 	/* Poll init-done after we write FDIRCTRL register */
   1270 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1271 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1272 				   IXGBE_FDIRCTRL_INIT_DONE)
   1273 			break;
   1274 		msec_delay(1);
   1275 	}
   1276 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
   1277 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
   1278 		return IXGBE_ERR_FDIR_REINIT_FAILED;
   1279 	}
   1280 
   1281 	/* Clear FDIR statistics registers (read to clear) */
   1282 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
   1283 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
   1284 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
   1285 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
   1286 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
   1287 
   1288 	return IXGBE_SUCCESS;
   1289 }
   1290 
   1291 /**
   1292  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
   1293  *  @hw: pointer to hardware structure
   1294  *  @fdirctrl: value to write to flow director control register
   1295  **/
   1296 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1297 {
   1298 	int i;
   1299 
   1300 	DEBUGFUNC("ixgbe_fdir_enable_82599");
   1301 
   1302 	/* Prime the keys for hashing */
   1303 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
   1304 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
   1305 
   1306 	/*
   1307 	 * Poll init-done after we write the register.  Estimated times:
   1308 	 *      10G: PBALLOC = 11b, timing is 60us
   1309 	 *       1G: PBALLOC = 11b, timing is 600us
   1310 	 *     100M: PBALLOC = 11b, timing is 6ms
   1311 	 *
   1312 	 *     Multiple these timings by 4 if under full Rx load
   1313 	 *
   1314 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
   1315 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
   1316 	 * this might not finish in our poll time, but we can live with that
   1317 	 * for now.
   1318 	 */
   1319 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1320 	IXGBE_WRITE_FLUSH(hw);
   1321 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1322 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1323 				   IXGBE_FDIRCTRL_INIT_DONE)
   1324 			break;
   1325 		msec_delay(1);
   1326 	}
   1327 
   1328 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
   1329 		DEBUGOUT("Flow Director poll time exceeded!\n");
   1330 }
   1331 
   1332 /**
   1333  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
   1334  *  @hw: pointer to hardware structure
   1335  *  @fdirctrl: value to write to flow director control register, initially
   1336  *	     contains just the value of the Rx packet buffer allocation
   1337  **/
   1338 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1339 {
   1340 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
   1341 
   1342 	/*
   1343 	 * Continue setup of fdirctrl register bits:
   1344 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1345 	 *  Set the maximum length per hash bucket to 0xA filters
   1346 	 *  Send interrupt when 64 filters are left
   1347 	 */
   1348 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1349 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1350 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1351 
   1352 	/* write hashes and fdirctrl register, poll for completion */
   1353 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1354 
   1355 	return IXGBE_SUCCESS;
   1356 }
   1357 
   1358 /**
   1359  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
   1360  *  @hw: pointer to hardware structure
   1361  *  @fdirctrl: value to write to flow director control register, initially
   1362  *	     contains just the value of the Rx packet buffer allocation
   1363  *  @cloud_mode: TRUE - cloud mode, FALSE - other mode
   1364  **/
   1365 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
   1366 			bool cloud_mode)
   1367 {
   1368 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
   1369 
   1370 	/*
   1371 	 * Continue setup of fdirctrl register bits:
   1372 	 *  Turn perfect match filtering on
   1373 	 *  Report hash in RSS field of Rx wb descriptor
   1374 	 *  Initialize the drop queue to queue 127
   1375 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1376 	 *  Set the maximum length per hash bucket to 0xA filters
   1377 	 *  Send interrupt when 64 (0x4 * 16) filters are left
   1378 	 */
   1379 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
   1380 		    IXGBE_FDIRCTRL_REPORT_STATUS |
   1381 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
   1382 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1383 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1384 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1385 	if ((hw->mac.type == ixgbe_mac_X550) ||
   1386 	    (hw->mac.type == ixgbe_mac_X550EM_x))
   1387 		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
   1388 
   1389 	if (cloud_mode)
   1390 		fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
   1391 					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
   1392 
   1393 	/* write hashes and fdirctrl register, poll for completion */
   1394 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1395 
   1396 	return IXGBE_SUCCESS;
   1397 }
   1398 
   1399 /**
   1400  *  ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
   1401  *  @hw: pointer to hardware structure
   1402  *  @dropqueue: Rx queue index used for the dropped packets
   1403  **/
   1404 void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
   1405 {
   1406 	u32 fdirctrl;
   1407 
   1408 	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
   1409 	/* Clear init done bit and drop queue field */
   1410 	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
   1411 	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
   1412 
   1413 	/* Set drop queue */
   1414 	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
   1415 	if ((hw->mac.type == ixgbe_mac_X550) ||
   1416 	    (hw->mac.type == ixgbe_mac_X550EM_x))
   1417 		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
   1418 
   1419 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1420 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
   1421 			 IXGBE_FDIRCMD_CLEARHT));
   1422 	IXGBE_WRITE_FLUSH(hw);
   1423 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1424 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1425 			 ~IXGBE_FDIRCMD_CLEARHT));
   1426 	IXGBE_WRITE_FLUSH(hw);
   1427 
   1428 	/* write hashes and fdirctrl register, poll for completion */
   1429 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1430 }
   1431 
   1432 /*
   1433  * These defines allow us to quickly generate all of the necessary instructions
   1434  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
   1435  * for values 0 through 15
   1436  */
   1437 #define IXGBE_ATR_COMMON_HASH_KEY \
   1438 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
   1439 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
   1440 do { \
   1441 	u32 n = (_n); \
   1442 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
   1443 		common_hash ^= lo_hash_dword >> n; \
   1444 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
   1445 		bucket_hash ^= lo_hash_dword >> n; \
   1446 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
   1447 		sig_hash ^= lo_hash_dword << (16 - n); \
   1448 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
   1449 		common_hash ^= hi_hash_dword >> n; \
   1450 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
   1451 		bucket_hash ^= hi_hash_dword >> n; \
   1452 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
   1453 		sig_hash ^= hi_hash_dword << (16 - n); \
   1454 } while (0)
   1455 
   1456 /**
   1457  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
   1458  *  @stream: input bitstream to compute the hash on
   1459  *
   1460  *  This function is almost identical to the function above but contains
   1461  *  several optimizations such as unwinding all of the loops, letting the
   1462  *  compiler work out all of the conditional ifs since the keys are static
   1463  *  defines, and computing two keys at once since the hashed dword stream
   1464  *  will be the same for both keys.
   1465  **/
   1466 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
   1467 				     union ixgbe_atr_hash_dword common)
   1468 {
   1469 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
   1470 	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
   1471 
   1472 	/* record the flow_vm_vlan bits as they are a key part to the hash */
   1473 	flow_vm_vlan = IXGBE_NTOHL(input.dword);
   1474 
   1475 	/* generate common hash dword */
   1476 	hi_hash_dword = IXGBE_NTOHL(common.dword);
   1477 
   1478 	/* low dword is word swapped version of common */
   1479 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
   1480 
   1481 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
   1482 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
   1483 
   1484 	/* Process bits 0 and 16 */
   1485 	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
   1486 
   1487 	/*
   1488 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
   1489 	 * delay this because bit 0 of the stream should not be processed
   1490 	 * so we do not add the VLAN until after bit 0 was processed
   1491 	 */
   1492 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
   1493 
   1494 	/* Process remaining 30 bit of the key */
   1495 	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
   1496 	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
   1497 	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
   1498 	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
   1499 	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
   1500 	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
   1501 	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
   1502 	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
   1503 	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
   1504 	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
   1505 	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
   1506 	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
   1507 	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
   1508 	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
   1509 	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
   1510 
   1511 	/* combine common_hash result with signature and bucket hashes */
   1512 	bucket_hash ^= common_hash;
   1513 	bucket_hash &= IXGBE_ATR_HASH_MASK;
   1514 
   1515 	sig_hash ^= common_hash << 16;
   1516 	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
   1517 
   1518 	/* return completed signature hash */
   1519 	return sig_hash ^ bucket_hash;
   1520 }
   1521 
   1522 /**
   1523  *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
   1524  *  @hw: pointer to hardware structure
   1525  *  @input: unique input dword
   1526  *  @common: compressed common input dword
   1527  *  @queue: queue index to direct traffic to
   1528  *
   1529  * Note that the tunnel bit in input must not be set when the hardware
   1530  * tunneling support does not exist.
   1531  **/
   1532 void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
   1533 					   union ixgbe_atr_hash_dword input,
   1534 					   union ixgbe_atr_hash_dword common,
   1535 					   u8 queue)
   1536 {
   1537 	u64 fdirhashcmd;
   1538 	u8 flow_type;
   1539 	bool tunnel;
   1540 	u32 fdircmd;
   1541 
   1542 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
   1543 
   1544 	/*
   1545 	 * Get the flow_type in order to program FDIRCMD properly
   1546 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
   1547 	 * fifth is FDIRCMD.TUNNEL_FILTER
   1548 	 */
   1549 	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
   1550 	flow_type = input.formatted.flow_type &
   1551 		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
   1552 	switch (flow_type) {
   1553 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   1554 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   1555 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   1556 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
   1557 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
   1558 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
   1559 		break;
   1560 	default:
   1561 		DEBUGOUT(" Error on flow type input\n");
   1562 		return;
   1563 	}
   1564 
   1565 	/* configure FDIRCMD register */
   1566 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
   1567 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
   1568 	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
   1569 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
   1570 	if (tunnel)
   1571 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
   1572 
   1573 	/*
   1574 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
   1575 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
   1576 	 */
   1577 	fdirhashcmd = (u64)fdircmd << 32;
   1578 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
   1579 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
   1580 
   1581 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
   1582 
   1583 	return;
   1584 }
   1585 
/*
 * One round of the perfect-filter bucket hash: bit n of the bucket key
 * selects lo_hash_dword, bit n+16 selects hi_hash_dword.  Expects
 * bucket_hash, lo_hash_dword and hi_hash_dword in the caller's scope.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
   1594 
   1595 /**
   1596  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
   1597  *  @atr_input: input bitstream to compute the hash on
   1598  *  @input_mask: mask for the input bitstream
   1599  *
   1600  *  This function serves two main purposes.  First it applies the input_mask
   1601  *  to the atr_input resulting in a cleaned up atr_input data stream.
   1602  *  Secondly it computes the hash and stores it in the bkt_hash field at
   1603  *  the end of the input byte stream.  This way it will be available for
   1604  *  future use without needing to recompute the hash.
   1605  **/
   1606 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
   1607 					  union ixgbe_atr_input *input_mask)
   1608 {
   1609 
   1610 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
   1611 	u32 bucket_hash = 0;
   1612 	u32 hi_dword = 0;
   1613 	u32 i = 0;
   1614 
   1615 	/* Apply masks to input data */
   1616 	for (i = 0; i < 14; i++)
   1617 		input->dword_stream[i]  &= input_mask->dword_stream[i];
   1618 
   1619 	/* record the flow_vm_vlan bits as they are a key part to the hash */
   1620 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
   1621 
   1622 	/* generate common hash dword */
   1623 	for (i = 1; i <= 13; i++)
   1624 		hi_dword ^= input->dword_stream[i];
   1625 	hi_hash_dword = IXGBE_NTOHL(hi_dword);
   1626 
   1627 	/* low dword is word swapped version of common */
   1628 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
   1629 
   1630 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
   1631 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
   1632 
   1633 	/* Process bits 0 and 16 */
   1634 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
   1635 
   1636 	/*
   1637 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
   1638 	 * delay this because bit 0 of the stream should not be processed
   1639 	 * so we do not add the VLAN until after bit 0 was processed
   1640 	 */
   1641 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
   1642 
   1643 	/* Process remaining 30 bit of the key */
   1644 	for (i = 1; i <= 15; i++)
   1645 		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
   1646 
   1647 	/*
   1648 	 * Limit hash to 13 bits since max bucket count is 8K.
   1649 	 * Store result at the end of the input stream.
   1650 	 */
   1651 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
   1652 }
   1653 
   1654 /**
   1655  *  ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
   1656  *  @input_mask: mask to be bit swapped
   1657  *
   1658  *  The source and destination port masks for flow director are bit swapped
   1659  *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
   1660  *  generate a correctly swapped value we need to bit swap the mask and that
   1661  *  is what is accomplished by this function.
   1662  **/
   1663 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
   1664 {
   1665 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
   1666 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
   1667 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
   1668 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
   1669 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
   1670 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
   1671 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
   1672 }
   1673 
   1674 /*
   1675  * These two macros are meant to address the fact that we have registers
   1676  * that are either all or in part big-endian.  As a result on big-endian
   1677  * systems we will end up byte swapping the value to little-endian before
   1678  * it is byte swapped again and written to the hardware in the original
   1679  * big-endian format.
   1680  */
   1681 #define IXGBE_STORE_AS_BE32(_value) \
   1682 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
   1683 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
   1684 
   1685 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
   1686 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
   1687 
   1688 #define IXGBE_STORE_AS_BE16(_value) \
   1689 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
   1690 
/**
 *  ixgbe_fdir_set_input_mask_82599 - Program Flow Director mask registers
 *  @hw: pointer to hardware structure
 *  @input_mask: mask describing which filter fields are significant
 *  @cloud_mode: TRUE when programming masks for tunneled (cloud) filters
 *
 *  Programs FDIRM (and, in cloud mode, FDIRIP6M) plus the TCP/UDP port
 *  and IPv4 address mask registers from @input_mask.  The switch
 *  statements below use deliberate case fall-through: an all-zero field
 *  means "mask the whole field out", an all-ones field means "match the
 *  whole field", and any other (partial) value is rejected.
 *
 *  Returns IXGBE_SUCCESS, or IXGBE_ERR_CONFIG for an unsupported mask.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask, bool cloud_mode)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;
	u32 fdirip6m;
	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID, fall through to mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only, fall through */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes, fall through */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (cloud_mode) {
		fdirm |= IXGBE_FDIRM_L3P;
		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;

		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
		case 0x00:
			/* Mask inner MAC, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
			/* fall through */
		case 0xFF:
			break;
		default:
			DEBUGOUT(" Error on inner_mac byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
		case 0x0:
			/* Mask vxlan id */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			/* Match only the low 24 bits of the TNI/VNI */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			DEBUGOUT(" Error on TNI/VNI byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
		case 0x0:
			/* Mask tunnel type, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			/* fall through */
		case 0xFFFF:
			break;
		default:
			DEBUGOUT(" Error on tunnel type byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);

		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and
		 * FDIRDIP4M in cloud mode to allow L3/L3 packets to
		 * tunnel.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	if (!cloud_mode) {
		/* store the TCP/UDP port masks, bit reversed from port
		 * layout */
		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

		/* write both the same so that UDP and TCP use the same mask */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
		/* also use it for SCTP */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
			break;
		default:
			break;
		}

		/* store source and destination IP masks (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
				     ~input_mask->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
				     ~input_mask->formatted.dst_ip[0]);
	}
	return IXGBE_SUCCESS;
}
   1852 
/**
 *  ixgbe_fdir_write_perfect_filter_82599 - Write a perfect filter to hardware
 *  @hw: pointer to hardware structure
 *  @input: input bitstream (already masked, with bkt_hash computed)
 *  @soft_id: software index the driver uses to track this filter
 *  @queue: Rx queue index to direct matching traffic to
 *  @cloud_mode: TRUE when programming a tunneled (cloud) filter
 *
 *  Programs the filter field registers (addresses, ports, VLAN/flex
 *  bytes, hash) and then issues the ADD_FLOW command via FDIRCMD.
 *  Returns IXGBE_SUCCESS, or the error from the command-completion poll.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue, bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
			input->formatted.src_ip[0]);

		/* record the first 32 bits of the destination address
		 * (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
			input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		/* a non-zero tunnel type sets the "tunnel present" flag bit */
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000;

		/* pack the 6-byte inner MAC into two little-endian words */
		addr_low = ((u32)input->formatted.inner_mac[0] |
				((u32)input->formatted.inner_mac[1] << 8) |
				((u32)input->formatted.inner_mac[2] << 16) |
				((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
				((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
	}

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}
   1941 
/**
 *  ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter
 *  @hw: pointer to hardware structure
 *  @input: input bitstream identifying the filter (bkt_hash must be set)
 *  @soft_id: software index the filter was programmed with
 *
 *  Queries the hardware for the filter identified by hash and soft_id
 *  and, if it is present, issues a REMOVE_FLOW command for it.
 *  Returns IXGBE_SUCCESS, or the error from the command-completion poll.
 **/
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	/* fdircmd is overwritten with the completed command word */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return IXGBE_SUCCESS;
}
   1977 
   1978 /**
   1979  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
   1980  *  @hw: pointer to hardware structure
   1981  *  @input: input bitstream
   1982  *  @input_mask: mask for the input bitstream
   1983  *  @soft_id: software index for the filters
   1984  *  @queue: queue index to direct traffic to
   1985  *
   1986  *  Note that the caller to this function must lock before calling, since the
   1987  *  hardware writes must be protected from one another.
   1988  **/
   1989 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
   1990 					union ixgbe_atr_input *input,
   1991 					union ixgbe_atr_input *input_mask,
   1992 					u16 soft_id, u8 queue, bool cloud_mode)
   1993 {
   1994 	s32 err = IXGBE_ERR_CONFIG;
   1995 
   1996 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
   1997 
   1998 	/*
   1999 	 * Check flow_type formatting, and bail out before we touch the hardware
   2000 	 * if there's a configuration issue
   2001 	 */
   2002 	switch (input->formatted.flow_type) {
   2003 	case IXGBE_ATR_FLOW_TYPE_IPV4:
   2004 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
   2005 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
   2006 		if (input->formatted.dst_port || input->formatted.src_port) {
   2007 			DEBUGOUT(" Error on src/dst port\n");
   2008 			return IXGBE_ERR_CONFIG;
   2009 		}
   2010 		break;
   2011 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   2012 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
   2013 		if (input->formatted.dst_port || input->formatted.src_port) {
   2014 			DEBUGOUT(" Error on src/dst port\n");
   2015 			return IXGBE_ERR_CONFIG;
   2016 		}
   2017 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   2018 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
   2019 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   2020 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
   2021 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
   2022 						  IXGBE_ATR_L4TYPE_MASK;
   2023 		break;
   2024 	default:
   2025 		DEBUGOUT(" Error on flow type input\n");
   2026 		return err;
   2027 	}
   2028 
   2029 	/* program input mask into the HW */
   2030 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
   2031 	if (err)
   2032 		return err;
   2033 
   2034 	/* apply mask and compute/store hash */
   2035 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
   2036 
   2037 	/* program filters to filter memory */
   2038 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
   2039 						     soft_id, queue, cloud_mode);
   2040 }
   2041 
   2042 /**
   2043  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
   2044  *  @hw: pointer to hardware structure
   2045  *  @reg: analog register to read
   2046  *  @val: read value
   2047  *
   2048  *  Performs read operation to Omer analog register specified.
   2049  **/
   2050 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
   2051 {
   2052 	u32  core_ctl;
   2053 
   2054 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
   2055 
   2056 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
   2057 			(reg << 8));
   2058 	IXGBE_WRITE_FLUSH(hw);
   2059 	usec_delay(10);
   2060 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
   2061 	*val = (u8)core_ctl;
   2062 
   2063 	return IXGBE_SUCCESS;
   2064 }
   2065 
   2066 /**
   2067  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
   2068  *  @hw: pointer to hardware structure
   2069  *  @reg: atlas register to write
   2070  *  @val: value to write
   2071  *
   2072  *  Performs write operation to Omer analog register specified.
   2073  **/
   2074 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
   2075 {
   2076 	u32  core_ctl;
   2077 
   2078 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
   2079 
   2080 	core_ctl = (reg << 8) | val;
   2081 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
   2082 	IXGBE_WRITE_FLUSH(hw);
   2083 	usec_delay(10);
   2084 
   2085 	return IXGBE_SUCCESS;
   2086 }
   2087 
   2088 /**
   2089  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
   2090  *  @hw: pointer to hardware structure
   2091  *
   2092  *  Starts the hardware using the generic start_hw function
   2093  *  and the generation start_hw function.
   2094  *  Then performs revision-specific operations, if any.
   2095  **/
   2096 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
   2097 {
   2098 	s32 ret_val = IXGBE_SUCCESS;
   2099 
   2100 	DEBUGFUNC("ixgbe_start_hw_82599");
   2101 
   2102 	ret_val = ixgbe_start_hw_generic(hw);
   2103 	if (ret_val != IXGBE_SUCCESS)
   2104 		goto out;
   2105 
   2106 	ret_val = ixgbe_start_hw_gen2(hw);
   2107 	if (ret_val != IXGBE_SUCCESS)
   2108 		goto out;
   2109 
   2110 	/* We need to run link autotry after the driver loads */
   2111 	hw->mac.autotry_restart = TRUE;
   2112 
   2113 	if (ret_val == IXGBE_SUCCESS)
   2114 		ret_val = ixgbe_verify_fw_version_82599(hw);
   2115 out:
   2116 	return ret_val;
   2117 }
   2118 
   2119 /**
   2120  *  ixgbe_identify_phy_82599 - Get physical layer module
   2121  *  @hw: pointer to hardware structure
   2122  *
   2123  *  Determines the physical layer module found on the current adapter.
   2124  *  If PHY already detected, maintains current PHY type in hw struct,
   2125  *  otherwise executes the PHY detection routine.
   2126  **/
   2127 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
   2128 {
   2129 	s32 status;
   2130 
   2131 	DEBUGFUNC("ixgbe_identify_phy_82599");
   2132 
   2133 	/* Detect PHY if not unknown - returns success if already detected. */
   2134 	status = ixgbe_identify_phy_generic(hw);
   2135 	if (status != IXGBE_SUCCESS) {
   2136 		/* 82599 10GBASE-T requires an external PHY */
   2137 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
   2138 			return status;
   2139 		else
   2140 			status = ixgbe_identify_module_generic(hw);
   2141 	}
   2142 
   2143 	/* Set PHY type none if no PHY detected */
   2144 	if (hw->phy.type == ixgbe_phy_unknown) {
   2145 		hw->phy.type = ixgbe_phy_none;
   2146 		return IXGBE_SUCCESS;
   2147 	}
   2148 
   2149 	/* Return error if SFP module has been detected but is not supported */
   2150 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
   2151 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
   2152 
   2153 	return status;
   2154 }
   2155 
   2156 /**
   2157  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
   2158  *  @hw: pointer to hardware structure
   2159  *
   2160  *  Determines physical layer capabilities of the current configuration.
   2161  **/
   2162 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
   2163 {
   2164 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   2165 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   2166 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   2167 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
   2168 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
   2169 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
   2170 	u16 ext_ability = 0;
   2171 
   2172 	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
   2173 
   2174 	hw->phy.ops.identify(hw);
   2175 
   2176 	switch (hw->phy.type) {
   2177 	case ixgbe_phy_tn:
   2178 	case ixgbe_phy_cu_unknown:
   2179 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
   2180 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
   2181 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
   2182 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
   2183 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
   2184 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
   2185 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
   2186 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
   2187 		goto out;
   2188 	default:
   2189 		break;
   2190 	}
   2191 
   2192 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
   2193 	case IXGBE_AUTOC_LMS_1G_AN:
   2194 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
   2195 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
   2196 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
   2197 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
   2198 			goto out;
   2199 		} else
   2200 			/* SFI mode so read SFP module */
   2201 			goto sfp_check;
   2202 		break;
   2203 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
   2204 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
   2205 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
   2206 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
   2207 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2208 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
   2209 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
   2210 		goto out;
   2211 		break;
   2212 	case IXGBE_AUTOC_LMS_10G_SERIAL:
   2213 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
   2214 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2215 			goto out;
   2216 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
   2217 			goto sfp_check;
   2218 		break;
   2219 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
   2220 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
   2221 		if (autoc & IXGBE_AUTOC_KX_SUPP)
   2222 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
   2223 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
   2224 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2225 		if (autoc & IXGBE_AUTOC_KR_SUPP)
   2226 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2227 		goto out;
   2228 		break;
   2229 	default:
   2230 		goto out;
   2231 		break;
   2232 	}
   2233 
   2234 sfp_check:
   2235 	/* SFP check must be done last since DA modules are sometimes used to
   2236 	 * test KR mode -  we need to id KR mode correctly before SFP module.
   2237 	 * Call identify_sfp because the pluggable module may have changed */
   2238 	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
   2239 out:
   2240 	return physical_layer;
   2241 }
   2242 
   2243 /**
   2244  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
   2245  *  @hw: pointer to hardware structure
   2246  *  @regval: register value to write to RXCTRL
   2247  *
   2248  *  Enables the Rx DMA unit for 82599
   2249  **/
   2250 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
   2251 {
   2252 
   2253 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
   2254 
   2255 	/*
   2256 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
   2257 	 * If traffic is incoming before we enable the Rx unit, it could hang
   2258 	 * the Rx DMA unit.  Therefore, make sure the security engine is
   2259 	 * completely disabled prior to enabling the Rx unit.
   2260 	 */
   2261 
   2262 	hw->mac.ops.disable_sec_rx_path(hw);
   2263 
   2264 	if (regval & IXGBE_RXCTRL_RXEN)
   2265 		ixgbe_enable_rx(hw);
   2266 	else
   2267 		ixgbe_disable_rx(hw);
   2268 
   2269 	hw->mac.ops.enable_sec_rx_path(hw);
   2270 
   2271 	return IXGBE_SUCCESS;
   2272 }
   2273 
   2274 /**
   2275  *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
   2276  *  @hw: pointer to hardware structure
   2277  *
   2278  *  Verifies that installed the firmware version is 0.6 or higher
   2279  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
   2280  *
   2281  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
   2282  *  if the FW version is not supported.
   2283  **/
   2284 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
   2285 {
   2286 	s32 status = IXGBE_ERR_EEPROM_VERSION;
   2287 	u16 fw_offset, fw_ptp_cfg_offset;
   2288 	u16 fw_version;
   2289 
   2290 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
   2291 
   2292 	/* firmware check is only necessary for SFI devices */
   2293 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
   2294 		status = IXGBE_SUCCESS;
   2295 		goto fw_version_out;
   2296 	}
   2297 
   2298 	/* get the offset to the Firmware Module block */
   2299 	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
   2300 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2301 			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
   2302 		return IXGBE_ERR_EEPROM_VERSION;
   2303 	}
   2304 
   2305 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
   2306 		goto fw_version_out;
   2307 
   2308 	/* get the offset to the Pass Through Patch Configuration block */
   2309 	if (hw->eeprom.ops.read(hw, (fw_offset +
   2310 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
   2311 				 &fw_ptp_cfg_offset)) {
   2312 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2313 			      "eeprom read at offset %d failed",
   2314 			      fw_offset +
   2315 			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
   2316 		return IXGBE_ERR_EEPROM_VERSION;
   2317 	}
   2318 
   2319 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
   2320 		goto fw_version_out;
   2321 
   2322 	/* get the firmware version */
   2323 	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
   2324 			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
   2325 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2326 			      "eeprom read at offset %d failed",
   2327 			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
   2328 		return IXGBE_ERR_EEPROM_VERSION;
   2329 	}
   2330 
   2331 	if (fw_version > 0x5)
   2332 		status = IXGBE_SUCCESS;
   2333 
   2334 fw_version_out:
   2335 	return status;
   2336 }
   2337 
   2338 /**
   2339  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
   2340  *  @hw: pointer to hardware structure
   2341  *
   2342  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
   2343  *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
   2344  **/
   2345 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
   2346 {
   2347 	bool lesm_enabled = FALSE;
   2348 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
   2349 	s32 status;
   2350 
   2351 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
   2352 
   2353 	/* get the offset to the Firmware Module block */
   2354 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
   2355 
   2356 	if ((status != IXGBE_SUCCESS) ||
   2357 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
   2358 		goto out;
   2359 
   2360 	/* get the offset to the LESM Parameters block */
   2361 	status = hw->eeprom.ops.read(hw, (fw_offset +
   2362 				     IXGBE_FW_LESM_PARAMETERS_PTR),
   2363 				     &fw_lesm_param_offset);
   2364 
   2365 	if ((status != IXGBE_SUCCESS) ||
   2366 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
   2367 		goto out;
   2368 
   2369 	/* get the LESM state word */
   2370 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
   2371 				     IXGBE_FW_LESM_STATE_1),
   2372 				     &fw_lesm_state);
   2373 
   2374 	if ((status == IXGBE_SUCCESS) &&
   2375 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
   2376 		lesm_enabled = TRUE;
   2377 
   2378 out:
   2379 	return lesm_enabled;
   2380 }
   2381 
   2382 /**
   2383  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
   2384  *  fastest available method
   2385  *
   2386  *  @hw: pointer to hardware structure
   2387  *  @offset: offset of  word in EEPROM to read
   2388  *  @words: number of words
   2389  *  @data: word(s) read from the EEPROM
   2390  *
   2391  *  Retrieves 16 bit word(s) read from EEPROM
   2392  **/
   2393 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
   2394 					  u16 words, u16 *data)
   2395 {
   2396 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2397 	s32 ret_val = IXGBE_ERR_CONFIG;
   2398 
   2399 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
   2400 
   2401 	/*
   2402 	 * If EEPROM is detected and can be addressed using 14 bits,
   2403 	 * use EERD otherwise use bit bang
   2404 	 */
   2405 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2406 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
   2407 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
   2408 							 data);
   2409 	else
   2410 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
   2411 								    words,
   2412 								    data);
   2413 
   2414 	return ret_val;
   2415 }
   2416 
   2417 /**
   2418  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
   2419  *  fastest available method
   2420  *
   2421  *  @hw: pointer to hardware structure
   2422  *  @offset: offset of  word in the EEPROM to read
   2423  *  @data: word read from the EEPROM
   2424  *
   2425  *  Reads a 16 bit word from the EEPROM
   2426  **/
   2427 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
   2428 				   u16 offset, u16 *data)
   2429 {
   2430 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2431 	s32 ret_val = IXGBE_ERR_CONFIG;
   2432 
   2433 	DEBUGFUNC("ixgbe_read_eeprom_82599");
   2434 
   2435 	/*
   2436 	 * If EEPROM is detected and can be addressed using 14 bits,
   2437 	 * use EERD otherwise use bit bang
   2438 	 */
   2439 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2440 	    (offset <= IXGBE_EERD_MAX_ADDR))
   2441 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
   2442 	else
   2443 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
   2444 
   2445 	return ret_val;
   2446 }
   2447 
   2448 /**
   2449  * ixgbe_reset_pipeline_82599 - perform pipeline reset
   2450  *
   2451  *  @hw: pointer to hardware structure
   2452  *
   2453  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
   2454  * full pipeline reset.  This function assumes the SW/FW lock is held.
   2455  **/
   2456 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
   2457 {
   2458 	s32 ret_val;
   2459 	u32 anlp1_reg = 0;
   2460 	u32 i, autoc_reg, autoc2_reg;
   2461 
   2462 	/* Enable link if disabled in NVM */
   2463 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   2464 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
   2465 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
   2466 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
   2467 		IXGBE_WRITE_FLUSH(hw);
   2468 	}
   2469 
   2470 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   2471 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
   2472 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
   2473 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
   2474 			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
   2475 	/* Wait for AN to leave state 0 */
   2476 	for (i = 0; i < 10; i++) {
   2477 		msec_delay(4);
   2478 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
   2479 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
   2480 			break;
   2481 	}
   2482 
   2483 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
   2484 		DEBUGOUT("auto negotiation not completed\n");
   2485 		ret_val = IXGBE_ERR_RESET_FAILED;
   2486 		goto reset_pipeline_out;
   2487 	}
   2488 
   2489 	ret_val = IXGBE_SUCCESS;
   2490 
   2491 reset_pipeline_out:
   2492 	/* Write AUTOC register with original LMS field and Restart_AN */
   2493 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
   2494 	IXGBE_WRITE_FLUSH(hw);
   2495 
   2496 	return ret_val;
   2497 }
   2498 
   2499 /**
   2500  *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
   2501  *  @hw: pointer to hardware structure
   2502  *  @byte_offset: byte offset to read
   2503  *  @data: value read
   2504  *
   2505  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
   2506  *  a specified device address.
   2507  **/
   2508 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
   2509 				u8 dev_addr, u8 *data)
   2510 {
   2511 	u32 esdp;
   2512 	s32 status;
   2513 	s32 timeout = 200;
   2514 
   2515 	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
   2516 
   2517 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2518 		/* Acquire I2C bus ownership. */
   2519 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2520 		esdp |= IXGBE_ESDP_SDP0;
   2521 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2522 		IXGBE_WRITE_FLUSH(hw);
   2523 
   2524 		while (timeout) {
   2525 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2526 			if (esdp & IXGBE_ESDP_SDP1)
   2527 				break;
   2528 
   2529 			msec_delay(5);
   2530 			timeout--;
   2531 		}
   2532 
   2533 		if (!timeout) {
   2534 			DEBUGOUT("Driver can't access resource,"
   2535 				 " acquiring I2C bus timeout.\n");
   2536 			status = IXGBE_ERR_I2C;
   2537 			goto release_i2c_access;
   2538 		}
   2539 	}
   2540 
   2541 	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
   2542 
   2543 release_i2c_access:
   2544 
   2545 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2546 		/* Release I2C bus ownership. */
   2547 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2548 		esdp &= ~IXGBE_ESDP_SDP0;
   2549 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2550 		IXGBE_WRITE_FLUSH(hw);
   2551 	}
   2552 
   2553 	return status;
   2554 }
   2555 
   2556 /**
   2557  *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
   2558  *  @hw: pointer to hardware structure
   2559  *  @byte_offset: byte offset to write
   2560  *  @data: value to write
   2561  *
   2562  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
   2563  *  a specified device address.
   2564  **/
   2565 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
   2566 				 u8 dev_addr, u8 data)
   2567 {
   2568 	u32 esdp;
   2569 	s32 status;
   2570 	s32 timeout = 200;
   2571 
   2572 	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
   2573 
   2574 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2575 		/* Acquire I2C bus ownership. */
   2576 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2577 		esdp |= IXGBE_ESDP_SDP0;
   2578 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2579 		IXGBE_WRITE_FLUSH(hw);
   2580 
   2581 		while (timeout) {
   2582 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2583 			if (esdp & IXGBE_ESDP_SDP1)
   2584 				break;
   2585 
   2586 			msec_delay(5);
   2587 			timeout--;
   2588 		}
   2589 
   2590 		if (!timeout) {
   2591 			DEBUGOUT("Driver can't access resource,"
   2592 				 " acquiring I2C bus timeout.\n");
   2593 			status = IXGBE_ERR_I2C;
   2594 			goto release_i2c_access;
   2595 		}
   2596 	}
   2597 
   2598 	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
   2599 
   2600 release_i2c_access:
   2601 
   2602 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2603 		/* Release I2C bus ownership. */
   2604 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2605 		esdp &= ~IXGBE_ESDP_SDP0;
   2606 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2607 		IXGBE_WRITE_FLUSH(hw);
   2608 	}
   2609 
   2610 	return status;
   2611 }
   2612