Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe_82599.c revision 1.13
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 282289 2015-04-30 22:53:27Z erj $*/
     34 /*$NetBSD: ixgbe_82599.c,v 1.13 2016/12/01 06:56:28 msaitoh Exp $*/
     35 
     36 #include "ixgbe_type.h"
     37 #include "ixgbe_82599.h"
     38 #include "ixgbe_api.h"
     39 #include "ixgbe_common.h"
     40 #include "ixgbe_phy.h"
     41 
     42 #define IXGBE_82599_MAX_TX_QUEUES 128
     43 #define IXGBE_82599_MAX_RX_QUEUES 128
     44 #define IXGBE_82599_RAR_ENTRIES   128
     45 #define IXGBE_82599_MC_TBL_SIZE   128
     46 #define IXGBE_82599_VFT_TBL_SIZE  128
     47 #define IXGBE_82599_RX_PB_SIZE	  512
     48 
     49 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
     50 					 ixgbe_link_speed speed,
     51 					 bool autoneg_wait_to_complete);
     52 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
     53 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
     54 				   u16 offset, u16 *data);
     55 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
     56 					  u16 words, u16 *data);
     57 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     58 					u8 dev_addr, u8 *data);
     59 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     60 					u8 dev_addr, u8 data);
     61 
     62 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
     63 {
     64 	struct ixgbe_mac_info *mac = &hw->mac;
     65 
     66 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
     67 
     68 	/*
     69 	 * enable the laser control functions for SFP+ fiber
     70 	 * and MNG not enabled
     71 	 */
     72 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
     73 	    !ixgbe_mng_enabled(hw)) {
     74 		mac->ops.disable_tx_laser =
     75 				       ixgbe_disable_tx_laser_multispeed_fiber;
     76 		mac->ops.enable_tx_laser =
     77 					ixgbe_enable_tx_laser_multispeed_fiber;
     78 		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
     79 
     80 	} else {
     81 		mac->ops.disable_tx_laser = NULL;
     82 		mac->ops.enable_tx_laser = NULL;
     83 		mac->ops.flap_tx_laser = NULL;
     84 	}
     85 
     86 	if (hw->phy.multispeed_fiber) {
     87 		/* Set up dual speed SFP+ support */
     88 		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
     89 		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
     90 		mac->ops.set_rate_select_speed =
     91 					       ixgbe_set_hard_rate_select_speed;
     92 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
     93 			mac->ops.set_rate_select_speed =
     94 					       ixgbe_set_soft_rate_select_speed;
     95 	} else {
     96 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
     97 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
     98 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
     99 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    100 			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
    101 		} else {
    102 			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
    103 		}
    104 	}
    105 }
    106 
    107 /**
    108  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
    109  *  @hw: pointer to hardware structure
    110  *
    111  *  Initialize any function pointers that were not able to be
    112  *  set during init_shared_code because the PHY/SFP type was
    113  *  not known.  Perform the SFP init if necessary.
    114  *
    115  **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus: SDP0 becomes a
		 * driven-low output, SDP1 an input, and both pins leave
		 * native mode so software can bit-bang the shared bus. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		/* Route I2C byte access through the QSFP-aware helpers */
		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module; an unsupported SFP aborts the
	 * rest of the init and that error is returned to the caller. */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
				  ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TN (copper) PHY gets its own link and firmware helpers */
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			     ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}
    173 
/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  Replays the EEPROM-provided SFP init sequence into the CORECTL
 *  register (under the MAC-CSR SW/FW semaphore), then restarts the DSP
 *  in SFI mode via a protected AUTOC write.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		/* Locate the per-SFP init sequence in the EEPROM */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Write each EEPROM word to CORECTL until the 0xffff
		 * end-of-list sentinel; a failed read bails via the
		 * error path that still releases the semaphore. */
		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}
    240 
    241 /**
    242  *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
    243  *  @hw: pointer to hardware structure
    244  *  @locked: Return the if we locked for this read.
    245  *  @reg_val: Value we read from AUTOC
    246  *
    247  *  For this part (82599) we need to wrap read-modify-writes with a possible
    248  *  FW/SW lock.  It is assumed this lock will be freed with the next
    249  *  prot_autoc_write_82599().
    250  */
    251 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
    252 {
    253 	s32 ret_val;
    254 
    255 	*locked = FALSE;
    256 	 /* If LESM is on then we need to hold the SW/FW semaphore. */
    257 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    258 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
    259 					IXGBE_GSSR_MAC_CSR_SM);
    260 		if (ret_val != IXGBE_SUCCESS)
    261 			return IXGBE_ERR_SWFW_SYNC;
    262 
    263 		*locked = TRUE;
    264 	}
    265 
    266 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    267 	return IXGBE_SUCCESS;
    268 }
    269 
    270 /**
    271  * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
    272  * @hw: pointer to hardware structure
    273  * @reg_val: value to write to AUTOC
    274  * @locked: bool to indicate whether the SW/FW lock was already taken by
    275  *           previous proc_autoc_read_82599.
    276  *
    277  * This part (82599) may need to hold the SW/FW lock around all writes to
    278  * AUTOC. Likewise after a write we need to do a pipeline reset.
    279  */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 *  - We didn't do it already (in the read part of a read-modify-write)
	 *  - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		/* From here on the release path below owns the lock */
		locked = TRUE;
	}

	/* The AUTOC write must be followed by a pipeline reset to take
	 * effect; its status becomes this function's return value. */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}
    313 
    314 /**
    315  *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
    316  *  @hw: pointer to hardware structure
    317  *
    318  *  Initialize the function pointers and assign the MAC type for 82599.
    319  *  Does not touch the hardware.
    320  **/
    321 
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* Start from the generic op tables, then override the entries
	 * that have 82599-specific implementations below.  The status
	 * of the generic init is what this function ultimately returns. */
	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware limits (queue counts, table sizes) */
	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* ARC subsystem is present when FWSM reports a firmware mode */
	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;


	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}
    403 
    404 /**
    405  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
    406  *  @hw: pointer to hardware structure
    407  *  @speed: pointer to link speed
    408  *  @autoneg: TRUE when autoneg or autotry is enabled
    409  *
    410  *  Determines the link capabilities by reading the AUTOC register.
    411  **/
    412 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
    413 				      ixgbe_link_speed *speed,
    414 				      bool *autoneg)
    415 {
    416 	s32 status = IXGBE_SUCCESS;
    417 	u32 autoc = 0;
    418 
    419 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
    420 
    421 
    422 	/* Check if 1G SFP module. */
    423 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
    424 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
    425 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
    426 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
    427 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
    428 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
    429 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    430 		*autoneg = TRUE;
    431 		goto out;
    432 	}
    433 
    434 	/*
    435 	 * Determine link capabilities based on the stored value of AUTOC,
    436 	 * which represents EEPROM defaults.  If AUTOC value has not
    437 	 * been stored, use the current register values.
    438 	 */
    439 	if (hw->mac.orig_link_settings_stored)
    440 		autoc = hw->mac.orig_autoc;
    441 	else
    442 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    443 
    444 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
    445 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
    446 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    447 		*autoneg = FALSE;
    448 		break;
    449 
    450 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
    451 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    452 		*autoneg = FALSE;
    453 		break;
    454 
    455 	case IXGBE_AUTOC_LMS_1G_AN:
    456 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    457 		*autoneg = TRUE;
    458 		break;
    459 
    460 	case IXGBE_AUTOC_LMS_10G_SERIAL:
    461 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    462 		*autoneg = FALSE;
    463 		break;
    464 
    465 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
    466 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
    467 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
    468 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    469 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    470 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    471 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    472 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    473 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    474 		*autoneg = TRUE;
    475 		break;
    476 
    477 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
    478 		*speed = IXGBE_LINK_SPEED_100_FULL;
    479 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    480 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    481 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    482 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    483 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    484 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    485 		*autoneg = TRUE;
    486 		break;
    487 
    488 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
    489 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
    490 		*autoneg = FALSE;
    491 		break;
    492 
    493 	default:
    494 		status = IXGBE_ERR_LINK_SETUP;
    495 		goto out;
    496 		break;
    497 	}
    498 
    499 	if (hw->phy.multispeed_fiber) {
    500 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
    501 			  IXGBE_LINK_SPEED_1GB_FULL;
    502 
    503 		/* QSFP must not enable full auto-negotiation
    504 		 * Limited autoneg is enabled at 1G
    505 		 */
    506 		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
    507 			*autoneg = FALSE;
    508 		else
    509 			*autoneg = TRUE;
    510 	}
    511 
    512 out:
    513 	return status;
    514 }
    515 
    516 /**
    517  *  ixgbe_get_media_type_82599 - Get media type
    518  *  @hw: pointer to hardware structure
    519  *
    520  *  Returns the media type (fiber, copper, backplane)
    521  **/
    522 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
    523 {
    524 	enum ixgbe_media_type media_type;
    525 
    526 	DEBUGFUNC("ixgbe_get_media_type_82599");
    527 
    528 	/* Detect if there is a copper PHY attached. */
    529 	switch (hw->phy.type) {
    530 	case ixgbe_phy_cu_unknown:
    531 	case ixgbe_phy_tn:
    532 		media_type = ixgbe_media_type_copper;
    533 		goto out;
    534 	default:
    535 		break;
    536 	}
    537 
    538 	switch (hw->device_id) {
    539 	case IXGBE_DEV_ID_82599_KX4:
    540 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
    541 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
    542 	case IXGBE_DEV_ID_82599_KR:
    543 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
    544 	case IXGBE_DEV_ID_82599_XAUI_LOM:
    545 		/* Default device ID is mezzanine card KX/KX4 */
    546 		media_type = ixgbe_media_type_backplane;
    547 		break;
    548 	case IXGBE_DEV_ID_82599_SFP:
    549 	case IXGBE_DEV_ID_82599_SFP_FCOE:
    550 	case IXGBE_DEV_ID_82599_SFP_EM:
    551 	case IXGBE_DEV_ID_82599_SFP_SF2:
    552 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
    553 	case IXGBE_DEV_ID_82599EN_SFP:
    554 		media_type = ixgbe_media_type_fiber;
    555 		break;
    556 	case IXGBE_DEV_ID_82599_CX4:
    557 		media_type = ixgbe_media_type_cx4;
    558 		break;
    559 	case IXGBE_DEV_ID_82599_T3_LOM:
    560 		media_type = ixgbe_media_type_copper;
    561 		break;
    562 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
    563 		media_type = ixgbe_media_type_fiber_qsfp;
    564 		break;
    565 	case IXGBE_DEV_ID_82599_BYPASS:
    566 		media_type = ixgbe_media_type_fiber_fixed;
    567 		hw->phy.multispeed_fiber = TRUE;
    568 		break;
    569 	default:
    570 		media_type = ixgbe_media_type_unknown;
    571 		break;
    572 	}
    573 out:
    574 	return media_type;
    575 }
    576 
    577 /**
    578  *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
    579  *  @hw: pointer to hardware structure
    580  *
    581  *  Disables link during D3 power down sequence.
    582  *
    583  **/
    584 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
    585 {
    586 	u32 autoc2_reg;
    587 	u16 ee_ctrl_2 = 0;
    588 
    589 	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
    590 	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
    591 
    592 	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
    593 	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
    594 		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
    595 		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
    596 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
    597 	}
    598 }
    599 
    600 /**
    601  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
    602  *  @hw: pointer to hardware structure
    603  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    604  *
    605  *  Configures link settings based on values in the ixgbe_hw struct.
    606  *  Restarts the link.  Performs autonegotiation if needed.
    607  **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/*  reset_pipeline requires us to hold this lock as it writes to
	 *  AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		/* Polling only applies to the KX4/KX/KR backplane AN modes */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll LINKS for AN completion, 100ms per try */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

out:
	return status;
}
    667 
    668 /**
    669  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
    670  *  @hw: pointer to hardware structure
    671  *
    672  *  The base drivers may require better control over SFP+ module
    673  *  PHY states.  This includes selectively shutting down the Tx
    674  *  laser on the PHY, effectively halting physical link.
    675  **/
    676 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    677 {
    678 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    679 
    680 	/* Blocked by MNG FW so bail */
    681 	if (ixgbe_check_reset_blocked(hw))
    682 		return;
    683 
    684 	/* Disable Tx laser; allow 100us to go dark per spec */
    685 	esdp_reg |= IXGBE_ESDP_SDP3;
    686 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    687 	IXGBE_WRITE_FLUSH(hw);
    688 	usec_delay(100);
    689 }
    690 
    691 /**
    692  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
    693  *  @hw: pointer to hardware structure
    694  *
    695  *  The base drivers may require better control over SFP+ module
    696  *  PHY states.  This includes selectively turning on the Tx
    697  *  laser on the PHY, effectively starting physical link.
    698  **/
    699 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    700 {
    701 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    702 
    703 	/* Enable Tx laser; allow 100ms to light up */
    704 	esdp_reg &= ~IXGBE_ESDP_SDP3;
    705 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    706 	IXGBE_WRITE_FLUSH(hw);
    707 	msec_delay(100);
    708 }
    709 
    710 /**
    711  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
    712  *  @hw: pointer to hardware structure
    713  *
    714  *  When the driver changes the link speeds that it can support,
    715  *  it sets autotry_restart to TRUE to indicate that we need to
    716  *  initiate a new autotry session with the link partner.  To do
    717  *  so, we set the speed then disable and re-enable the Tx laser, to
    718  *  alert the link partner that it also needs to restart autotry on its
    719  *  end.  This is consistent with TRUE clause 37 autoneg, which also
    720  *  involves a loss of signal.
    721  **/
    722 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    723 {
    724 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
    725 
    726 	/* Blocked by MNG FW so bail */
    727 	if (ixgbe_check_reset_blocked(hw))
    728 		return;
    729 
    730 	if (hw->mac.autotry_restart) {
    731 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
    732 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
    733 		hw->mac.autotry_restart = FALSE;
    734 	}
    735 }
    736 
    737 /**
    738  *  ixgbe_set_hard_rate_select_speed - Set module link speed
    739  *  @hw: pointer to hardware structure
    740  *  @speed: link speed to set
    741  *
    742  *  Set module link speed via RS0/RS1 rate select pins.
    743  */
    744 void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
    745 					ixgbe_link_speed speed)
    746 {
    747 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    748 
    749 	switch (speed) {
    750 	case IXGBE_LINK_SPEED_10GB_FULL:
    751 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
    752 		break;
    753 	case IXGBE_LINK_SPEED_1GB_FULL:
    754 		esdp_reg &= ~IXGBE_ESDP_SDP5;
    755 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
    756 		break;
    757 	default:
    758 		DEBUGOUT("Invalid fixed module speed\n");
    759 		return;
    760 	}
    761 
    762 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    763 	IXGBE_WRITE_FLUSH(hw);
    764 }
    765 
    766 /**
    767  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
    768  *  @hw: pointer to hardware structure
    769  *  @speed: new link speed
    770  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    771  *
    772  *  Implements the Intel SmartSpeed algorithm.
    773  **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	 /* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	/* A 1G result here means SmartSpeed gave up on the higher rate */
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
    877 
    878 /**
    879  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
    880  *  @hw: pointer to hardware structure
    881  *  @speed: new link speed
    882  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    883  *
    884  *  Set the link speed in the AUTOC register and restarts link.
    885  **/
    886 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
    887 			       ixgbe_link_speed speed,
    888 			       bool autoneg_wait_to_complete)
    889 {
    890 	bool autoneg = FALSE;
    891 	s32 status = IXGBE_SUCCESS;
    892 	u32 pma_pmd_1g, link_mode;
    893 	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
    894 	u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
    895 	u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
    896 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
    897 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
    898 	u32 links_reg;
    899 	u32 i;
    900 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
    901 
    902 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
    903 
    904 	/* Check to see if speed passed in is supported. */
    905 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
    906 	if (status)
    907 		goto out;
    908 
    909 	speed &= link_capabilities;
    910 
    911 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
    912 		status = IXGBE_ERR_LINK_SETUP;
    913 		goto out;
    914 	}
    915 
    916 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
    917 	if (hw->mac.orig_link_settings_stored)
    918 		orig_autoc = hw->mac.orig_autoc;
    919 	else
    920 		orig_autoc = autoc;
    921 
    922 	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
    923 	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
    924 
    925 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    926 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    927 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    928 		/* Set KX4/KX/KR support according to speed requested */
    929 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
    930 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
    931 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
    932 				autoc |= IXGBE_AUTOC_KX4_SUPP;
    933 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
    934 			    (hw->phy.smart_speed_active == FALSE))
    935 				autoc |= IXGBE_AUTOC_KR_SUPP;
    936 		}
    937 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    938 			autoc |= IXGBE_AUTOC_KX_SUPP;
    939 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
    940 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
    941 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
    942 		/* Switch from 1G SFI to 10G SFI if requested */
    943 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
    944 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
    945 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    946 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
    947 		}
    948 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
    949 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
    950 		/* Switch from 10G SFI to 1G SFI if requested */
    951 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
    952 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
    953 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    954 			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
    955 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
    956 			else
    957 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
    958 		}
    959 	}
    960 
    961 	if (autoc != current_autoc) {
    962 		/* Restart link */
    963 		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
    964 		if (status != IXGBE_SUCCESS)
    965 			goto out;
    966 
    967 		/* Only poll for autoneg to complete if specified to do so */
    968 		if (autoneg_wait_to_complete) {
    969 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    970 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    971 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    972 				links_reg = 0; /*Just in case Autoneg time=0*/
    973 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
    974 					links_reg =
    975 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
    976 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
    977 						break;
    978 					msec_delay(100);
    979 				}
    980 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
    981 					status =
    982 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
    983 					DEBUGOUT("Autoneg did not complete.\n");
    984 				}
    985 			}
    986 		}
    987 
    988 		/* Add delay to filter out noises during initial link setup */
    989 		msec_delay(50);
    990 	}
    991 
    992 out:
    993 	return status;
    994 }
    995 
    996 /**
    997  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
    998  *  @hw: pointer to hardware structure
    999  *  @speed: new link speed
   1000  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
   1001  *
   1002  *  Restarts link on PHY and MAC based on settings passed in.
   1003  **/
   1004 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
   1005 					 ixgbe_link_speed speed,
   1006 					 bool autoneg_wait_to_complete)
   1007 {
   1008 	s32 status;
   1009 
   1010 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
   1011 
   1012 	/* Setup the PHY according to input speed */
   1013 	status = hw->phy.ops.setup_link_speed(hw, speed,
   1014 					      autoneg_wait_to_complete);
   1015 	/* Set up MAC */
   1016 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
   1017 
   1018 	return status;
   1019 }
   1020 
   1021 /**
   1022  *  ixgbe_reset_hw_82599 - Perform hardware reset
   1023  *  @hw: pointer to hardware structure
   1024  *
   1025  *  Resets the hardware by resetting the transmit and receive units, masks
   1026  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
   1027  *  reset.
   1028  **/
   1029 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
   1030 {
   1031 	ixgbe_link_speed link_speed;
   1032 	s32 status;
   1033 	u32 ctrl = 0;
   1034 	u32 i, autoc, autoc2;
   1035 	u32 curr_lms;
   1036 	bool link_up = FALSE;
   1037 
   1038 	DEBUGFUNC("ixgbe_reset_hw_82599");
   1039 
   1040 	/* Call adapter stop to disable tx/rx and clear interrupts */
   1041 	status = hw->mac.ops.stop_adapter(hw);
   1042 	if (status != IXGBE_SUCCESS)
   1043 		goto reset_hw_out;
   1044 
   1045 	/* flush pending Tx transactions */
   1046 	ixgbe_clear_tx_pending(hw);
   1047 
   1048 	/* PHY ops must be identified and initialized prior to reset */
   1049 
   1050 	/* Identify PHY and related function pointers */
   1051 	status = hw->phy.ops.init(hw);
   1052 
   1053 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
   1054 		goto reset_hw_out;
   1055 
   1056 	/* Setup SFP module if there is one present. */
   1057 	if (hw->phy.sfp_setup_needed) {
   1058 		status = hw->mac.ops.setup_sfp(hw);
   1059 		hw->phy.sfp_setup_needed = FALSE;
   1060 	}
   1061 
   1062 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
   1063 		goto reset_hw_out;
   1064 
   1065 	/* Reset PHY */
   1066 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
   1067 		hw->phy.ops.reset(hw);
   1068 
   1069 	/* remember AUTOC from before we reset */
   1070 	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
   1071 
   1072 mac_reset_top:
   1073 	/*
   1074 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
   1075 	 * If link reset is used when link is up, it might reset the PHY when
   1076 	 * mng is using it.  If link is down or the flag to force full link
   1077 	 * reset is set, then perform link reset.
   1078 	 */
   1079 	ctrl = IXGBE_CTRL_LNK_RST;
   1080 	if (!hw->force_full_reset) {
   1081 		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
   1082 		if (link_up)
   1083 			ctrl = IXGBE_CTRL_RST;
   1084 	}
   1085 
   1086 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
   1087 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
   1088 	IXGBE_WRITE_FLUSH(hw);
   1089 
   1090 	/* Poll for reset bit to self-clear meaning reset is complete */
   1091 	for (i = 0; i < 10; i++) {
   1092 		usec_delay(1);
   1093 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
   1094 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
   1095 			break;
   1096 	}
   1097 
   1098 	if (ctrl & IXGBE_CTRL_RST_MASK) {
   1099 		status = IXGBE_ERR_RESET_FAILED;
   1100 		DEBUGOUT("Reset polling failed to complete.\n");
   1101 	}
   1102 
   1103 	msec_delay(50);
   1104 
   1105 	/*
   1106 	 * Double resets are required for recovery from certain error
   1107 	 * conditions.  Between resets, it is necessary to stall to
   1108 	 * allow time for any pending HW events to complete.
   1109 	 */
   1110 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
   1111 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
   1112 		goto mac_reset_top;
   1113 	}
   1114 
   1115 	/*
   1116 	 * Store the original AUTOC/AUTOC2 values if they have not been
   1117 	 * stored off yet.  Otherwise restore the stored original
   1118 	 * values since the reset operation sets back to defaults.
   1119 	 */
   1120 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   1121 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   1122 
   1123 	/* Enable link if disabled in NVM */
   1124 	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
   1125 		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
   1126 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
   1127 		IXGBE_WRITE_FLUSH(hw);
   1128 	}
   1129 
   1130 	if (hw->mac.orig_link_settings_stored == FALSE) {
   1131 		hw->mac.orig_autoc = autoc;
   1132 		hw->mac.orig_autoc2 = autoc2;
   1133 		hw->mac.orig_link_settings_stored = TRUE;
   1134 	} else {
   1135 
   1136 		/* If MNG FW is running on a multi-speed device that
   1137 		 * doesn't autoneg with out driver support we need to
   1138 		 * leave LMS in the state it was before we MAC reset.
   1139 		 * Likewise if we support WoL we don't want change the
   1140 		 * LMS state.
   1141 		 */
   1142 		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
   1143 		    hw->wol_enabled)
   1144 			hw->mac.orig_autoc =
   1145 				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
   1146 				curr_lms;
   1147 
   1148 		if (autoc != hw->mac.orig_autoc) {
   1149 			status = hw->mac.ops.prot_autoc_write(hw,
   1150 							hw->mac.orig_autoc,
   1151 							FALSE);
   1152 			if (status != IXGBE_SUCCESS)
   1153 				goto reset_hw_out;
   1154 		}
   1155 
   1156 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
   1157 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
   1158 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
   1159 			autoc2 |= (hw->mac.orig_autoc2 &
   1160 				   IXGBE_AUTOC2_UPPER_MASK);
   1161 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
   1162 		}
   1163 	}
   1164 
   1165 	/* Store the permanent mac address */
   1166 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
   1167 
   1168 	/*
   1169 	 * Store MAC address from RAR0, clear receive address registers, and
   1170 	 * clear the multicast table.  Also reset num_rar_entries to 128,
   1171 	 * since we modify this value when programming the SAN MAC address.
   1172 	 */
   1173 	hw->mac.num_rar_entries = 128;
   1174 	hw->mac.ops.init_rx_addrs(hw);
   1175 
   1176 	/* Store the permanent SAN mac address */
   1177 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
   1178 
   1179 	/* Add the SAN MAC address to the RAR only if it's a valid address */
   1180 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
   1181 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
   1182 				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
   1183 
   1184 		/* Save the SAN MAC RAR index */
   1185 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
   1186 
   1187 		/* Reserve the last RAR for the SAN MAC address */
   1188 		hw->mac.num_rar_entries--;
   1189 	}
   1190 
   1191 	/* Store the alternative WWNN/WWPN prefix */
   1192 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
   1193 				   &hw->mac.wwpn_prefix);
   1194 
   1195 reset_hw_out:
   1196 	return status;
   1197 }
   1198 
   1199 /**
   1200  * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
   1201  * @hw: pointer to hardware structure
   1202  * @fdircmd: current value of FDIRCMD register
   1203  */
   1204 static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
   1205 {
   1206 	int i;
   1207 
   1208 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
   1209 		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
   1210 		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
   1211 			return IXGBE_SUCCESS;
   1212 		usec_delay(10);
   1213 	}
   1214 
   1215 	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
   1216 }
   1217 
   1218 /**
   1219  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
   1220  *  @hw: pointer to hardware structure
   1221  **/
   1222 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
   1223 {
   1224 	s32 err;
   1225 	int i;
   1226 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
   1227 	u32 fdircmd;
   1228 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
   1229 
   1230 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
   1231 
   1232 	/*
   1233 	 * Before starting reinitialization process,
   1234 	 * FDIRCMD.CMD must be zero.
   1235 	 */
   1236 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
   1237 	if (err) {
   1238 		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
   1239 		return err;
   1240 	}
   1241 
   1242 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
   1243 	IXGBE_WRITE_FLUSH(hw);
   1244 	/*
   1245 	 * 82599 adapters flow director init flow cannot be restarted,
   1246 	 * Workaround 82599 silicon errata by performing the following steps
   1247 	 * before re-writing the FDIRCTRL control register with the same value.
   1248 	 * - write 1 to bit 8 of FDIRCMD register &
   1249 	 * - write 0 to bit 8 of FDIRCMD register
   1250 	 */
   1251 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1252 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
   1253 			 IXGBE_FDIRCMD_CLEARHT));
   1254 	IXGBE_WRITE_FLUSH(hw);
   1255 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1256 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1257 			 ~IXGBE_FDIRCMD_CLEARHT));
   1258 	IXGBE_WRITE_FLUSH(hw);
   1259 	/*
   1260 	 * Clear FDIR Hash register to clear any leftover hashes
   1261 	 * waiting to be programmed.
   1262 	 */
   1263 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
   1264 	IXGBE_WRITE_FLUSH(hw);
   1265 
   1266 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1267 	IXGBE_WRITE_FLUSH(hw);
   1268 
   1269 	/* Poll init-done after we write FDIRCTRL register */
   1270 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1271 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1272 				   IXGBE_FDIRCTRL_INIT_DONE)
   1273 			break;
   1274 		msec_delay(1);
   1275 	}
   1276 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
   1277 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
   1278 		return IXGBE_ERR_FDIR_REINIT_FAILED;
   1279 	}
   1280 
   1281 	/* Clear FDIR statistics registers (read to clear) */
   1282 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
   1283 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
   1284 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
   1285 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
   1286 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
   1287 
   1288 	return IXGBE_SUCCESS;
   1289 }
   1290 
   1291 /**
   1292  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
   1293  *  @hw: pointer to hardware structure
   1294  *  @fdirctrl: value to write to flow director control register
   1295  **/
   1296 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1297 {
   1298 	int i;
   1299 
   1300 	DEBUGFUNC("ixgbe_fdir_enable_82599");
   1301 
   1302 	/* Prime the keys for hashing */
   1303 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
   1304 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
   1305 
   1306 	/*
   1307 	 * Poll init-done after we write the register.  Estimated times:
   1308 	 *      10G: PBALLOC = 11b, timing is 60us
   1309 	 *       1G: PBALLOC = 11b, timing is 600us
   1310 	 *     100M: PBALLOC = 11b, timing is 6ms
   1311 	 *
   1312 	 *     Multiple these timings by 4 if under full Rx load
   1313 	 *
   1314 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
   1315 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
   1316 	 * this might not finish in our poll time, but we can live with that
   1317 	 * for now.
   1318 	 */
   1319 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1320 	IXGBE_WRITE_FLUSH(hw);
   1321 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1322 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1323 				   IXGBE_FDIRCTRL_INIT_DONE)
   1324 			break;
   1325 		msec_delay(1);
   1326 	}
   1327 
   1328 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
   1329 		DEBUGOUT("Flow Director poll time exceeded!\n");
   1330 }
   1331 
   1332 /**
   1333  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
   1334  *  @hw: pointer to hardware structure
   1335  *  @fdirctrl: value to write to flow director control register, initially
   1336  *	     contains just the value of the Rx packet buffer allocation
   1337  **/
   1338 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1339 {
   1340 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
   1341 
   1342 	/*
   1343 	 * Continue setup of fdirctrl register bits:
   1344 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1345 	 *  Set the maximum length per hash bucket to 0xA filters
   1346 	 *  Send interrupt when 64 filters are left
   1347 	 */
   1348 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1349 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1350 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1351 
   1352 	/* write hashes and fdirctrl register, poll for completion */
   1353 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1354 
   1355 	return IXGBE_SUCCESS;
   1356 }
   1357 
   1358 /**
   1359  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
   1360  *  @hw: pointer to hardware structure
   1361  *  @fdirctrl: value to write to flow director control register, initially
   1362  *	     contains just the value of the Rx packet buffer allocation
   1363  *  @cloud_mode: TRUE - cloud mode, FALSE - other mode
   1364  **/
   1365 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
   1366 			bool cloud_mode)
   1367 {
   1368 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
   1369 
   1370 	/*
   1371 	 * Continue setup of fdirctrl register bits:
   1372 	 *  Turn perfect match filtering on
   1373 	 *  Report hash in RSS field of Rx wb descriptor
   1374 	 *  Initialize the drop queue
   1375 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1376 	 *  Set the maximum length per hash bucket to 0xA filters
   1377 	 *  Send interrupt when 64 (0x4 * 16) filters are left
   1378 	 */
   1379 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
   1380 		    IXGBE_FDIRCTRL_REPORT_STATUS |
   1381 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
   1382 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1383 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1384 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1385 
   1386 	if (cloud_mode)
   1387 		fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
   1388 					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
   1389 
   1390 	/* write hashes and fdirctrl register, poll for completion */
   1391 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1392 
   1393 	return IXGBE_SUCCESS;
   1394 }
   1395 
   1396 /*
   1397  * These defines allow us to quickly generate all of the necessary instructions
   1398  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
   1399  * for values 0 through 15
   1400  */
   1401 #define IXGBE_ATR_COMMON_HASH_KEY \
   1402 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
   1403 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
   1404 do { \
   1405 	u32 n = (_n); \
   1406 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
   1407 		common_hash ^= lo_hash_dword >> n; \
   1408 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
   1409 		bucket_hash ^= lo_hash_dword >> n; \
   1410 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
   1411 		sig_hash ^= lo_hash_dword << (16 - n); \
   1412 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
   1413 		common_hash ^= hi_hash_dword >> n; \
   1414 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
   1415 		bucket_hash ^= hi_hash_dword >> n; \
   1416 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
   1417 		sig_hash ^= hi_hash_dword << (16 - n); \
   1418 } while (0)
   1419 
   1420 /**
   1421  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
   1422  *  @stream: input bitstream to compute the hash on
   1423  *
   1424  *  This function is almost identical to the function above but contains
   1425  *  several optimizations such as unwinding all of the loops, letting the
   1426  *  compiler work out all of the conditional ifs since the keys are static
   1427  *  defines, and computing two keys at once since the hashed dword stream
   1428  *  will be the same for both keys.
   1429  **/
   1430 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
   1431 				     union ixgbe_atr_hash_dword common)
   1432 {
   1433 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
   1434 	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
   1435 
   1436 	/* record the flow_vm_vlan bits as they are a key part to the hash */
   1437 	flow_vm_vlan = IXGBE_NTOHL(input.dword);
   1438 
   1439 	/* generate common hash dword */
   1440 	hi_hash_dword = IXGBE_NTOHL(common.dword);
   1441 
   1442 	/* low dword is word swapped version of common */
   1443 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
   1444 
   1445 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
   1446 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
   1447 
   1448 	/* Process bits 0 and 16 */
   1449 	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
   1450 
   1451 	/*
   1452 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
   1453 	 * delay this because bit 0 of the stream should not be processed
   1454 	 * so we do not add the VLAN until after bit 0 was processed
   1455 	 */
   1456 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
   1457 
   1458 	/* Process remaining 30 bit of the key */
   1459 	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
   1460 	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
   1461 	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
   1462 	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
   1463 	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
   1464 	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
   1465 	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
   1466 	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
   1467 	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
   1468 	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
   1469 	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
   1470 	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
   1471 	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
   1472 	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
   1473 	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
   1474 
   1475 	/* combine common_hash result with signature and bucket hashes */
   1476 	bucket_hash ^= common_hash;
   1477 	bucket_hash &= IXGBE_ATR_HASH_MASK;
   1478 
   1479 	sig_hash ^= common_hash << 16;
   1480 	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
   1481 
   1482 	/* return completed signature hash */
   1483 	return sig_hash ^ bucket_hash;
   1484 }
   1485 
   1486 /**
   1487  *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
   1488  *  @hw: pointer to hardware structure
   1489  *  @input: unique input dword
   1490  *  @common: compressed common input dword
   1491  *  @queue: queue index to direct traffic to
   1492  *
   1493  * Note that the tunnel bit in input must not be set when the hardware
   1494  * tunneling support does not exist.
   1495  **/
   1496 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
   1497 					  union ixgbe_atr_hash_dword input,
   1498 					  union ixgbe_atr_hash_dword common,
   1499 					  u8 queue)
   1500 {
   1501 	u64 fdirhashcmd;
   1502 	u8 flow_type;
   1503 	bool tunnel;
   1504 	u32 fdircmd;
   1505 	s32 err;
   1506 
   1507 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
   1508 
   1509 	/*
   1510 	 * Get the flow_type in order to program FDIRCMD properly
   1511 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
   1512 	 * fifth is FDIRCMD.TUNNEL_FILTER
   1513 	 */
   1514 	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
   1515 	flow_type = input.formatted.flow_type &
   1516 		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
   1517 	switch (flow_type) {
   1518 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   1519 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   1520 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   1521 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
   1522 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
   1523 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
   1524 		break;
   1525 	default:
   1526 		DEBUGOUT(" Error on flow type input\n");
   1527 		return IXGBE_ERR_CONFIG;
   1528 	}
   1529 
   1530 	/* configure FDIRCMD register */
   1531 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
   1532 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
   1533 	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
   1534 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
   1535 	if (tunnel)
   1536 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
   1537 
   1538 	/*
   1539 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
   1540 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
   1541 	 */
   1542 	fdirhashcmd = (u64)fdircmd << 32;
   1543 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
   1544 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
   1545 
   1546 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
   1547 	if (err) {
   1548 		DEBUGOUT("Flow Director command did not complete!\n");
   1549 		return err;
   1550 	}
   1551 
   1552 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
   1553 
   1554 	return IXGBE_SUCCESS;
   1555 }
   1556 
/*
 * One round of the perfect-filter bucket hash: folds bit n of the low hash
 * dword and bit n+16 of the high hash dword into bucket_hash (which, along
 * with lo_hash_dword and hi_hash_dword, must be in scope at the expansion
 * site) according to IXGBE_ATR_BUCKET_HASH_KEY.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
   1565 
   1566 /**
   1567  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
   1568  *  @atr_input: input bitstream to compute the hash on
   1569  *  @input_mask: mask for the input bitstream
   1570  *
   1571  *  This function serves two main purposes.  First it applies the input_mask
   1572  *  to the atr_input resulting in a cleaned up atr_input data stream.
   1573  *  Secondly it computes the hash and stores it in the bkt_hash field at
   1574  *  the end of the input byte stream.  This way it will be available for
   1575  *  future use without needing to recompute the hash.
   1576  **/
   1577 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
   1578 					  union ixgbe_atr_input *input_mask)
   1579 {
   1580 
   1581 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
   1582 	u32 bucket_hash = 0;
   1583 	u32 hi_dword = 0;
   1584 	u32 i = 0;
   1585 
   1586 	/* Apply masks to input data */
   1587 	for (i = 0; i < 14; i++)
   1588 		input->dword_stream[i]  &= input_mask->dword_stream[i];
   1589 
   1590 	/* record the flow_vm_vlan bits as they are a key part to the hash */
   1591 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
   1592 
   1593 	/* generate common hash dword */
   1594 	for (i = 1; i <= 13; i++)
   1595 		hi_dword ^= input->dword_stream[i];
   1596 	hi_hash_dword = IXGBE_NTOHL(hi_dword);
   1597 
   1598 	/* low dword is word swapped version of common */
   1599 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
   1600 
   1601 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
   1602 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
   1603 
   1604 	/* Process bits 0 and 16 */
   1605 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
   1606 
   1607 	/*
   1608 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
   1609 	 * delay this because bit 0 of the stream should not be processed
   1610 	 * so we do not add the VLAN until after bit 0 was processed
   1611 	 */
   1612 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
   1613 
   1614 	/* Process remaining 30 bit of the key */
   1615 	for (i = 1; i <= 15; i++)
   1616 		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
   1617 
   1618 	/*
   1619 	 * Limit hash to 13 bits since max bucket count is 8K.
   1620 	 * Store result at the end of the input stream.
   1621 	 */
   1622 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
   1623 }
   1624 
   1625 /**
   1626  *  ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
   1627  *  @input_mask: mask to be bit swapped
   1628  *
   1629  *  The source and destination port masks for flow director are bit swapped
   1630  *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
   1631  *  generate a correctly swapped value we need to bit swap the mask and that
   1632  *  is what is accomplished by this function.
   1633  **/
   1634 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
   1635 {
   1636 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
   1637 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
   1638 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
   1639 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
   1640 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
   1641 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
   1642 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
   1643 }
   1644 
   1645 /*
   1646  * These two macros are meant to address the fact that we have registers
   1647  * that are either all or in part big-endian.  As a result on big-endian
   1648  * systems we will end up byte swapping the value to little-endian before
   1649  * it is byte swapped again and written to the hardware in the original
   1650  * big-endian format.
   1651  */
/* Byte-swap a 32-bit value (reverse all four bytes). */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* Write a network-order value to a big-endian register: convert to host
 * order first, then byte-swap so the subsequent register-write swap (on
 * big-endian hosts) restores the original big-endian layout. */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* Byte-swap a 16-bit value, then convert the result to host order. */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
   1661 
/**
 *  ixgbe_fdir_set_input_mask_82599 - program the Flow Director input masks
 *  @hw: pointer to hardware structure
 *  @input_mask: mask bits; a zero field means "mask (ignore) that field",
 *		 all-ones means "match that field exactly"
 *  @cloud_mode: TRUE to also program the tunnel/inner-MAC masks (FDIRIP6M)
 *
 *  Validates each mask field (only fully-set or fully-clear values are
 *  accepted, plus the documented VLAN/TNI partial masks) and writes the
 *  FDIRM, FDIRTCPM/FDIRUDPM(/FDIRSCTPM), FDIRSIP4M/FDIRDIP4M and, in cloud
 *  mode, FDIRIP6M registers.  Returns IXGBE_ERR_CONFIG on an unsupported
 *  partial mask.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask, bool cloud_mode)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;
	u32 fdirip6m;
	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		/* masking L4 type only makes sense if ports are masked too */
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* upper 3 bits are VLAN priority, lower 12 are VLAN ID */
	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID, fall through to mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only, fall through */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes, fall through */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (cloud_mode) {
		fdirm |= IXGBE_FDIRM_L3P;
		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;

		/* first inner-MAC byte stands in for the whole address mask */
		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
		case 0x00:
			/* Mask inner MAC, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
			/* fall through */
		case 0xFF:
			break;
		default:
			DEBUGOUT(" Error on inner_mac byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
		case 0x0:
			/* Mask vxlan id */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			/* match only the 24-bit TNI/VNI */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			DEBUGOUT(" Error on TNI/VNI byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
		case 0x0:
			/* Mask tunnel type, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			/* fall through */
		case 0xFFFF:
			break;
		default:
			DEBUGOUT(" Error on tunnel type byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);

		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and
		 * FDIRDIP4M in cloud mode to allow L3/L3 packets to
		 * tunnel.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	if (!cloud_mode) {
		/* store the TCP/UDP port masks, bit reversed from port
		 * layout */
		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

		/* write both the same so that UDP and TCP use the same mask */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
		/* also use it for SCTP */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
			break;
		default:
			break;
		}

		/* store source and destination IP masks (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
				     ~input_mask->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
				     ~input_mask->formatted.dst_ip[0]);
	}
	return IXGBE_SUCCESS;
}
   1823 
/**
 *  ixgbe_fdir_write_perfect_filter_82599 - program a perfect filter into HW
 *  @hw: pointer to hardware structure
 *  @input: input bitstream (formatted.bkt_hash must already be computed)
 *  @soft_id: software index stored alongside the bucket hash in FDIRHASH
 *  @queue: Rx queue to direct matching traffic to (IXGBE_FDIR_DROP_QUEUE
 *	    marks the filter as a drop filter)
 *  @cloud_mode: TRUE to program the tunnel fields (inner MAC, TNI/VNI)
 *		 instead of the IPv4/IPv6 source address registers
 *
 *  Writes the filter fields to the FDIR registers, flushes them, then
 *  issues the add-flow command and waits for it to complete.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue, bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
			input->formatted.src_ip[0]);

		/* record the first 32 bits of the destination address
		 * (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
			input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		/* in cloud mode the FDIRSIPv6 registers carry the inner MAC,
		 * tunnel-type flag and TNI/VNI instead of an IP address */
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000;

		addr_low = ((u32)input->formatted.inner_mac[0] |
				((u32)input->formatted.inner_mac[1] << 8) |
				((u32)input->formatted.inner_mac[2] << 16) |
				((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
				((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
	}

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}
   1912 
   1913 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
   1914 					  union ixgbe_atr_input *input,
   1915 					  u16 soft_id)
   1916 {
   1917 	u32 fdirhash;
   1918 	u32 fdircmd;
   1919 	s32 err;
   1920 
   1921 	/* configure FDIRHASH register */
   1922 	fdirhash = input->formatted.bkt_hash;
   1923 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
   1924 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
   1925 
   1926 	/* flush hash to HW */
   1927 	IXGBE_WRITE_FLUSH(hw);
   1928 
   1929 	/* Query if filter is present */
   1930 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
   1931 
   1932 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
   1933 	if (err) {
   1934 		DEBUGOUT("Flow Director command did not complete!\n");
   1935 		return err;
   1936 	}
   1937 
   1938 	/* if filter exists in hardware then remove it */
   1939 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
   1940 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
   1941 		IXGBE_WRITE_FLUSH(hw);
   1942 		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1943 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
   1944 	}
   1945 
   1946 	return IXGBE_SUCCESS;
   1947 }
   1948 
   1949 /**
   1950  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
   1951  *  @hw: pointer to hardware structure
   1952  *  @input: input bitstream
   1953  *  @input_mask: mask for the input bitstream
   1954  *  @soft_id: software index for the filters
   1955  *  @queue: queue index to direct traffic to
   1956  *
   1957  *  Note that the caller to this function must lock before calling, since the
   1958  *  hardware writes must be protected from one another.
   1959  **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue, bool cloud_mode)
{
	s32 err = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
		/* plain IP: ports must be absent, mask out L4 type */
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
		/* SCTP filters carry no port match */
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through - SCTP shares the flow-type mask setup below */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
	if (err)
		return err;

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue, cloud_mode);
}
   2012 
   2013 /**
   2014  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
   2015  *  @hw: pointer to hardware structure
   2016  *  @reg: analog register to read
   2017  *  @val: read value
   2018  *
   2019  *  Performs read operation to Omer analog register specified.
   2020  **/
   2021 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
   2022 {
   2023 	u32  core_ctl;
   2024 
   2025 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
   2026 
   2027 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
   2028 			(reg << 8));
   2029 	IXGBE_WRITE_FLUSH(hw);
   2030 	usec_delay(10);
   2031 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
   2032 	*val = (u8)core_ctl;
   2033 
   2034 	return IXGBE_SUCCESS;
   2035 }
   2036 
   2037 /**
   2038  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
   2039  *  @hw: pointer to hardware structure
   2040  *  @reg: atlas register to write
   2041  *  @val: value to write
   2042  *
   2043  *  Performs write operation to Omer analog register specified.
   2044  **/
   2045 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
   2046 {
   2047 	u32  core_ctl;
   2048 
   2049 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
   2050 
   2051 	core_ctl = (reg << 8) | val;
   2052 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
   2053 	IXGBE_WRITE_FLUSH(hw);
   2054 	usec_delay(10);
   2055 
   2056 	return IXGBE_SUCCESS;
   2057 }
   2058 
   2059 /**
   2060  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
   2061  *  @hw: pointer to hardware structure
   2062  *
   2063  *  Starts the hardware using the generic start_hw function
   2064  *  and the generation start_hw function.
   2065  *  Then performs revision-specific operations, if any.
   2066  **/
   2067 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
   2068 {
   2069 	s32 ret_val = IXGBE_SUCCESS;
   2070 
   2071 	DEBUGFUNC("ixgbe_start_hw_82599");
   2072 
   2073 	ret_val = ixgbe_start_hw_generic(hw);
   2074 	if (ret_val != IXGBE_SUCCESS)
   2075 		goto out;
   2076 
   2077 	ret_val = ixgbe_start_hw_gen2(hw);
   2078 	if (ret_val != IXGBE_SUCCESS)
   2079 		goto out;
   2080 
   2081 	/* We need to run link autotry after the driver loads */
   2082 	hw->mac.autotry_restart = TRUE;
   2083 
   2084 	if (ret_val == IXGBE_SUCCESS)
   2085 		ret_val = ixgbe_verify_fw_version_82599(hw);
   2086 out:
   2087 	return ret_val;
   2088 }
   2089 
   2090 /**
   2091  *  ixgbe_identify_phy_82599 - Get physical layer module
   2092  *  @hw: pointer to hardware structure
   2093  *
   2094  *  Determines the physical layer module found on the current adapter.
   2095  *  If PHY already detected, maintains current PHY type in hw struct,
   2096  *  otherwise executes the PHY detection routine.
   2097  **/
   2098 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
   2099 {
   2100 	s32 status;
   2101 
   2102 	DEBUGFUNC("ixgbe_identify_phy_82599");
   2103 
   2104 	/* Detect PHY if not unknown - returns success if already detected. */
   2105 	status = ixgbe_identify_phy_generic(hw);
   2106 	if (status != IXGBE_SUCCESS) {
   2107 		/* 82599 10GBASE-T requires an external PHY */
   2108 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
   2109 			return status;
   2110 		else
   2111 			status = ixgbe_identify_module_generic(hw);
   2112 	}
   2113 
   2114 	/* Set PHY type none if no PHY detected */
   2115 	if (hw->phy.type == ixgbe_phy_unknown) {
   2116 		hw->phy.type = ixgbe_phy_none;
   2117 		return IXGBE_SUCCESS;
   2118 	}
   2119 
   2120 	/* Return error if SFP module has been detected but is not supported */
   2121 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
   2122 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
   2123 
   2124 	return status;
   2125 }
   2126 
   2127 /**
   2128  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
   2129  *  @hw: pointer to hardware structure
   2130  *
   2131  *  Determines physical layer capabilities of the current configuration.
   2132  **/
   2133 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
   2134 {
   2135 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   2136 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   2137 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   2138 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
   2139 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
   2140 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
   2141 	u16 ext_ability = 0;
   2142 
   2143 	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
   2144 
   2145 	hw->phy.ops.identify(hw);
   2146 
   2147 	switch (hw->phy.type) {
   2148 	case ixgbe_phy_tn:
   2149 	case ixgbe_phy_cu_unknown:
   2150 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
   2151 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
   2152 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
   2153 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
   2154 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
   2155 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
   2156 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
   2157 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
   2158 		goto out;
   2159 	default:
   2160 		break;
   2161 	}
   2162 
   2163 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
   2164 	case IXGBE_AUTOC_LMS_1G_AN:
   2165 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
   2166 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
   2167 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
   2168 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
   2169 			goto out;
   2170 		} else
   2171 			/* SFI mode so read SFP module */
   2172 			goto sfp_check;
   2173 		break;
   2174 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
   2175 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
   2176 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
   2177 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
   2178 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2179 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
   2180 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
   2181 		goto out;
   2182 		break;
   2183 	case IXGBE_AUTOC_LMS_10G_SERIAL:
   2184 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
   2185 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2186 			goto out;
   2187 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
   2188 			goto sfp_check;
   2189 		break;
   2190 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
   2191 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
   2192 		if (autoc & IXGBE_AUTOC_KX_SUPP)
   2193 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
   2194 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
   2195 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2196 		if (autoc & IXGBE_AUTOC_KR_SUPP)
   2197 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2198 		goto out;
   2199 		break;
   2200 	default:
   2201 		goto out;
   2202 		break;
   2203 	}
   2204 
   2205 sfp_check:
   2206 	/* SFP check must be done last since DA modules are sometimes used to
   2207 	 * test KR mode -  we need to id KR mode correctly before SFP module.
   2208 	 * Call identify_sfp because the pluggable module may have changed */
   2209 	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
   2210 out:
   2211 	return physical_layer;
   2212 }
   2213 
   2214 /**
   2215  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
   2216  *  @hw: pointer to hardware structure
   2217  *  @regval: register value to write to RXCTRL
   2218  *
   2219  *  Enables the Rx DMA unit for 82599
   2220  **/
   2221 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
   2222 {
   2223 
   2224 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
   2225 
   2226 	/*
   2227 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
   2228 	 * If traffic is incoming before we enable the Rx unit, it could hang
   2229 	 * the Rx DMA unit.  Therefore, make sure the security engine is
   2230 	 * completely disabled prior to enabling the Rx unit.
   2231 	 */
   2232 
   2233 	hw->mac.ops.disable_sec_rx_path(hw);
   2234 
   2235 	if (regval & IXGBE_RXCTRL_RXEN)
   2236 		ixgbe_enable_rx(hw);
   2237 	else
   2238 		ixgbe_disable_rx(hw);
   2239 
   2240 	hw->mac.ops.enable_sec_rx_path(hw);
   2241 
   2242 	return IXGBE_SUCCESS;
   2243 }
   2244 
   2245 /**
   2246  *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
   2247  *  @hw: pointer to hardware structure
   2248  *
   2249  *  Verifies that installed the firmware version is 0.6 or higher
   2250  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
   2251  *
   2252  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
   2253  *  if the FW version is not supported.
   2254  **/
   2255 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
   2256 {
   2257 	s32 status = IXGBE_ERR_EEPROM_VERSION;
   2258 	u16 fw_offset, fw_ptp_cfg_offset;
   2259 	u16 fw_version;
   2260 
   2261 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
   2262 
   2263 	/* firmware check is only necessary for SFI devices */
   2264 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
   2265 		status = IXGBE_SUCCESS;
   2266 		goto fw_version_out;
   2267 	}
   2268 
   2269 	/* get the offset to the Firmware Module block */
   2270 	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
   2271 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2272 			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
   2273 		return IXGBE_ERR_EEPROM_VERSION;
   2274 	}
   2275 
   2276 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
   2277 		goto fw_version_out;
   2278 
   2279 	/* get the offset to the Pass Through Patch Configuration block */
   2280 	if (hw->eeprom.ops.read(hw, (fw_offset +
   2281 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
   2282 				 &fw_ptp_cfg_offset)) {
   2283 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2284 			      "eeprom read at offset %d failed",
   2285 			      fw_offset +
   2286 			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
   2287 		return IXGBE_ERR_EEPROM_VERSION;
   2288 	}
   2289 
   2290 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
   2291 		goto fw_version_out;
   2292 
   2293 	/* get the firmware version */
   2294 	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
   2295 			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
   2296 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2297 			      "eeprom read at offset %d failed",
   2298 			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
   2299 		return IXGBE_ERR_EEPROM_VERSION;
   2300 	}
   2301 
   2302 	if (fw_version > 0x5)
   2303 		status = IXGBE_SUCCESS;
   2304 
   2305 fw_version_out:
   2306 	return status;
   2307 }
   2308 
   2309 /**
   2310  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
   2311  *  @hw: pointer to hardware structure
   2312  *
   2313  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
   2314  *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
   2315  **/
   2316 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
   2317 {
   2318 	bool lesm_enabled = FALSE;
   2319 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
   2320 	s32 status;
   2321 
   2322 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
   2323 
   2324 	/* get the offset to the Firmware Module block */
   2325 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
   2326 
   2327 	if ((status != IXGBE_SUCCESS) ||
   2328 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
   2329 		goto out;
   2330 
   2331 	/* get the offset to the LESM Parameters block */
   2332 	status = hw->eeprom.ops.read(hw, (fw_offset +
   2333 				     IXGBE_FW_LESM_PARAMETERS_PTR),
   2334 				     &fw_lesm_param_offset);
   2335 
   2336 	if ((status != IXGBE_SUCCESS) ||
   2337 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
   2338 		goto out;
   2339 
   2340 	/* get the LESM state word */
   2341 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
   2342 				     IXGBE_FW_LESM_STATE_1),
   2343 				     &fw_lesm_state);
   2344 
   2345 	if ((status == IXGBE_SUCCESS) &&
   2346 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
   2347 		lesm_enabled = TRUE;
   2348 
   2349 out:
   2350 	return lesm_enabled;
   2351 }
   2352 
   2353 /**
   2354  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
   2355  *  fastest available method
   2356  *
   2357  *  @hw: pointer to hardware structure
   2358  *  @offset: offset of  word in EEPROM to read
   2359  *  @words: number of words
   2360  *  @data: word(s) read from the EEPROM
   2361  *
   2362  *  Retrieves 16 bit word(s) read from EEPROM
   2363  **/
   2364 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
   2365 					  u16 words, u16 *data)
   2366 {
   2367 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2368 	s32 ret_val = IXGBE_ERR_CONFIG;
   2369 
   2370 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
   2371 
   2372 	/*
   2373 	 * If EEPROM is detected and can be addressed using 14 bits,
   2374 	 * use EERD otherwise use bit bang
   2375 	 */
   2376 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2377 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
   2378 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
   2379 							 data);
   2380 	else
   2381 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
   2382 								    words,
   2383 								    data);
   2384 
   2385 	return ret_val;
   2386 }
   2387 
   2388 /**
   2389  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
   2390  *  fastest available method
   2391  *
   2392  *  @hw: pointer to hardware structure
   2393  *  @offset: offset of  word in the EEPROM to read
   2394  *  @data: word read from the EEPROM
   2395  *
   2396  *  Reads a 16 bit word from the EEPROM
   2397  **/
   2398 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
   2399 				   u16 offset, u16 *data)
   2400 {
   2401 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2402 	s32 ret_val = IXGBE_ERR_CONFIG;
   2403 
   2404 	DEBUGFUNC("ixgbe_read_eeprom_82599");
   2405 
   2406 	/*
   2407 	 * If EEPROM is detected and can be addressed using 14 bits,
   2408 	 * use EERD otherwise use bit bang
   2409 	 */
   2410 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2411 	    (offset <= IXGBE_EERD_MAX_ADDR))
   2412 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
   2413 	else
   2414 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
   2415 
   2416 	return ret_val;
   2417 }
   2418 
   2419 /**
   2420  * ixgbe_reset_pipeline_82599 - perform pipeline reset
   2421  *
   2422  *  @hw: pointer to hardware structure
   2423  *
   2424  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
   2425  * full pipeline reset.  This function assumes the SW/FW lock is held.
   2426  **/
s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 anlp1_reg = 0;
	u32 i, autoc_reg, autoc2_reg;

	/* Enable link if disabled in NVM */
	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
		IXGBE_WRITE_FLUSH(hw);
	}

	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
	/* Wait for AN to leave state 0 (up to 10 x 4 ms) */
	for (i = 0; i < 10; i++) {
		msec_delay(4);
		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
			break;
	}

	/* anlp1_reg holds the last value polled above */
	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
		DEBUGOUT("auto negotiation not completed\n");
		ret_val = IXGBE_ERR_RESET_FAILED;
		goto reset_pipeline_out;
	}

	ret_val = IXGBE_SUCCESS;

reset_pipeline_out:
	/* Write AUTOC register with original LMS field and Restart_AN.
	 * This restore runs on both the success and the failure path. */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
	IXGBE_WRITE_FLUSH(hw);

	return ret_val;
}
   2469 
   2470 /**
   2471  *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
   2472  *  @hw: pointer to hardware structure
   2473  *  @byte_offset: byte offset to read
   2474  *  @data: value read
   2475  *
   2476  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
   2477  *  a specified device address.
   2478  **/
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				u8 dev_addr, u8 *data)
{
	u32 esdp;
	s32 status;
	s32 timeout = 200;

	DEBUGFUNC("ixgbe_read_i2c_byte_82599");

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Acquire I2C bus ownership: raise SDP0 to request the bus,
		 * then poll SDP1 until ownership is granted (up to
		 * 200 x 5 ms). */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		while (timeout) {
			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
			if (esdp & IXGBE_ESDP_SDP1)
				break;

			msec_delay(5);
			timeout--;
		}

		if (!timeout) {
			/* drop the pending request (clear SDP0) via the
			 * release path below before returning the error */
			DEBUGOUT("Driver can't access resource,"
				 " acquiring I2C bus timeout.\n");
			status = IXGBE_ERR_I2C;
			goto release_i2c_access;
		}
	}

	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);

release_i2c_access:

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Release I2C bus ownership. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp &= ~IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
	}

	return status;
}
   2526 
   2527 /**
   2528  *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
   2529  *  @hw: pointer to hardware structure
   2530  *  @byte_offset: byte offset to write
   2531  *  @data: value to write
   2532  *
   2533  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
   2534  *  a specified device address.
   2535  **/
   2536 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
   2537 				 u8 dev_addr, u8 data)
   2538 {
   2539 	u32 esdp;
   2540 	s32 status;
   2541 	s32 timeout = 200;
   2542 
   2543 	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
   2544 
   2545 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2546 		/* Acquire I2C bus ownership. */
   2547 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2548 		esdp |= IXGBE_ESDP_SDP0;
   2549 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2550 		IXGBE_WRITE_FLUSH(hw);
   2551 
   2552 		while (timeout) {
   2553 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2554 			if (esdp & IXGBE_ESDP_SDP1)
   2555 				break;
   2556 
   2557 			msec_delay(5);
   2558 			timeout--;
   2559 		}
   2560 
   2561 		if (!timeout) {
   2562 			DEBUGOUT("Driver can't access resource,"
   2563 				 " acquiring I2C bus timeout.\n");
   2564 			status = IXGBE_ERR_I2C;
   2565 			goto release_i2c_access;
   2566 		}
   2567 	}
   2568 
   2569 	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
   2570 
   2571 release_i2c_access:
   2572 
   2573 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2574 		/* Release I2C bus ownership. */
   2575 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2576 		esdp &= ~IXGBE_ESDP_SDP0;
   2577 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2578 		IXGBE_WRITE_FLUSH(hw);
   2579 	}
   2580 
   2581 	return status;
   2582 }
   2583