Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe_82599.c revision 1.19
      1 /* $NetBSD: ixgbe_82599.c,v 1.19 2018/04/04 08:59:22 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4   SPDX-License-Identifier: BSD-3-Clause
      5 
      6   Copyright (c) 2001-2017, Intel Corporation
      7   All rights reserved.
      8 
      9   Redistribution and use in source and binary forms, with or without
     10   modification, are permitted provided that the following conditions are met:
     11 
     12    1. Redistributions of source code must retain the above copyright notice,
     13       this list of conditions and the following disclaimer.
     14 
     15    2. Redistributions in binary form must reproduce the above copyright
     16       notice, this list of conditions and the following disclaimer in the
     17       documentation and/or other materials provided with the distribution.
     18 
     19    3. Neither the name of the Intel Corporation nor the names of its
     20       contributors may be used to endorse or promote products derived from
     21       this software without specific prior written permission.
     22 
     23   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     24   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     27   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     28   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     29   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     30   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     31   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     32   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     33   POSSIBILITY OF SUCH DAMAGE.
     34 
     35 ******************************************************************************/
     36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 331224 2018-03-19 20:55:05Z erj $*/
     37 
     38 #include "ixgbe_type.h"
     39 #include "ixgbe_82599.h"
     40 #include "ixgbe_api.h"
     41 #include "ixgbe_common.h"
     42 #include "ixgbe_phy.h"
     43 
     44 #define IXGBE_82599_MAX_TX_QUEUES 128
     45 #define IXGBE_82599_MAX_RX_QUEUES 128
     46 #define IXGBE_82599_RAR_ENTRIES   128
     47 #define IXGBE_82599_MC_TBL_SIZE   128
     48 #define IXGBE_82599_VFT_TBL_SIZE  128
     49 #define IXGBE_82599_RX_PB_SIZE	  512
     50 
     51 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
     52 					 ixgbe_link_speed speed,
     53 					 bool autoneg_wait_to_complete);
     54 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
     55 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
     56 				   u16 offset, u16 *data);
     57 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
     58 					  u16 words, u16 *data);
     59 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
     60 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     61 					u8 dev_addr, u8 *data);
     62 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     63 					u8 dev_addr, u8 data);
     64 
/**
 *  ixgbe_init_mac_link_ops_82599 - Init link-specific function pointers
 *  @hw: pointer to hardware structure
 *
 *  Selects the MAC link setup and Tx-laser control callbacks based on the
 *  detected media type, multispeed-fiber capability and the SmartSpeed
 *  configuration.  Called again after SFP identification since the media
 *  type may change when a module is inserted.
 **/
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/*
	 * enable the laser control functions for SFP+ fiber
	 * and MNG not enabled
	 */
	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	    !ixgbe_mng_enabled(hw)) {
		mac->ops.disable_tx_laser =
				       ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
					ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;

	} else {
		/* Non-fiber media, or MNG firmware owns the laser: no
		 * software laser control. */
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
		mac->ops.set_rate_select_speed =
					       ixgbe_set_hard_rate_select_speed;
		/* Fixed-fiber (bypass) parts select rate in software
		 * rather than via the RS0/RS1 pins. */
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
			mac->ops.set_rate_select_speed =
					       ixgbe_set_soft_rate_select_speed;
	} else {
		/* SmartSpeed applies only to backplane links when it is
		 * enabled/auto and LESM firmware is not managing the link. */
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
		}
	}
}
    109 
    110 /**
    111  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
    112  *  @hw: pointer to hardware structure
    113  *
    114  *  Initialize any function pointers that were not able to be
    115  *  set during init_shared_code because the PHY/SFP type was
    116  *  not known.  Perform the SFP init if necessary.
    117  *
    118  **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus: SDP0 becomes an
		 * output (bus request line, driven low), SDP1 an input
		 * (bus grant); both are taken out of native mode so
		 * software controls them. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		/* Use the arbitration-aware I2C accessors for the shared bus */
		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
				  ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX PHYs need their own link setup/check and firmware
		 * version accessors */
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			     ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}
    176 
/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  Runs the EEPROM-provided CORECTL init sequence for the detected SFP
 *  module under the MAC CSR SW/FW semaphore, then restarts the DSP in
 *  10G SFI mode via a protected AUTOC write.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC if the semaphore cannot
 *  be taken, IXGBE_ERR_PHY on an EEPROM read failure, or
 *  IXGBE_ERR_SFP_SETUP_NOT_COMPLETE if the AUTOC write fails.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Write each EEPROM word to CORECTL until the 0xffff
		 * end-of-list sentinel is reached. */
		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}
    243 
    244 /**
    245  *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
    246  *  @hw: pointer to hardware structure
    247  *  @locked: Return the if we locked for this read.
    248  *  @reg_val: Value we read from AUTOC
    249  *
    250  *  For this part (82599) we need to wrap read-modify-writes with a possible
    251  *  FW/SW lock.  It is assumed this lock will be freed with the next
    252  *  prot_autoc_write_82599().
    253  */
    254 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
    255 {
    256 	s32 ret_val;
    257 
    258 	*locked = FALSE;
    259 	 /* If LESM is on then we need to hold the SW/FW semaphore. */
    260 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    261 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
    262 					IXGBE_GSSR_MAC_CSR_SM);
    263 		if (ret_val != IXGBE_SUCCESS)
    264 			return IXGBE_ERR_SWFW_SYNC;
    265 
    266 		*locked = TRUE;
    267 	}
    268 
    269 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    270 	return IXGBE_SUCCESS;
    271 }
    272 
    273 /**
    274  * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
    275  * @hw: pointer to hardware structure
    276  * @autoc: value to write to AUTOC
    277  * @locked: bool to indicate whether the SW/FW lock was already taken by
    278  *           previous proc_autoc_read_82599.
    279  *
    280  * This part (82599) may need to hold the SW/FW lock around all writes to
    281  * AUTOC. Likewise after a write we need to do a pipeline reset.
    282  */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 *  - We didn't do it already (in the read part of a read-modify-write)
	 *  - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 * Note: this path also runs when MNG FW blocked the write, so a
	 * lock taken by prot_autoc_read_82599() is still released.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}
    316 
    317 /**
    318  *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
    319  *  @hw: pointer to hardware structure
    320  *
    321  *  Initialize the function pointers and assign the MAC type for 82599.
    322  *  Does not touch the hardware.
    323  **/
    324 
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* Start from the generic tables, then override 82599 specifics.
	 * ret_val carries the generic-init status through to the return. */
	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware limits (queues, filter table sizes, packet buffer) */
	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* ARC subsystem is present when FWSM reports a manageability mode */
	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}
    410 
    411 /**
    412  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
    413  *  @hw: pointer to hardware structure
    414  *  @speed: pointer to link speed
    415  *  @autoneg: TRUE when autoneg or autotry is enabled
    416  *
    417  *  Determines the link capabilities by reading the AUTOC register.
    418  **/
    419 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
    420 				      ixgbe_link_speed *speed,
    421 				      bool *autoneg)
    422 {
    423 	s32 status = IXGBE_SUCCESS;
    424 	u32 autoc = 0;
    425 
    426 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
    427 
    428 
    429 	/* Check if 1G SFP module. */
    430 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
    431 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
    432 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
    433 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
    434 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
    435 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
    436 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    437 		*autoneg = TRUE;
    438 		goto out;
    439 	}
    440 
    441 	/*
    442 	 * Determine link capabilities based on the stored value of AUTOC,
    443 	 * which represents EEPROM defaults.  If AUTOC value has not
    444 	 * been stored, use the current register values.
    445 	 */
    446 	if (hw->mac.orig_link_settings_stored)
    447 		autoc = hw->mac.orig_autoc;
    448 	else
    449 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    450 
    451 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
    452 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
    453 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    454 		*autoneg = FALSE;
    455 		break;
    456 
    457 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
    458 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    459 		*autoneg = FALSE;
    460 		break;
    461 
    462 	case IXGBE_AUTOC_LMS_1G_AN:
    463 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    464 		*autoneg = TRUE;
    465 		break;
    466 
    467 	case IXGBE_AUTOC_LMS_10G_SERIAL:
    468 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    469 		*autoneg = FALSE;
    470 		break;
    471 
    472 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
    473 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
    474 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
    475 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    476 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    477 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    478 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    479 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    480 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    481 		*autoneg = TRUE;
    482 		break;
    483 
    484 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
    485 		*speed = IXGBE_LINK_SPEED_100_FULL;
    486 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    487 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    488 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    489 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    490 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    491 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    492 		*autoneg = TRUE;
    493 		break;
    494 
    495 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
    496 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
    497 		*autoneg = FALSE;
    498 		break;
    499 
    500 	default:
    501 		status = IXGBE_ERR_LINK_SETUP;
    502 		goto out;
    503 		break;
    504 	}
    505 
    506 	if (hw->phy.multispeed_fiber) {
    507 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
    508 			  IXGBE_LINK_SPEED_1GB_FULL;
    509 
    510 		/* QSFP must not enable full auto-negotiation
    511 		 * Limited autoneg is enabled at 1G
    512 		 */
    513 		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
    514 			*autoneg = FALSE;
    515 		else
    516 			*autoneg = TRUE;
    517 	}
    518 
    519 out:
    520 	return status;
    521 }
    522 
    523 /**
    524  *  ixgbe_get_media_type_82599 - Get media type
    525  *  @hw: pointer to hardware structure
    526  *
    527  *  Returns the media type (fiber, copper, backplane)
    528  **/
    529 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
    530 {
    531 	enum ixgbe_media_type media_type;
    532 
    533 	DEBUGFUNC("ixgbe_get_media_type_82599");
    534 
    535 	/* Detect if there is a copper PHY attached. */
    536 	switch (hw->phy.type) {
    537 	case ixgbe_phy_cu_unknown:
    538 	case ixgbe_phy_tn:
    539 		media_type = ixgbe_media_type_copper;
    540 		goto out;
    541 	default:
    542 		break;
    543 	}
    544 
    545 	switch (hw->device_id) {
    546 	case IXGBE_DEV_ID_82599_KX4:
    547 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
    548 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
    549 	case IXGBE_DEV_ID_82599_KR:
    550 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
    551 	case IXGBE_DEV_ID_82599_XAUI_LOM:
    552 		/* Default device ID is mezzanine card KX/KX4 */
    553 		media_type = ixgbe_media_type_backplane;
    554 		break;
    555 	case IXGBE_DEV_ID_82599_SFP:
    556 	case IXGBE_DEV_ID_82599_SFP_FCOE:
    557 	case IXGBE_DEV_ID_82599_SFP_EM:
    558 	case IXGBE_DEV_ID_82599_SFP_SF2:
    559 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
    560 	case IXGBE_DEV_ID_82599EN_SFP:
    561 		media_type = ixgbe_media_type_fiber;
    562 		break;
    563 	case IXGBE_DEV_ID_82599_CX4:
    564 		media_type = ixgbe_media_type_cx4;
    565 		break;
    566 	case IXGBE_DEV_ID_82599_T3_LOM:
    567 		media_type = ixgbe_media_type_copper;
    568 		break;
    569 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
    570 		media_type = ixgbe_media_type_fiber_qsfp;
    571 		break;
    572 	case IXGBE_DEV_ID_82599_BYPASS:
    573 		media_type = ixgbe_media_type_fiber_fixed;
    574 		hw->phy.multispeed_fiber = TRUE;
    575 		break;
    576 	default:
    577 		media_type = ixgbe_media_type_unknown;
    578 		break;
    579 	}
    580 out:
    581 	return media_type;
    582 }
    583 
    584 /**
    585  *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
    586  *  @hw: pointer to hardware structure
    587  *
    588  *  Disables link during D3 power down sequence.
    589  *
    590  **/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	u16 ee_ctrl_2 = 0;

	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
	/* Return value deliberately ignored: on a failed read ee_ctrl_2
	 * stays 0, so the CCD-bit test below simply skips the disable. */
	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	/* Only force link down in D3 when there is no manageability
	 * firmware, WoL is off, and the EEPROM CCD bit allows it. */
	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}
    606 
    607 /**
    608  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
    609  *  @hw: pointer to hardware structure
    610  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    611  *
    612  *  Configures link settings based on values in the ixgbe_hw struct.
    613  *  Restarts the link.  Performs autonegotiation if needed.
    614  **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/*  reset_pipeline requires us to hold this lock as it writes to
	 *  AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		/* Polling only applies to the KX/KX4/KR link modes that
		 * actually autonegotiate. */
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll LINKS for AN completion, 100ms per
			 * iteration, up to IXGBE_AUTO_NEG_TIME tries. */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

out:
	return status;
}
    674 
    675 /**
    676  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
    677  *  @hw: pointer to hardware structure
    678  *
    679  *  The base drivers may require better control over SFP+ module
    680  *  PHY states.  This includes selectively shutting down the Tx
    681  *  laser on the PHY, effectively halting physical link.
    682  **/
    683 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    684 {
    685 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    686 
    687 	/* Blocked by MNG FW so bail */
    688 	if (ixgbe_check_reset_blocked(hw))
    689 		return;
    690 
    691 	/* Disable Tx laser; allow 100us to go dark per spec */
    692 	esdp_reg |= IXGBE_ESDP_SDP3;
    693 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    694 	IXGBE_WRITE_FLUSH(hw);
    695 	usec_delay(100);
    696 }
    697 
    698 /**
    699  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
    700  *  @hw: pointer to hardware structure
    701  *
    702  *  The base drivers may require better control over SFP+ module
    703  *  PHY states.  This includes selectively turning on the Tx
    704  *  laser on the PHY, effectively starting physical link.
    705  **/
    706 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    707 {
    708 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    709 
    710 	/* Enable Tx laser; allow 100ms to light up */
    711 	esdp_reg &= ~IXGBE_ESDP_SDP3;
    712 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    713 	IXGBE_WRITE_FLUSH(hw);
    714 	msec_delay(100);
    715 }
    716 
    717 /**
    718  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
    719  *  @hw: pointer to hardware structure
    720  *
    721  *  When the driver changes the link speeds that it can support,
    722  *  it sets autotry_restart to TRUE to indicate that we need to
    723  *  initiate a new autotry session with the link partner.  To do
    724  *  so, we set the speed then disable and re-enable the Tx laser, to
    725  *  alert the link partner that it also needs to restart autotry on its
    726  *  end.  This is consistent with TRUE clause 37 autoneg, which also
    727  *  involves a loss of signal.
    728  **/
    729 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    730 {
    731 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
    732 
    733 	/* Blocked by MNG FW so bail */
    734 	if (ixgbe_check_reset_blocked(hw))
    735 		return;
    736 
    737 	if (hw->mac.autotry_restart) {
    738 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
    739 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
    740 		hw->mac.autotry_restart = FALSE;
    741 	}
    742 }
    743 
    744 /**
    745  *  ixgbe_set_hard_rate_select_speed - Set module link speed
    746  *  @hw: pointer to hardware structure
    747  *  @speed: link speed to set
    748  *
    749  *  Set module link speed via RS0/RS1 rate select pins.
    750  */
    751 void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
    752 					ixgbe_link_speed speed)
    753 {
    754 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    755 
    756 	switch (speed) {
    757 	case IXGBE_LINK_SPEED_10GB_FULL:
    758 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
    759 		break;
    760 	case IXGBE_LINK_SPEED_1GB_FULL:
    761 		esdp_reg &= ~IXGBE_ESDP_SDP5;
    762 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
    763 		break;
    764 	default:
    765 		DEBUGOUT("Invalid fixed module speed\n");
    766 		return;
    767 	}
    768 
    769 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    770 	IXGBE_WRITE_FLUSH(hw);
    771 }
    772 
    773 /**
    774  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
    775  *  @hw: pointer to hardware structure
    776  *  @speed: new link speed
    777  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    778  *
    779  *  Implements the Intel SmartSpeed algorithm.
    780  **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	/* Snapshot AUTOC so we can see which KR/KX4/KX modes are enabled */
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	/* Note for the log when SmartSpeed settled below the 10G maximum */
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
    884 
    885 /**
    886  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
    887  *  @hw: pointer to hardware structure
    888  *  @speed: new link speed
    889  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    890  *
    891  *  Set the link speed in the AUTOC register and restarts link.
    892  **/
    893 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
    894 			       ixgbe_link_speed speed,
    895 			       bool autoneg_wait_to_complete)
    896 {
    897 	bool autoneg = FALSE;
    898 	s32 status = IXGBE_SUCCESS;
    899 	u32 pma_pmd_1g, link_mode;
    900 	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
    901 	u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
    902 	u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
    903 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
    904 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
    905 	u32 links_reg;
    906 	u32 i;
    907 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
    908 
    909 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
    910 
    911 	/* Check to see if speed passed in is supported. */
    912 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
    913 	if (status)
    914 		goto out;
    915 
    916 	speed &= link_capabilities;
    917 
    918 	if (speed == 0) {
    919 		ixgbe_disable_tx_laser(hw); /* For fiber */
    920 		ixgbe_set_phy_power(hw, false); /* For copper */
    921 	} else {
    922 		/* In case previous media setting was none(down) */
    923 		ixgbe_enable_tx_laser(hw); /* for Fiber */
    924 		ixgbe_set_phy_power(hw, true); /* For copper */
    925 	}
    926 
    927 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
    928 	if (hw->mac.orig_link_settings_stored)
    929 		orig_autoc = hw->mac.orig_autoc;
    930 	else
    931 		orig_autoc = autoc;
    932 
    933 	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
    934 	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
    935 
    936 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    937 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    938 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    939 		/* Set KX4/KX/KR support according to speed requested */
    940 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
    941 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
    942 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
    943 				autoc |= IXGBE_AUTOC_KX4_SUPP;
    944 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
    945 			    (hw->phy.smart_speed_active == FALSE))
    946 				autoc |= IXGBE_AUTOC_KR_SUPP;
    947 		}
    948 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    949 			autoc |= IXGBE_AUTOC_KX_SUPP;
    950 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
    951 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
    952 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
    953 		/* Switch from 1G SFI to 10G SFI if requested */
    954 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
    955 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
    956 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    957 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
    958 		}
    959 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
    960 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
    961 		/* Switch from 10G SFI to 1G SFI if requested */
    962 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
    963 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
    964 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    965 			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
    966 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
    967 			else
    968 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
    969 		}
    970 	}
    971 
    972 	if (autoc != current_autoc) {
    973 		/* Restart link */
    974 		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
    975 		if (status != IXGBE_SUCCESS)
    976 			goto out;
    977 
    978 		/* Only poll for autoneg to complete if specified to do so */
    979 		if (autoneg_wait_to_complete) {
    980 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    981 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    982 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    983 				links_reg = 0; /*Just in case Autoneg time=0*/
    984 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
    985 					links_reg =
    986 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
    987 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
    988 						break;
    989 					msec_delay(100);
    990 				}
    991 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
    992 					status =
    993 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
    994 					DEBUGOUT("Autoneg did not complete.\n");
    995 				}
    996 			}
    997 		}
    998 
    999 		/* Add delay to filter out noises during initial link setup */
   1000 		msec_delay(50);
   1001 	}
   1002 
   1003 out:
   1004 	return status;
   1005 }
   1006 
   1007 /**
   1008  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
   1009  *  @hw: pointer to hardware structure
   1010  *  @speed: new link speed
   1011  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
   1012  *
   1013  *  Restarts link on PHY and MAC based on settings passed in.
   1014  **/
   1015 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
   1016 					 ixgbe_link_speed speed,
   1017 					 bool autoneg_wait_to_complete)
   1018 {
   1019 	s32 status;
   1020 
   1021 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
   1022 
   1023 	/* Setup the PHY according to input speed */
   1024 	status = hw->phy.ops.setup_link_speed(hw, speed,
   1025 					      autoneg_wait_to_complete);
   1026 	/* Set up MAC */
   1027 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
   1028 
   1029 	return status;
   1030 }
   1031 
   1032 /**
   1033  *  ixgbe_reset_hw_82599 - Perform hardware reset
   1034  *  @hw: pointer to hardware structure
   1035  *
   1036  *  Resets the hardware by resetting the transmit and receive units, masks
   1037  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
   1038  *  reset.
   1039  **/
   1040 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
   1041 {
   1042 	ixgbe_link_speed link_speed;
   1043 	s32 status;
   1044 	u32 ctrl = 0;
   1045 	u32 i, autoc, autoc2;
   1046 	u32 curr_lms;
   1047 	bool link_up = FALSE;
   1048 
   1049 	DEBUGFUNC("ixgbe_reset_hw_82599");
   1050 
   1051 	/* Call adapter stop to disable tx/rx and clear interrupts */
   1052 	status = hw->mac.ops.stop_adapter(hw);
   1053 	if (status != IXGBE_SUCCESS)
   1054 		goto reset_hw_out;
   1055 
   1056 	/* flush pending Tx transactions */
   1057 	ixgbe_clear_tx_pending(hw);
   1058 
   1059 	/* PHY ops must be identified and initialized prior to reset */
   1060 
   1061 	/* Identify PHY and related function pointers */
   1062 	status = hw->phy.ops.init(hw);
   1063 
   1064 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
   1065 		goto reset_hw_out;
   1066 
   1067 	/* Setup SFP module if there is one present. */
   1068 	if (hw->phy.sfp_setup_needed) {
   1069 		status = hw->mac.ops.setup_sfp(hw);
   1070 		hw->phy.sfp_setup_needed = FALSE;
   1071 	}
   1072 
   1073 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
   1074 		goto reset_hw_out;
   1075 
   1076 	/* Reset PHY */
   1077 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
   1078 		hw->phy.ops.reset(hw);
   1079 
   1080 	/* remember AUTOC from before we reset */
   1081 	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
   1082 
   1083 mac_reset_top:
   1084 	/*
   1085 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
   1086 	 * If link reset is used when link is up, it might reset the PHY when
   1087 	 * mng is using it.  If link is down or the flag to force full link
   1088 	 * reset is set, then perform link reset.
   1089 	 */
   1090 	ctrl = IXGBE_CTRL_LNK_RST;
   1091 	if (!hw->force_full_reset) {
   1092 		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
   1093 		if (link_up)
   1094 			ctrl = IXGBE_CTRL_RST;
   1095 	}
   1096 
   1097 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
   1098 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
   1099 	IXGBE_WRITE_FLUSH(hw);
   1100 
   1101 	/* Poll for reset bit to self-clear meaning reset is complete */
   1102 	for (i = 0; i < 10; i++) {
   1103 		usec_delay(1);
   1104 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
   1105 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
   1106 			break;
   1107 	}
   1108 
   1109 	if (ctrl & IXGBE_CTRL_RST_MASK) {
   1110 		status = IXGBE_ERR_RESET_FAILED;
   1111 		DEBUGOUT("Reset polling failed to complete.\n");
   1112 	}
   1113 
   1114 	msec_delay(50);
   1115 
   1116 	/*
   1117 	 * Double resets are required for recovery from certain error
   1118 	 * conditions.  Between resets, it is necessary to stall to
   1119 	 * allow time for any pending HW events to complete.
   1120 	 */
   1121 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
   1122 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
   1123 		goto mac_reset_top;
   1124 	}
   1125 
   1126 	/*
   1127 	 * Store the original AUTOC/AUTOC2 values if they have not been
   1128 	 * stored off yet.  Otherwise restore the stored original
   1129 	 * values since the reset operation sets back to defaults.
   1130 	 */
   1131 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   1132 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   1133 
   1134 	/* Enable link if disabled in NVM */
   1135 	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
   1136 		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
   1137 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
   1138 		IXGBE_WRITE_FLUSH(hw);
   1139 	}
   1140 
   1141 	if (hw->mac.orig_link_settings_stored == FALSE) {
   1142 		hw->mac.orig_autoc = autoc;
   1143 		hw->mac.orig_autoc2 = autoc2;
   1144 		hw->mac.orig_link_settings_stored = TRUE;
   1145 	} else {
   1146 
   1147 		/* If MNG FW is running on a multi-speed device that
   1148 		 * doesn't autoneg with out driver support we need to
   1149 		 * leave LMS in the state it was before we MAC reset.
   1150 		 * Likewise if we support WoL we don't want change the
   1151 		 * LMS state.
   1152 		 */
   1153 		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
   1154 		    hw->wol_enabled)
   1155 			hw->mac.orig_autoc =
   1156 				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
   1157 				curr_lms;
   1158 
   1159 		if (autoc != hw->mac.orig_autoc) {
   1160 			status = hw->mac.ops.prot_autoc_write(hw,
   1161 							hw->mac.orig_autoc,
   1162 							FALSE);
   1163 			if (status != IXGBE_SUCCESS)
   1164 				goto reset_hw_out;
   1165 		}
   1166 
   1167 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
   1168 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
   1169 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
   1170 			autoc2 |= (hw->mac.orig_autoc2 &
   1171 				   IXGBE_AUTOC2_UPPER_MASK);
   1172 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
   1173 		}
   1174 	}
   1175 
   1176 	/* Store the permanent mac address */
   1177 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
   1178 
   1179 	/*
   1180 	 * Store MAC address from RAR0, clear receive address registers, and
   1181 	 * clear the multicast table.  Also reset num_rar_entries to 128,
   1182 	 * since we modify this value when programming the SAN MAC address.
   1183 	 */
   1184 	hw->mac.num_rar_entries = 128;
   1185 	hw->mac.ops.init_rx_addrs(hw);
   1186 
   1187 	/* Store the permanent SAN mac address */
   1188 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
   1189 
   1190 	/* Add the SAN MAC address to the RAR only if it's a valid address */
   1191 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
   1192 		/* Save the SAN MAC RAR index */
   1193 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
   1194 
   1195 		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
   1196 				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
   1197 
   1198 		/* clear VMDq pool/queue selection for this RAR */
   1199 		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
   1200 				       IXGBE_CLEAR_VMDQ_ALL);
   1201 
   1202 		/* Reserve the last RAR for the SAN MAC address */
   1203 		hw->mac.num_rar_entries--;
   1204 	}
   1205 
   1206 	/* Store the alternative WWNN/WWPN prefix */
   1207 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
   1208 				   &hw->mac.wwpn_prefix);
   1209 
   1210 reset_hw_out:
   1211 	return status;
   1212 }
   1213 
   1214 /**
   1215  * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
   1216  * @hw: pointer to hardware structure
   1217  * @fdircmd: current value of FDIRCMD register
   1218  */
   1219 static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
   1220 {
   1221 	int i;
   1222 
   1223 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
   1224 		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
   1225 		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
   1226 			return IXGBE_SUCCESS;
   1227 		usec_delay(10);
   1228 	}
   1229 
   1230 	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
   1231 }
   1232 
   1233 /**
   1234  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
   1235  *  @hw: pointer to hardware structure
   1236  **/
   1237 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
   1238 {
   1239 	s32 err;
   1240 	int i;
   1241 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
   1242 	u32 fdircmd;
   1243 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
   1244 
   1245 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
   1246 
   1247 	/*
   1248 	 * Before starting reinitialization process,
   1249 	 * FDIRCMD.CMD must be zero.
   1250 	 */
   1251 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
   1252 	if (err) {
   1253 		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
   1254 		return err;
   1255 	}
   1256 
   1257 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
   1258 	IXGBE_WRITE_FLUSH(hw);
   1259 	/*
   1260 	 * 82599 adapters flow director init flow cannot be restarted,
   1261 	 * Workaround 82599 silicon errata by performing the following steps
   1262 	 * before re-writing the FDIRCTRL control register with the same value.
   1263 	 * - write 1 to bit 8 of FDIRCMD register &
   1264 	 * - write 0 to bit 8 of FDIRCMD register
   1265 	 */
   1266 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1267 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
   1268 			 IXGBE_FDIRCMD_CLEARHT));
   1269 	IXGBE_WRITE_FLUSH(hw);
   1270 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1271 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1272 			 ~IXGBE_FDIRCMD_CLEARHT));
   1273 	IXGBE_WRITE_FLUSH(hw);
   1274 	/*
   1275 	 * Clear FDIR Hash register to clear any leftover hashes
   1276 	 * waiting to be programmed.
   1277 	 */
   1278 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
   1279 	IXGBE_WRITE_FLUSH(hw);
   1280 
   1281 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1282 	IXGBE_WRITE_FLUSH(hw);
   1283 
   1284 	/* Poll init-done after we write FDIRCTRL register */
   1285 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1286 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1287 				   IXGBE_FDIRCTRL_INIT_DONE)
   1288 			break;
   1289 		msec_delay(1);
   1290 	}
   1291 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
   1292 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
   1293 		return IXGBE_ERR_FDIR_REINIT_FAILED;
   1294 	}
   1295 
   1296 	/* Clear FDIR statistics registers (read to clear) */
   1297 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
   1298 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
   1299 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
   1300 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
   1301 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
   1302 
   1303 	return IXGBE_SUCCESS;
   1304 }
   1305 
   1306 /**
   1307  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
   1308  *  @hw: pointer to hardware structure
   1309  *  @fdirctrl: value to write to flow director control register
   1310  **/
   1311 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1312 {
   1313 	int i;
   1314 
   1315 	DEBUGFUNC("ixgbe_fdir_enable_82599");
   1316 
   1317 	/* Prime the keys for hashing */
   1318 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
   1319 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
   1320 
   1321 	/*
   1322 	 * Poll init-done after we write the register.  Estimated times:
   1323 	 *      10G: PBALLOC = 11b, timing is 60us
   1324 	 *       1G: PBALLOC = 11b, timing is 600us
   1325 	 *     100M: PBALLOC = 11b, timing is 6ms
   1326 	 *
   1327 	 *     Multiple these timings by 4 if under full Rx load
   1328 	 *
   1329 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
   1330 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
   1331 	 * this might not finish in our poll time, but we can live with that
   1332 	 * for now.
   1333 	 */
   1334 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1335 	IXGBE_WRITE_FLUSH(hw);
   1336 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1337 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1338 				   IXGBE_FDIRCTRL_INIT_DONE)
   1339 			break;
   1340 		msec_delay(1);
   1341 	}
   1342 
   1343 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
   1344 		DEBUGOUT("Flow Director poll time exceeded!\n");
   1345 }
   1346 
   1347 /**
   1348  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
   1349  *  @hw: pointer to hardware structure
   1350  *  @fdirctrl: value to write to flow director control register, initially
   1351  *	     contains just the value of the Rx packet buffer allocation
   1352  **/
   1353 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1354 {
   1355 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
   1356 
   1357 	/*
   1358 	 * Continue setup of fdirctrl register bits:
   1359 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1360 	 *  Set the maximum length per hash bucket to 0xA filters
   1361 	 *  Send interrupt when 64 filters are left
   1362 	 */
   1363 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1364 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1365 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1366 
   1367 	/* write hashes and fdirctrl register, poll for completion */
   1368 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1369 
   1370 	return IXGBE_SUCCESS;
   1371 }
   1372 
   1373 /**
   1374  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
   1375  *  @hw: pointer to hardware structure
   1376  *  @fdirctrl: value to write to flow director control register, initially
   1377  *	     contains just the value of the Rx packet buffer allocation
   1378  *  @cloud_mode: TRUE - cloud mode, FALSE - other mode
   1379  **/
   1380 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
   1381 			bool cloud_mode)
   1382 {
   1383 	UNREFERENCED_1PARAMETER(cloud_mode);
   1384 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
   1385 
   1386 	/*
   1387 	 * Continue setup of fdirctrl register bits:
   1388 	 *  Turn perfect match filtering on
   1389 	 *  Report hash in RSS field of Rx wb descriptor
   1390 	 *  Initialize the drop queue to queue 127
   1391 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1392 	 *  Set the maximum length per hash bucket to 0xA filters
   1393 	 *  Send interrupt when 64 (0x4 * 16) filters are left
   1394 	 */
   1395 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
   1396 		    IXGBE_FDIRCTRL_REPORT_STATUS |
   1397 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
   1398 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1399 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1400 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1401 
   1402 	if (cloud_mode)
   1403 		fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
   1404 					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
   1405 
   1406 	/* write hashes and fdirctrl register, poll for completion */
   1407 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1408 
   1409 	return IXGBE_SUCCESS;
   1410 }
   1411 
   1412 /**
   1413  *  ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
   1414  *  @hw: pointer to hardware structure
   1415  *  @dropqueue: Rx queue index used for the dropped packets
   1416  **/
   1417 void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
   1418 {
   1419 	u32 fdirctrl;
   1420 
   1421 	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
   1422 	/* Clear init done bit and drop queue field */
   1423 	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
   1424 	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
   1425 
   1426 	/* Set drop queue */
   1427 	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
   1428 	if ((hw->mac.type == ixgbe_mac_X550) ||
   1429 	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
   1430 	    (hw->mac.type == ixgbe_mac_X550EM_a))
   1431 		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
   1432 
   1433 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1434 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
   1435 			 IXGBE_FDIRCMD_CLEARHT));
   1436 	IXGBE_WRITE_FLUSH(hw);
   1437 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1438 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1439 			 ~IXGBE_FDIRCMD_CLEARHT));
   1440 	IXGBE_WRITE_FLUSH(hw);
   1441 
   1442 	/* write hashes and fdirctrl register, poll for completion */
   1443 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1444 }
   1445 
   1446 /*
   1447  * These defines allow us to quickly generate all of the necessary instructions
   1448  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
   1449  * for values 0 through 15
   1450  */
   1451 #define IXGBE_ATR_COMMON_HASH_KEY \
   1452 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
   1453 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
   1454 do { \
   1455 	u32 n = (_n); \
   1456 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
   1457 		common_hash ^= lo_hash_dword >> n; \
   1458 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
   1459 		bucket_hash ^= lo_hash_dword >> n; \
   1460 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
   1461 		sig_hash ^= lo_hash_dword << (16 - n); \
   1462 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
   1463 		common_hash ^= hi_hash_dword >> n; \
   1464 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
   1465 		bucket_hash ^= hi_hash_dword >> n; \
   1466 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
   1467 		sig_hash ^= hi_hash_dword << (16 - n); \
   1468 } while (0)
   1469 
   1470 /**
   1471  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
   1472  *  @input: input bitstream to compute the hash on
   1473  *  @common: compressed common input dword
   1474  *
   1475  *  This function is almost identical to the function above but contains
   1476  *  several optimizations such as unwinding all of the loops, letting the
   1477  *  compiler work out all of the conditional ifs since the keys are static
   1478  *  defines, and computing two keys at once since the hashed dword stream
   1479  *  will be the same for both keys.
   1480  **/
   1481 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
   1482 				     union ixgbe_atr_hash_dword common)
   1483 {
   1484 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
   1485 	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
   1486 
   1487 	/* record the flow_vm_vlan bits as they are a key part to the hash */
   1488 	flow_vm_vlan = IXGBE_NTOHL(input.dword);
   1489 
   1490 	/* generate common hash dword */
   1491 	hi_hash_dword = IXGBE_NTOHL(common.dword);
   1492 
   1493 	/* low dword is word swapped version of common */
   1494 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
   1495 
   1496 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
   1497 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
   1498 
   1499 	/* Process bits 0 and 16 */
   1500 	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
   1501 
   1502 	/*
   1503 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
   1504 	 * delay this because bit 0 of the stream should not be processed
   1505 	 * so we do not add the VLAN until after bit 0 was processed
   1506 	 */
   1507 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
   1508 
   1509 	/* Process remaining 30 bit of the key */
   1510 	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
   1511 	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
   1512 	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
   1513 	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
   1514 	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
   1515 	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
   1516 	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
   1517 	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
   1518 	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
   1519 	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
   1520 	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
   1521 	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
   1522 	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
   1523 	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
   1524 	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
   1525 
   1526 	/* combine common_hash result with signature and bucket hashes */
   1527 	bucket_hash ^= common_hash;
   1528 	bucket_hash &= IXGBE_ATR_HASH_MASK;
   1529 
   1530 	sig_hash ^= common_hash << 16;
   1531 	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
   1532 
   1533 	/* return completed signature hash */
   1534 	return sig_hash ^ bucket_hash;
   1535 }
   1536 
   1537 /**
   1538  *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
   1539  *  @hw: pointer to hardware structure
   1540  *  @input: unique input dword
   1541  *  @common: compressed common input dword
   1542  *  @queue: queue index to direct traffic to
   1543  *
   1544  * Note that the tunnel bit in input must not be set when the hardware
   1545  * tunneling support does not exist.
   1546  **/
   1547 void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
   1548 					   union ixgbe_atr_hash_dword input,
   1549 					   union ixgbe_atr_hash_dword common,
   1550 					   u8 queue)
   1551 {
   1552 	u64 fdirhashcmd;
   1553 	u8 flow_type;
   1554 	bool tunnel;
   1555 	u32 fdircmd;
   1556 
   1557 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
   1558 
   1559 	/*
   1560 	 * Get the flow_type in order to program FDIRCMD properly
   1561 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
   1562 	 * fifth is FDIRCMD.TUNNEL_FILTER
   1563 	 */
   1564 	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
   1565 	flow_type = input.formatted.flow_type &
   1566 		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
   1567 	switch (flow_type) {
   1568 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   1569 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   1570 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   1571 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
   1572 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
   1573 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
   1574 		break;
   1575 	default:
   1576 		DEBUGOUT(" Error on flow type input\n");
   1577 		return;
   1578 	}
   1579 
   1580 	/* configure FDIRCMD register */
   1581 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
   1582 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
   1583 	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
   1584 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
   1585 	if (tunnel)
   1586 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
   1587 
   1588 	/*
   1589 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
   1590 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
   1591 	 */
   1592 	fdirhashcmd = (u64)fdircmd << 32;
   1593 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
   1594 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
   1595 
   1596 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
   1597 
   1598 	return;
   1599 }
   1600 
/*
 * One iteration of the bucket-hash computation: fold the shifted low and
 * high hash dwords into bucket_hash for key bits n and n+16 of
 * IXGBE_ATR_BUCKET_HASH_KEY.  Expects bucket_hash, lo_hash_dword and
 * hi_hash_dword to be in scope at the expansion site.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
   1609 
   1610 /**
   1611  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
   1612  *  @input: input bitstream to compute the hash on
   1613  *  @input_mask: mask for the input bitstream
   1614  *
   1615  *  This function serves two main purposes.  First it applies the input_mask
   1616  *  to the atr_input resulting in a cleaned up atr_input data stream.
   1617  *  Secondly it computes the hash and stores it in the bkt_hash field at
   1618  *  the end of the input byte stream.  This way it will be available for
   1619  *  future use without needing to recompute the hash.
   1620  **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	u32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data (dwords 0..13; the trailing dword
	 * holds bkt_hash and is overwritten below) */
	for (i = 0; i < 14; i++)
		input->dword_stream[i]  &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword by folding dwords 1..13 together */
	for (i = 1; i <= 13; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = IXGBE_NTOHL(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bits of the key (bits 1-15 and 17-31) */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}
   1668 
   1669 /**
   1670  *  ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
   1671  *  @input_mask: mask to be bit swapped
   1672  *
   1673  *  The source and destination port masks for flow director are bit swapped
   1674  *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
   1675  *  generate a correctly swapped value we need to bit swap the mask and that
   1676  *  is what is accomplished by this function.
   1677  **/
   1678 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
   1679 {
   1680 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
   1681 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
   1682 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
   1683 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
   1684 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
   1685 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
   1686 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
   1687 }
   1688 
   1689 /*
   1690  * These two macros are meant to address the fact that we have registers
   1691  * that are either all or in part big-endian.  As a result on big-endian
   1692  * systems we will end up byte swapping the value to little-endian before
   1693  * it is byte swapped again and written to the hardware in the original
   1694  * big-endian format.
   1695  */
/* Byte-swap a 32-bit value (host layout <-> big-endian register layout). */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* Write a network-order 32-bit value to a (partially) big-endian register. */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* Byte-swap a 16-bit value and convert from network order. */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
   1705 
/**
 *  ixgbe_fdir_set_input_mask_82599 - Program Flow Director field masks
 *  @hw: pointer to hardware structure
 *  @input_mask: masks to program; per field, all-zeros means "mask the
 *	field out" and all-ones means "match on the field" -- partial
 *	masks are rejected with IXGBE_ERR_CONFIG
 *  @cloud_mode: TRUE to additionally program tunnel (cloud) masks
 *
 *  Writes FDIRM plus the TCP/UDP(/SCTP) port and IPv4 address mask
 *  registers from the supplied mask structure.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask, bool cloud_mode)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;
	u32 fdirip6m;
	UNREFERENCED_1PARAMETER(cloud_mode);
	/* NOTE(review): debug tag kept from upstream Intel code even though
	 * it does not match the function name exactly */
	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (cloud_mode) {
		fdirm |= IXGBE_FDIRM_L3P;
		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;

		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
		case 0x00:
			/* Mask inner MAC, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
		case 0xFF:
			break;
		default:
			DEBUGOUT(" Error on inner_mac byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
		case 0x0:
			/* Mask vxlan id */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			/* match only the low 24 bits of the TNI/VNI */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			DEBUGOUT(" Error on TNI/VNI byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
		case 0x0:
			/* Mask tunnel type, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
		case 0xFFFF:
			break;
		default:
			DEBUGOUT(" Error on tunnel type byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);

		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
		 * L3/L3 packets to tunnel.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
		/* the X550 family adds a separate SCTP mask register */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
			break;
		default:
			break;
		}
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	if (!cloud_mode) {
		/* store the TCP/UDP port masks, bit reversed from port
		 * layout */
		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

		/* write both the same so that UDP and TCP use the same mask */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
		/* also use it for SCTP */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
			break;
		default:
			break;
		}

		/* store source and destination IP masks (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
				     ~input_mask->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
				     ~input_mask->formatted.dst_ip[0]);
	}
	return IXGBE_SUCCESS;
}
   1881 
/**
 *  ixgbe_fdir_write_perfect_filter_82599 - write a perfect filter to HW
 *  @hw: pointer to hardware structure
 *  @input: input bitstream; fields are expected to be pre-masked and
 *	formatted.bkt_hash pre-computed by the perfect-hash routine
 *  @soft_id: software index stored in FDIRHASH for later lookup/removal
 *  @queue: Rx queue matching packets are directed to
 *  @cloud_mode: TRUE to also program the tunnel (cloud) match fields
 *
 *  Programs the match registers for one perfect filter, then issues the
 *  ADD_FLOW command and polls for completion.  The caller must serialize
 *  Flow Director programming.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue, bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;
	UNREFERENCED_1PARAMETER(cloud_mode);

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
			input->formatted.src_ip[0]);

		/* record the first 32 bits of the destination address
		 * (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
			input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		/* high bit of the FDIRSIPv6(1) word flags a tunneled filter */
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000;

		/* pack the 6-byte inner MAC address into two 32-bit words */
		addr_low = ((u32)input->formatted.inner_mac[0] |
				((u32)input->formatted.inner_mac[1] << 8) |
				((u32)input->formatted.inner_mac[2] << 16) |
				((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
				((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
	}

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}
   1971 
/**
 *  ixgbe_fdir_erase_perfect_filter_82599 - remove a perfect filter from HW
 *  @hw: pointer to hardware structure
 *  @input: input bitstream; only formatted.bkt_hash is consumed here
 *  @soft_id: software index the filter was added with
 *
 *  Queries the hardware for the filter identified by bkt_hash/soft_id and,
 *  when the query reports a valid filter, issues a REMOVE_FLOW command.
 **/
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	/* fdircmd receives the completed command word for the VALID test */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return IXGBE_SUCCESS;
}
   2007 
   2008 /**
   2009  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
   2010  *  @hw: pointer to hardware structure
   2011  *  @input: input bitstream
   2012  *  @input_mask: mask for the input bitstream
   2013  *  @soft_id: software index for the filters
   2014  *  @queue: queue index to direct traffic to
   2015  *  @cloud_mode: unused
   2016  *
   2017  *  Note that the caller to this function must lock before calling, since the
   2018  *  hardware writes must be protected from one another.
   2019  **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue, bool cloud_mode)
{
	s32 err = IXGBE_ERR_CONFIG;
	UNREFERENCED_1PARAMETER(cloud_mode);

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
		/* note: the caller's input_mask->formatted.flow_type is
		 * overwritten with the mask appropriate to the flow type */
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		/* plain IPv4 filters carry no L4 ports */
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
	if (err)
		return err;

	/* apply mask and compute/store hash (mutates *input in place) */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue, cloud_mode);
}
   2074 
   2075 /**
   2076  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
   2077  *  @hw: pointer to hardware structure
   2078  *  @reg: analog register to read
   2079  *  @val: read value
   2080  *
   2081  *  Performs read operation to Omer analog register specified.
   2082  **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32  core_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82599");

	/* latch the analog register address (bits 15:8) along with the
	 * command bit, then give the analog block time to respond */
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	/* the requested value comes back in the low byte of CORECTL */
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}
   2098 
   2099 /**
   2100  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
   2101  *  @hw: pointer to hardware structure
   2102  *  @reg: atlas register to write
   2103  *  @val: value to write
   2104  *
   2105  *  Performs write operation to Omer analog register specified.
   2106  **/
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32  core_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82599");

	/* address in bits 15:8, data in the low byte; flush and wait so
	 * the analog block latches the write before the caller continues */
	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}
   2120 
   2121 /**
   2122  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
   2123  *  @hw: pointer to hardware structure
   2124  *
   2125  *  Starts the hardware using the generic start_hw function
   2126  *  and the generation start_hw function.
   2127  *  Then performs revision-specific operations, if any.
   2128  **/
   2129 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
   2130 {
   2131 	s32 ret_val = IXGBE_SUCCESS;
   2132 
   2133 	DEBUGFUNC("ixgbe_start_hw_82599");
   2134 
   2135 	ret_val = ixgbe_start_hw_generic(hw);
   2136 	if (ret_val != IXGBE_SUCCESS)
   2137 		goto out;
   2138 
   2139 	ret_val = ixgbe_start_hw_gen2(hw);
   2140 	if (ret_val != IXGBE_SUCCESS)
   2141 		goto out;
   2142 
   2143 	/* We need to run link autotry after the driver loads */
   2144 	hw->mac.autotry_restart = TRUE;
   2145 
   2146 	if (ret_val == IXGBE_SUCCESS)
   2147 		ret_val = ixgbe_verify_fw_version_82599(hw);
   2148 out:
   2149 	return ret_val;
   2150 }
   2151 
   2152 /**
   2153  *  ixgbe_identify_phy_82599 - Get physical layer module
   2154  *  @hw: pointer to hardware structure
   2155  *
   2156  *  Determines the physical layer module found on the current adapter.
   2157  *  If PHY already detected, maintains current PHY type in hw struct,
   2158  *  otherwise executes the PHY detection routine.
   2159  **/
   2160 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
   2161 {
   2162 	s32 status;
   2163 
   2164 	DEBUGFUNC("ixgbe_identify_phy_82599");
   2165 
   2166 	/* Detect PHY if not unknown - returns success if already detected. */
   2167 	status = ixgbe_identify_phy_generic(hw);
   2168 	if (status != IXGBE_SUCCESS) {
   2169 		/* 82599 10GBASE-T requires an external PHY */
   2170 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
   2171 			return status;
   2172 		else
   2173 			status = ixgbe_identify_module_generic(hw);
   2174 	}
   2175 
   2176 	/* Set PHY type none if no PHY detected */
   2177 	if (hw->phy.type == ixgbe_phy_unknown) {
   2178 		hw->phy.type = ixgbe_phy_none;
   2179 		return IXGBE_SUCCESS;
   2180 	}
   2181 
   2182 	/* Return error if SFP module has been detected but is not supported */
   2183 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
   2184 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
   2185 
   2186 	return status;
   2187 }
   2188 
   2189 /**
   2190  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
   2191  *  @hw: pointer to hardware structure
   2192  *
   2193  *  Determines physical layer capabilities of the current configuration.
   2194  **/
   2195 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
   2196 {
   2197 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   2198 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   2199 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   2200 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
   2201 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
   2202 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
   2203 	u16 ext_ability = 0;
   2204 
   2205 	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
   2206 
   2207 	hw->phy.ops.identify(hw);
   2208 
   2209 	switch (hw->phy.type) {
   2210 	case ixgbe_phy_tn:
   2211 	case ixgbe_phy_cu_unknown:
   2212 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
   2213 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
   2214 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
   2215 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
   2216 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
   2217 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
   2218 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
   2219 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
   2220 		goto out;
   2221 	default:
   2222 		break;
   2223 	}
   2224 
   2225 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
   2226 	case IXGBE_AUTOC_LMS_1G_AN:
   2227 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
   2228 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
   2229 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
   2230 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
   2231 			goto out;
   2232 		} else
   2233 			/* SFI mode so read SFP module */
   2234 			goto sfp_check;
   2235 		break;
   2236 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
   2237 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
   2238 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
   2239 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
   2240 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2241 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
   2242 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
   2243 		goto out;
   2244 		break;
   2245 	case IXGBE_AUTOC_LMS_10G_SERIAL:
   2246 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
   2247 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2248 			goto out;
   2249 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
   2250 			goto sfp_check;
   2251 		break;
   2252 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
   2253 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
   2254 		if (autoc & IXGBE_AUTOC_KX_SUPP)
   2255 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
   2256 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
   2257 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2258 		if (autoc & IXGBE_AUTOC_KR_SUPP)
   2259 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2260 		goto out;
   2261 		break;
   2262 	default:
   2263 		goto out;
   2264 		break;
   2265 	}
   2266 
   2267 sfp_check:
   2268 	/* SFP check must be done last since DA modules are sometimes used to
   2269 	 * test KR mode -  we need to id KR mode correctly before SFP module.
   2270 	 * Call identify_sfp because the pluggable module may have changed */
   2271 	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
   2272 out:
   2273 	return physical_layer;
   2274 }
   2275 
   2276 /**
   2277  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
   2278  *  @hw: pointer to hardware structure
   2279  *  @regval: register value to write to RXCTRL
   2280  *
   2281  *  Enables the Rx DMA unit for 82599
   2282  **/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{

	DEBUGFUNC("ixgbe_enable_rx_dma_82599");

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */

	hw->mac.ops.disable_sec_rx_path(hw);

	/* only the RXEN bit of the requested RXCTRL value is honored */
	if (regval & IXGBE_RXCTRL_RXEN)
		ixgbe_enable_rx(hw);
	else
		ixgbe_disable_rx(hw);

	/* re-enable the security engine only after Rx state is settled */
	hw->mac.ops.enable_sec_rx_path(hw);

	return IXGBE_SUCCESS;
}
   2306 
   2307 /**
   2308  *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
   2309  *  @hw: pointer to hardware structure
   2310  *
   2311  *  Verifies that installed the firmware version is 0.6 or higher
   2312  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
   2313  *
   2314  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
   2315  *  if the FW version is not supported.
   2316  **/
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM_VERSION;
	u16 fw_offset, fw_ptp_cfg_offset;
	u16 fw_version;

	DEBUGFUNC("ixgbe_verify_fw_version_82599");

	/* firmware check is only necessary for SFI devices */
	if (hw->phy.media_type != ixgbe_media_type_fiber) {
		status = IXGBE_SUCCESS;
		goto fw_version_out;
	}

	/* get the offset to the Firmware Module block */
	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	/* 0 / 0xFFFF mean an unprogrammed pointer: no firmware present */
	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
		goto fw_version_out;

	/* get the offset to the Pass Through Patch Configuration block */
	if (hw->eeprom.ops.read(hw, (fw_offset +
				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
				 &fw_ptp_cfg_offset)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed",
			      fw_offset +
			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	/* again, 0 / 0xFFFF indicate the block is absent */
	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
		goto fw_version_out;

	/* get the firmware version */
	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed",
			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	/* version 0.6 or later is required for SFI devices */
	if (fw_version > 0x5)
		status = IXGBE_SUCCESS;

fw_version_out:
	return status;
}
   2370 
   2371 /**
   2372  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
   2373  *  @hw: pointer to hardware structure
   2374  *
   2375  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
   2376  *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
   2377  **/
   2378 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
   2379 {
   2380 	bool lesm_enabled = FALSE;
   2381 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
   2382 	s32 status;
   2383 
   2384 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
   2385 
   2386 	/* get the offset to the Firmware Module block */
   2387 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
   2388 
   2389 	if ((status != IXGBE_SUCCESS) ||
   2390 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
   2391 		goto out;
   2392 
   2393 	/* get the offset to the LESM Parameters block */
   2394 	status = hw->eeprom.ops.read(hw, (fw_offset +
   2395 				     IXGBE_FW_LESM_PARAMETERS_PTR),
   2396 				     &fw_lesm_param_offset);
   2397 
   2398 	if ((status != IXGBE_SUCCESS) ||
   2399 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
   2400 		goto out;
   2401 
   2402 	/* get the LESM state word */
   2403 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
   2404 				     IXGBE_FW_LESM_STATE_1),
   2405 				     &fw_lesm_state);
   2406 
   2407 	if ((status == IXGBE_SUCCESS) &&
   2408 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
   2409 		lesm_enabled = TRUE;
   2410 
   2411 out:
   2412 	return lesm_enabled;
   2413 }
   2414 
   2415 /**
   2416  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
   2417  *  fastest available method
   2418  *
   2419  *  @hw: pointer to hardware structure
   2420  *  @offset: offset of  word in EEPROM to read
   2421  *  @words: number of words
   2422  *  @data: word(s) read from the EEPROM
   2423  *
   2424  *  Retrieves 16 bit word(s) read from EEPROM
   2425  **/
   2426 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
   2427 					  u16 words, u16 *data)
   2428 {
   2429 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2430 	s32 ret_val = IXGBE_ERR_CONFIG;
   2431 
   2432 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
   2433 
   2434 	/*
   2435 	 * If EEPROM is detected and can be addressed using 14 bits,
   2436 	 * use EERD otherwise use bit bang
   2437 	 */
   2438 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2439 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
   2440 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
   2441 							 data);
   2442 	else
   2443 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
   2444 								    words,
   2445 								    data);
   2446 
   2447 	return ret_val;
   2448 }
   2449 
   2450 /**
   2451  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
   2452  *  fastest available method
   2453  *
   2454  *  @hw: pointer to hardware structure
   2455  *  @offset: offset of  word in the EEPROM to read
   2456  *  @data: word read from the EEPROM
   2457  *
   2458  *  Reads a 16 bit word from the EEPROM
   2459  **/
   2460 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
   2461 				   u16 offset, u16 *data)
   2462 {
   2463 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2464 	s32 ret_val = IXGBE_ERR_CONFIG;
   2465 
   2466 	DEBUGFUNC("ixgbe_read_eeprom_82599");
   2467 
   2468 	/*
   2469 	 * If EEPROM is detected and can be addressed using 14 bits,
   2470 	 * use EERD otherwise use bit bang
   2471 	 */
   2472 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2473 	    (offset <= IXGBE_EERD_MAX_ADDR))
   2474 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
   2475 	else
   2476 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
   2477 
   2478 	return ret_val;
   2479 }
   2480 
   2481 /**
   2482  * ixgbe_reset_pipeline_82599 - perform pipeline reset
   2483  *
   2484  *  @hw: pointer to hardware structure
   2485  *
   2486  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
   2487  * full pipeline reset.  This function assumes the SW/FW lock is held.
   2488  **/
   2489 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
   2490 {
   2491 	s32 ret_val;
   2492 	u32 anlp1_reg = 0;
   2493 	u32 i, autoc_reg, autoc2_reg;
   2494 
   2495 	/* Enable link if disabled in NVM */
   2496 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   2497 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
   2498 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
   2499 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
   2500 		IXGBE_WRITE_FLUSH(hw);
   2501 	}
   2502 
   2503 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   2504 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
   2505 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
   2506 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
   2507 			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
   2508 	/* Wait for AN to leave state 0 */
   2509 	for (i = 0; i < 10; i++) {
   2510 		msec_delay(4);
   2511 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
   2512 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
   2513 			break;
   2514 	}
   2515 
   2516 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
   2517 		DEBUGOUT("auto negotiation not completed\n");
   2518 		ret_val = IXGBE_ERR_RESET_FAILED;
   2519 		goto reset_pipeline_out;
   2520 	}
   2521 
   2522 	ret_val = IXGBE_SUCCESS;
   2523 
   2524 reset_pipeline_out:
   2525 	/* Write AUTOC register with original LMS field and Restart_AN */
   2526 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
   2527 	IXGBE_WRITE_FLUSH(hw);
   2528 
   2529 	return ret_val;
   2530 }
   2531 
   2532 /**
   2533  *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
   2534  *  @hw: pointer to hardware structure
   2535  *  @byte_offset: byte offset to read
   2536  *  @dev_addr: address to read from
   2537  *  @data: value read
   2538  *
   2539  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
   2540  *  a specified device address.
   2541  **/
   2542 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
   2543 				u8 dev_addr, u8 *data)
   2544 {
   2545 	u32 esdp;
   2546 	s32 status;
   2547 	s32 timeout = 200;
   2548 
   2549 	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
   2550 
   2551 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2552 		/* Acquire I2C bus ownership. */
   2553 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2554 		esdp |= IXGBE_ESDP_SDP0;
   2555 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2556 		IXGBE_WRITE_FLUSH(hw);
   2557 
   2558 		while (timeout) {
   2559 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2560 			if (esdp & IXGBE_ESDP_SDP1)
   2561 				break;
   2562 
   2563 			msec_delay(5);
   2564 			timeout--;
   2565 		}
   2566 
   2567 		if (!timeout) {
   2568 			DEBUGOUT("Driver can't access resource,"
   2569 				 " acquiring I2C bus timeout.\n");
   2570 			status = IXGBE_ERR_I2C;
   2571 			goto release_i2c_access;
   2572 		}
   2573 	}
   2574 
   2575 	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
   2576 
   2577 release_i2c_access:
   2578 
   2579 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2580 		/* Release I2C bus ownership. */
   2581 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2582 		esdp &= ~IXGBE_ESDP_SDP0;
   2583 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2584 		IXGBE_WRITE_FLUSH(hw);
   2585 	}
   2586 
   2587 	return status;
   2588 }
   2589 
   2590 /**
   2591  *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
   2592  *  @hw: pointer to hardware structure
   2593  *  @byte_offset: byte offset to write
   2594  *  @dev_addr: address to read from
   2595  *  @data: value to write
   2596  *
   2597  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
   2598  *  a specified device address.
   2599  **/
   2600 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
   2601 				 u8 dev_addr, u8 data)
   2602 {
   2603 	u32 esdp;
   2604 	s32 status;
   2605 	s32 timeout = 200;
   2606 
   2607 	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
   2608 
   2609 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2610 		/* Acquire I2C bus ownership. */
   2611 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2612 		esdp |= IXGBE_ESDP_SDP0;
   2613 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2614 		IXGBE_WRITE_FLUSH(hw);
   2615 
   2616 		while (timeout) {
   2617 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2618 			if (esdp & IXGBE_ESDP_SDP1)
   2619 				break;
   2620 
   2621 			msec_delay(5);
   2622 			timeout--;
   2623 		}
   2624 
   2625 		if (!timeout) {
   2626 			DEBUGOUT("Driver can't access resource,"
   2627 				 " acquiring I2C bus timeout.\n");
   2628 			status = IXGBE_ERR_I2C;
   2629 			goto release_i2c_access;
   2630 		}
   2631 	}
   2632 
   2633 	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
   2634 
   2635 release_i2c_access:
   2636 
   2637 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2638 		/* Release I2C bus ownership. */
   2639 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2640 		esdp &= ~IXGBE_ESDP_SDP0;
   2641 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2642 		IXGBE_WRITE_FLUSH(hw);
   2643 	}
   2644 
   2645 	return status;
   2646 }
   2647