Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe_82599.c revision 1.17
      1 /* $NetBSD: ixgbe_82599.c,v 1.17 2018/03/30 06:44:30 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4   SPDX-License-Identifier: BSD-3-Clause
      5 
      6   Copyright (c) 2001-2017, Intel Corporation
      7   All rights reserved.
      8 
      9   Redistribution and use in source and binary forms, with or without
     10   modification, are permitted provided that the following conditions are met:
     11 
     12    1. Redistributions of source code must retain the above copyright notice,
     13       this list of conditions and the following disclaimer.
     14 
     15    2. Redistributions in binary form must reproduce the above copyright
     16       notice, this list of conditions and the following disclaimer in the
     17       documentation and/or other materials provided with the distribution.
     18 
     19    3. Neither the name of the Intel Corporation nor the names of its
     20       contributors may be used to endorse or promote products derived from
     21       this software without specific prior written permission.
     22 
     23   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     24   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     27   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     28   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     29   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     30   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     31   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     32   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     33   POSSIBILITY OF SUCH DAMAGE.
     34 
     35 ******************************************************************************/
     36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 320688 2017-07-05 17:27:03Z erj $*/
     37 
     38 #include "ixgbe_type.h"
     39 #include "ixgbe_82599.h"
     40 #include "ixgbe_api.h"
     41 #include "ixgbe_common.h"
     42 #include "ixgbe_phy.h"
     43 
     44 #define IXGBE_82599_MAX_TX_QUEUES 128
     45 #define IXGBE_82599_MAX_RX_QUEUES 128
     46 #define IXGBE_82599_RAR_ENTRIES   128
     47 #define IXGBE_82599_MC_TBL_SIZE   128
     48 #define IXGBE_82599_VFT_TBL_SIZE  128
     49 #define IXGBE_82599_RX_PB_SIZE	  512
     50 
     51 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
     52 					 ixgbe_link_speed speed,
     53 					 bool autoneg_wait_to_complete);
     54 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
     55 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
     56 				   u16 offset, u16 *data);
     57 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
     58 					  u16 words, u16 *data);
     59 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
     60 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     61 					u8 dev_addr, u8 *data);
     62 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     63 					u8 dev_addr, u8 data);
     64 
     65 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
     66 {
     67 	struct ixgbe_mac_info *mac = &hw->mac;
     68 
     69 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
     70 
     71 	/*
     72 	 * enable the laser control functions for SFP+ fiber
     73 	 * and MNG not enabled
     74 	 */
     75 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
     76 	    !ixgbe_mng_enabled(hw)) {
     77 		mac->ops.disable_tx_laser =
     78 				       ixgbe_disable_tx_laser_multispeed_fiber;
     79 		mac->ops.enable_tx_laser =
     80 					ixgbe_enable_tx_laser_multispeed_fiber;
     81 		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
     82 
     83 	} else {
     84 		mac->ops.disable_tx_laser = NULL;
     85 		mac->ops.enable_tx_laser = NULL;
     86 		mac->ops.flap_tx_laser = NULL;
     87 	}
     88 
     89 	if (hw->phy.multispeed_fiber) {
     90 		/* Set up dual speed SFP+ support */
     91 		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
     92 		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
     93 		mac->ops.set_rate_select_speed =
     94 					       ixgbe_set_hard_rate_select_speed;
     95 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
     96 			mac->ops.set_rate_select_speed =
     97 					       ixgbe_set_soft_rate_select_speed;
     98 	} else {
     99 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
    100 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
    101 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
    102 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    103 			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
    104 		} else {
    105 			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
    106 		}
    107 	}
    108 }
    109 
/**
 *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 *  Returns IXGBE_SUCCESS, or the status of phy->ops.identify();
 *  IXGBE_ERR_SFP_NOT_SUPPORTED aborts the remaining pointer setup.
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus: set SDP0's direction
		 * bit and clear SDP1's, drive SDP0 low, and take both pins
		 * out of native mode (presumably SDP0 output / SDP1 input
		 * for bus arbitration — confirm against the datasheet).
		 */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		/* Route I2C byte access through the 82599-specific wrappers
		 * that account for the shared QSFP+ bus.
		 */
		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	/* A known SFP type needs no PHY reset hook */
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
				  ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX PHY gets its own link setup/check/firmware hooks */
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			     ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}
    176 
/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  Replays the EEPROM-provided init sequence for the detected SFP module
 *  (each word written to IXGBE_CORECTL under the MAC CSR semaphore), then
 *  restarts the DSP in 10G SFI mode via a protected AUTOC write.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 *  taken, IXGBE_ERR_PHY on an EEPROM read failure, or
 *  IXGBE_ERR_SFP_SETUP_NOT_COMPLETE if the AUTOC write fails.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		/* Re-derive link ops now that the SFP type is known */
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Stream init words into CORECTL (pre-increment skips the
		 * sequence header word) until the 0xffff terminator.
		 */
		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* EEPROM read failed while holding the lock: release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}
    243 
    244 /**
    245  *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
    246  *  @hw: pointer to hardware structure
    247  *  @locked: Return the if we locked for this read.
    248  *  @reg_val: Value we read from AUTOC
    249  *
    250  *  For this part (82599) we need to wrap read-modify-writes with a possible
    251  *  FW/SW lock.  It is assumed this lock will be freed with the next
    252  *  prot_autoc_write_82599().
    253  */
    254 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
    255 {
    256 	s32 ret_val;
    257 
    258 	*locked = FALSE;
    259 	 /* If LESM is on then we need to hold the SW/FW semaphore. */
    260 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
    261 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
    262 					IXGBE_GSSR_MAC_CSR_SM);
    263 		if (ret_val != IXGBE_SUCCESS)
    264 			return IXGBE_ERR_SWFW_SYNC;
    265 
    266 		*locked = TRUE;
    267 	}
    268 
    269 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    270 	return IXGBE_SUCCESS;
    271 }
    272 
/**
 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *           previous prot_autoc_read_82599.
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
 * AUTOC. Likewise after a write we need to do a pipeline reset.  The lock
 * is always released on exit, whether taken here or by the caller.
 */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 *  - We didn't do it already (in the read part of a read-modify-write)
	 *  - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	/* A pipeline reset is required after any AUTOC update */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}
    316 
/**
 *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82599.
 *  Does not touch the hardware except to read FWSM (for ARC subsystem
 *  detection) and the PCIe MSI-X count.
 *
 *  Returns the status of ixgbe_init_ops_generic(); the 82599-specific
 *  pointer assignments below cannot fail.
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* Start from the generic defaults, then override below */
	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 capacity constants (queues, tables, packet buffer) */
	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* ARC subsystem is present when FWSM reports a nonzero FW mode */
	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}
    410 
/**
 *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: TRUE when autoneg or autotry is enabled
 *
 *  Determines the link capabilities by reading the AUTOC register
 *  (or the stored EEPROM-default copy), keyed on the Link Mode Select
 *  field.  1G SFP modules and multispeed fiber are special-cased.
 *
 *  Returns IXGBE_SUCCESS, or IXGBE_ERR_LINK_SETUP for an unknown LMS.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");


	/* Check if 1G SFP module: these are always 1G with autoneg */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		/* Accumulate speeds from the supported-protocol bits:
		 * KR and KX4 both contribute 10G, KX contributes 1G.
		 */
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		/* SGMII adds a 100M base speed to the protocol bits */
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*autoneg = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break;	/* NOTREACHED: kept for switch symmetry */
	}

	/* Multispeed fiber can always try both 10G and 1G */
	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;

		/* QSFP must not enable full auto-negotiation
		 * Limited autoneg is enabled at 1G
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
			*autoneg = FALSE;
		else
			*autoneg = TRUE;
	}

out:
	return status;
}
    522 
    523 /**
    524  *  ixgbe_get_media_type_82599 - Get media type
    525  *  @hw: pointer to hardware structure
    526  *
    527  *  Returns the media type (fiber, copper, backplane)
    528  **/
    529 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
    530 {
    531 	enum ixgbe_media_type media_type;
    532 
    533 	DEBUGFUNC("ixgbe_get_media_type_82599");
    534 
    535 	/* Detect if there is a copper PHY attached. */
    536 	switch (hw->phy.type) {
    537 	case ixgbe_phy_cu_unknown:
    538 	case ixgbe_phy_tn:
    539 		media_type = ixgbe_media_type_copper;
    540 		goto out;
    541 	default:
    542 		break;
    543 	}
    544 
    545 	switch (hw->device_id) {
    546 	case IXGBE_DEV_ID_82599_KX4:
    547 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
    548 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
    549 	case IXGBE_DEV_ID_82599_KR:
    550 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
    551 	case IXGBE_DEV_ID_82599_XAUI_LOM:
    552 		/* Default device ID is mezzanine card KX/KX4 */
    553 		media_type = ixgbe_media_type_backplane;
    554 		break;
    555 	case IXGBE_DEV_ID_82599_SFP:
    556 	case IXGBE_DEV_ID_82599_SFP_FCOE:
    557 	case IXGBE_DEV_ID_82599_SFP_EM:
    558 	case IXGBE_DEV_ID_82599_SFP_SF2:
    559 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
    560 	case IXGBE_DEV_ID_82599EN_SFP:
    561 		media_type = ixgbe_media_type_fiber;
    562 		break;
    563 	case IXGBE_DEV_ID_82599_CX4:
    564 		media_type = ixgbe_media_type_cx4;
    565 		break;
    566 	case IXGBE_DEV_ID_82599_T3_LOM:
    567 		media_type = ixgbe_media_type_copper;
    568 		break;
    569 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
    570 		media_type = ixgbe_media_type_fiber_qsfp;
    571 		break;
    572 	case IXGBE_DEV_ID_82599_BYPASS:
    573 		media_type = ixgbe_media_type_fiber_fixed;
    574 		hw->phy.multispeed_fiber = TRUE;
    575 		break;
    576 	default:
    577 		media_type = ixgbe_media_type_unknown;
    578 		break;
    579 	}
    580 out:
    581 	return media_type;
    582 }
    583 
/**
 *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
 *  @hw: pointer to hardware structure
 *
 *  Disables link during D3 power down sequence, but only when there is no
 *  manageability firmware, WoL is not enabled, and the EEPROM's CCD
 *  (Clock Control on D3) bit is set.
 *
 **/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	/* Zero-init: if the EEPROM read fails the CCD bit stays clear and
	 * link is left untouched (read status is intentionally ignored).
	 */
	u16 ee_ctrl_2 = 0;

	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		/* Set the link-disable-on-D3 bits in AUTOC2 */
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}
    606 
/**
 *  ixgbe_start_mac_link_82599 - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 *
 *  Returns IXGBE_SUCCESS, the SW/FW semaphore acquire status, or
 *  IXGBE_ERR_AUTONEG_NOT_COMPLETE if AN polling times out.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/*  reset_pipeline requires us to hold this lock as it writes to
	 *  AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		/* AN completion is only meaningful in the KX/KX4/KR
		 * backplane link modes.
		 */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll LINKS for AN completion, 100ms per try */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

out:
	return status;
}
    674 
/**
 *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 *  @hw: pointer to hardware structure
 *
 *  The base drivers may require better control over SFP+ module
 *  PHY states.  This includes selectively shutting down the Tx
 *  laser on the PHY, effectively halting physical link.
 *  No-op when manageability firmware has blocked link changes.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable Tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}
    697 
/**
 *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 *  @hw: pointer to hardware structure
 *
 *  The base drivers may require better control over SFP+ module
 *  PHY states.  This includes selectively turning on the Tx
 *  laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable Tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}
    716 
    717 /**
    718  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
    719  *  @hw: pointer to hardware structure
    720  *
    721  *  When the driver changes the link speeds that it can support,
    722  *  it sets autotry_restart to TRUE to indicate that we need to
    723  *  initiate a new autotry session with the link partner.  To do
    724  *  so, we set the speed then disable and re-enable the Tx laser, to
    725  *  alert the link partner that it also needs to restart autotry on its
    726  *  end.  This is consistent with TRUE clause 37 autoneg, which also
    727  *  involves a loss of signal.
    728  **/
    729 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    730 {
    731 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
    732 
    733 	/* Blocked by MNG FW so bail */
    734 	if (ixgbe_check_reset_blocked(hw))
    735 		return;
    736 
    737 	if (hw->mac.autotry_restart) {
    738 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
    739 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
    740 		hw->mac.autotry_restart = FALSE;
    741 	}
    742 }
    743 
    744 /**
    745  *  ixgbe_set_hard_rate_select_speed - Set module link speed
    746  *  @hw: pointer to hardware structure
    747  *  @speed: link speed to set
    748  *
    749  *  Set module link speed via RS0/RS1 rate select pins.
    750  */
    751 void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
    752 					ixgbe_link_speed speed)
    753 {
    754 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    755 
    756 	switch (speed) {
    757 	case IXGBE_LINK_SPEED_10GB_FULL:
    758 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
    759 		break;
    760 	case IXGBE_LINK_SPEED_1GB_FULL:
    761 		esdp_reg &= ~IXGBE_ESDP_SDP5;
    762 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
    763 		break;
    764 	default:
    765 		DEBUGOUT("Invalid fixed module speed\n");
    766 		return;
    767 	}
    768 
    769 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    770 	IXGBE_WRITE_FLUSH(hw);
    771 }
    772 
    773 /**
    774  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
    775  *  @hw: pointer to hardware structure
    776  *  @speed: new link speed
    777  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    778  *
    779  *  Implements the Intel SmartSpeed algorithm.
    780  **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	/* Snapshot AUTOC so we can tell whether KR and KX4/KX were advertised */
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	 /* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			/* Poll in 100ms steps, up to the 500ms budget above */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	/* Note when SmartSpeed settled on a lower rate than was requested */
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
    884 
/**
 *  ixgbe_setup_mac_link_82599 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 *  Returns IXGBE_SUCCESS, or an IXGBE_ERR_* code if the capability query,
 *  protected AUTOC write, or (when requested) autoneg completion fails.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* AUTOC value at entry */
	u32 orig_autoc = 0; /* cached (EEPROM default) value of AUTOC */
	u32 autoc = current_autoc; /* working copy; written back only if changed */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	/* Drop any requested speeds the hardware cannot do */
	speed &= link_capabilities;

	if (speed == 0) {
		/* Nothing supported was requested: power the link down */
		ixgbe_disable_tx_laser(hw); /* For fiber */
		ixgbe_set_phy_power(hw, false); /* For copper */
	} else {
		/* In case previous media setting was none(down) */
		ixgbe_enable_tx_laser(hw); /* for Fiber */
		ixgbe_set_phy_power(hw, true); /* For copper */
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* KR is withheld while SmartSpeed is actively
			 * downgrading the link (see smartspeed setup) */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}
   1006 
   1007 /**
   1008  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
   1009  *  @hw: pointer to hardware structure
   1010  *  @speed: new link speed
   1011  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
   1012  *
   1013  *  Restarts link on PHY and MAC based on settings passed in.
   1014  **/
   1015 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
   1016 					 ixgbe_link_speed speed,
   1017 					 bool autoneg_wait_to_complete)
   1018 {
   1019 	s32 status;
   1020 
   1021 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
   1022 
   1023 	/* Setup the PHY according to input speed */
   1024 	status = hw->phy.ops.setup_link_speed(hw, speed,
   1025 					      autoneg_wait_to_complete);
   1026 	/* Set up MAC */
   1027 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
   1028 
   1029 	return status;
   1030 }
   1031 
/**
 *  ixgbe_reset_hw_82599 - Perform hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks
 *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 *  reset.
 *
 *  Returns IXGBE_SUCCESS, or an IXGBE_ERR_* code if stopping the adapter,
 *  SFP/PHY init, or restoring the AUTOC register fails.  Reset-poll timeout
 *  sets IXGBE_ERR_RESET_FAILED but the remaining restore steps still run.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		/* One-shot flag: SFP setup is only attempted once per need */
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear meaning reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	/* Timeout is recorded in status but does not abort the restore flow */
	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg with out driver support we need to
		 * leave LMS in the state it was before we MAC reset.
		 * Likewise if we support WoL we don't want change the
		 * LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		/* Restore AUTOC via the protected write path (avoids
		 * clobbering it while it may be owned by FW) */
		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							FALSE);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		/* Only the upper bits of AUTOC2 are restored */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}
   1213 
   1214 /**
   1215  * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
   1216  * @hw: pointer to hardware structure
   1217  * @fdircmd: current value of FDIRCMD register
   1218  */
   1219 static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
   1220 {
   1221 	int i;
   1222 
   1223 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
   1224 		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
   1225 		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
   1226 			return IXGBE_SUCCESS;
   1227 		usec_delay(10);
   1228 	}
   1229 
   1230 	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
   1231 }
   1232 
/**
 *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 *  @hw: pointer to hardware structure
 *
 *  Waits for any in-flight FDIR command to finish, clears the filter tables
 *  and statistics, and re-arms FDIRCTRL (with INIT_DONE cleared) so the
 *  hardware re-runs table initialization.  Returns IXGBE_SUCCESS, the error
 *  from the pending-command poll, or IXGBE_ERR_FDIR_REINIT_FAILED if
 *  INIT_DONE never asserts.
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	/* Re-writing FDIRCTRL with INIT_DONE cleared triggers re-init below */
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}
   1305 
   1306 /**
   1307  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
   1308  *  @hw: pointer to hardware structure
   1309  *  @fdirctrl: value to write to flow director control register
   1310  **/
   1311 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1312 {
   1313 	int i;
   1314 
   1315 	DEBUGFUNC("ixgbe_fdir_enable_82599");
   1316 
   1317 	/* Prime the keys for hashing */
   1318 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
   1319 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
   1320 
   1321 	/*
   1322 	 * Poll init-done after we write the register.  Estimated times:
   1323 	 *      10G: PBALLOC = 11b, timing is 60us
   1324 	 *       1G: PBALLOC = 11b, timing is 600us
   1325 	 *     100M: PBALLOC = 11b, timing is 6ms
   1326 	 *
   1327 	 *     Multiple these timings by 4 if under full Rx load
   1328 	 *
   1329 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
   1330 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
   1331 	 * this might not finish in our poll time, but we can live with that
   1332 	 * for now.
   1333 	 */
   1334 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1335 	IXGBE_WRITE_FLUSH(hw);
   1336 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1337 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1338 				   IXGBE_FDIRCTRL_INIT_DONE)
   1339 			break;
   1340 		msec_delay(1);
   1341 	}
   1342 
   1343 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
   1344 		DEBUGOUT("Flow Director poll time exceeded!\n");
   1345 }
   1346 
   1347 /**
   1348  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
   1349  *  @hw: pointer to hardware structure
   1350  *  @fdirctrl: value to write to flow director control register, initially
   1351  *	     contains just the value of the Rx packet buffer allocation
   1352  **/
   1353 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1354 {
   1355 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
   1356 
   1357 	/*
   1358 	 * Continue setup of fdirctrl register bits:
   1359 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1360 	 *  Set the maximum length per hash bucket to 0xA filters
   1361 	 *  Send interrupt when 64 filters are left
   1362 	 */
   1363 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1364 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1365 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1366 
   1367 	/* write hashes and fdirctrl register, poll for completion */
   1368 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1369 
   1370 	return IXGBE_SUCCESS;
   1371 }
   1372 
   1373 /**
   1374  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
   1375  *  @hw: pointer to hardware structure
   1376  *  @fdirctrl: value to write to flow director control register, initially
   1377  *	     contains just the value of the Rx packet buffer allocation
   1378  *  @cloud_mode: TRUE - cloud mode, FALSE - other mode
   1379  **/
   1380 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
   1381 			bool cloud_mode)
   1382 {
   1383 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
   1384 
   1385 	/*
   1386 	 * Continue setup of fdirctrl register bits:
   1387 	 *  Turn perfect match filtering on
   1388 	 *  Report hash in RSS field of Rx wb descriptor
   1389 	 *  Initialize the drop queue to queue 127
   1390 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1391 	 *  Set the maximum length per hash bucket to 0xA filters
   1392 	 *  Send interrupt when 64 (0x4 * 16) filters are left
   1393 	 */
   1394 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
   1395 		    IXGBE_FDIRCTRL_REPORT_STATUS |
   1396 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
   1397 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1398 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1399 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1400 
   1401 	if (cloud_mode)
   1402 		fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
   1403 					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
   1404 
   1405 	/* write hashes and fdirctrl register, poll for completion */
   1406 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1407 
   1408 	return IXGBE_SUCCESS;
   1409 }
   1410 
   1411 /**
   1412  *  ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
   1413  *  @hw: pointer to hardware structure
   1414  *  @dropqueue: Rx queue index used for the dropped packets
   1415  **/
   1416 void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
   1417 {
   1418 	u32 fdirctrl;
   1419 
   1420 	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
   1421 	/* Clear init done bit and drop queue field */
   1422 	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
   1423 	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
   1424 
   1425 	/* Set drop queue */
   1426 	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
   1427 	if ((hw->mac.type == ixgbe_mac_X550) ||
   1428 	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
   1429 	    (hw->mac.type == ixgbe_mac_X550EM_a))
   1430 		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
   1431 
   1432 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1433 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
   1434 			 IXGBE_FDIRCMD_CLEARHT));
   1435 	IXGBE_WRITE_FLUSH(hw);
   1436 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1437 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1438 			 ~IXGBE_FDIRCMD_CLEARHT));
   1439 	IXGBE_WRITE_FLUSH(hw);
   1440 
   1441 	/* write hashes and fdirctrl register, poll for completion */
   1442 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1443 }
   1444 
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
/* Key bits set in BOTH the bucket and signature keys; such bits feed the
 * shared "common" hash instead of being computed twice. */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
/*
 * One unrolled step of the signature-hash computation: tests key bit n
 * (folding lo_hash_dword) and key bit n+16 (folding hi_hash_dword) into
 * common_hash, bucket_hash, or sig_hash depending on which key(s) have
 * the bit set.  Expects those four u32 locals at the expansion site.
 */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
   1468 
/**
 *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 *  @input: unique input dword (flow ID / VM pool / VLAN bits)
 *  @common: compressed common input dword
 *
 *  Computes the bucket and signature hashes in one pass using unwound
 *  iterations (IXGBE_COMPUTE_SIG_HASH_ITERATION), letting the compiler
 *  resolve all of the conditional ifs at compile time since the keys are
 *  static defines, and computing the two keys at once since the hashed
 *  dword stream is the same for both.  Returns the signature hash (upper
 *  16 bits) XORed with the bucket hash (lower 16 bits), each masked by
 *  IXGBE_ATR_HASH_MASK.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}
   1534 
   1535 /**
   1536  *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
   1537  *  @hw: pointer to hardware structure
   1538  *  @input: unique input dword
   1539  *  @common: compressed common input dword
   1540  *  @queue: queue index to direct traffic to
   1541  *
   1542  * Note that the tunnel bit in input must not be set when the hardware
   1543  * tunneling support does not exist.
   1544  **/
   1545 void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
   1546 					   union ixgbe_atr_hash_dword input,
   1547 					   union ixgbe_atr_hash_dword common,
   1548 					   u8 queue)
   1549 {
   1550 	u64 fdirhashcmd;
   1551 	u8 flow_type;
   1552 	bool tunnel;
   1553 	u32 fdircmd;
   1554 
   1555 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
   1556 
   1557 	/*
   1558 	 * Get the flow_type in order to program FDIRCMD properly
   1559 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
   1560 	 * fifth is FDIRCMD.TUNNEL_FILTER
   1561 	 */
   1562 	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
   1563 	flow_type = input.formatted.flow_type &
   1564 		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
   1565 	switch (flow_type) {
   1566 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   1567 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   1568 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   1569 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
   1570 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
   1571 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
   1572 		break;
   1573 	default:
   1574 		DEBUGOUT(" Error on flow type input\n");
   1575 		return;
   1576 	}
   1577 
   1578 	/* configure FDIRCMD register */
   1579 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
   1580 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
   1581 	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
   1582 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
   1583 	if (tunnel)
   1584 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
   1585 
   1586 	/*
   1587 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
   1588 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
   1589 	 */
   1590 	fdirhashcmd = (u64)fdircmd << 32;
   1591 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
   1592 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
   1593 
   1594 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
   1595 
   1596 	return;
   1597 }
   1598 
/*
 * One step of the bucket-hash computation: folds lo_hash_dword in when
 * bucket-key bit n is set, and hi_hash_dword in when bit n+16 is set.
 * Expects u32 locals bucket_hash, lo_hash_dword and hi_hash_dword at the
 * expansion site.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
   1607 
/**
 *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 *  @input: input bitstream to compute the hash on (modified in place)
 *  @input_mask: mask for the input bitstream
 *
 *  This function serves two main purposes.  First it applies the input_mask
 *  to the atr_input resulting in a cleaned up atr_input data stream.
 *  Secondly it computes the hash and stores it in the bkt_hash field at
 *  the end of the input byte stream.  This way it will be available for
 *  future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	u32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data */
	for (i = 0; i < 14; i++)
		input->dword_stream[i]  &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = IXGBE_NTOHL(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}
   1666 
   1667 /**
   1668  *  ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
   1669  *  @input_mask: mask to be bit swapped
   1670  *
   1671  *  The source and destination port masks for flow director are bit swapped
   1672  *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
   1673  *  generate a correctly swapped value we need to bit swap the mask and that
   1674  *  is what is accomplished by this function.
   1675  **/
   1676 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
   1677 {
   1678 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
   1679 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
   1680 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
   1681 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
   1682 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
   1683 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
   1684 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
   1685 }
   1686 
   1687 /*
   1688  * These two macros are meant to address the fact that we have registers
   1689  * that are either all or in part big-endian.  As a result on big-endian
   1690  * systems we will end up byte swapping the value to little-endian before
   1691  * it is byte swapped again and written to the hardware in the original
   1692  * big-endian format.
   1693  */
   1694 #define IXGBE_STORE_AS_BE32(_value) \
   1695 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
   1696 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
   1697 
   1698 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
   1699 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
   1700 
   1701 #define IXGBE_STORE_AS_BE16(_value) \
   1702 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
   1703 
/**
 *  ixgbe_fdir_set_input_mask_82599 - Program flow director field masks
 *  @hw: pointer to hardware structure
 *  @input_mask: mask for the input bitstream
 *  @cloud_mode: TRUE when programming masks for tunneled (cloud) filters
 *
 *  Programs FDIRM and the per-field mask registers from @input_mask.
 *  For each field only fully-masked/fully-compared values (plus a few
 *  explicitly handled partial masks) are supported; any other mask value
 *  returns IXGBE_ERR_CONFIG.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask, bool cloud_mode)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;
	u32 fdirip6m;
	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* FALLTHROUGH */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		/* masking L4 type while comparing ports is contradictory */
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* FALLTHROUGH */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (cloud_mode) {
		fdirm |= IXGBE_FDIRM_L3P;
		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;

		/* only the first inner MAC byte is checked as a proxy for
		 * the whole address mask */
		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
		case 0x00:
			/* Mask inner MAC, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
			/* FALLTHROUGH */
		case 0xFF:
			break;
		default:
			DEBUGOUT(" Error on inner_mac byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
		case 0x0:
			/* Mask vxlan id */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			/* compare only the low 24 bits (the VNI) */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			DEBUGOUT(" Error on TNI/VNI byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
		case 0x0:
			/* Mask tunnel type, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			/* FALLTHROUGH */
		case 0xFFFF:
			break;
		default:
			DEBUGOUT(" Error on tunnel type byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);

		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
		 * L3/L3 packets to tunnel.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
		/* X550-class parts additionally provide an SCTP mask reg */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
			break;
		default:
			break;
		}
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	if (!cloud_mode) {
		/* store the TCP/UDP port masks, bit reversed from port
		 * layout */
		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

		/* write both the same so that UDP and TCP use the same mask */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
		/* also use it for SCTP */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
			break;
		default:
			break;
		}

		/* store source and destination IP masks (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
				     ~input_mask->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
				     ~input_mask->formatted.dst_ip[0]);
	}
	return IXGBE_SUCCESS;
}
   1878 
/**
 *  ixgbe_fdir_write_perfect_filter_82599 - write a perfect filter to HW
 *  @hw: pointer to hardware structure
 *  @input: input bitstream (already masked, with bkt_hash computed)
 *  @soft_id: software index identifying this filter
 *  @queue: Rx queue to direct matching traffic to
 *  @cloud_mode: TRUE when programming a tunneled (cloud) filter
 *
 *  Programs the per-field filter registers, then issues an ADD_FLOW
 *  command through FDIRCMD and waits for completion.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue, bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
			input->formatted.src_ip[0]);

		/* record the first 32 bits of the destination address
		 * (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
			input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		/* high bit is set whenever a non-zero tunnel type is given */
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000;

		/* pack the 6-byte inner MAC into the SIPv6 register pair */
		addr_low = ((u32)input->formatted.inner_mac[0] |
				((u32)input->formatted.inner_mac[1] << 8) |
				((u32)input->formatted.inner_mac[2] << 16) |
				((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
				((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
	}

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	/* issue the command and poll until the hardware has consumed it */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}
   1967 
/**
 *  ixgbe_fdir_erase_perfect_filter_82599 - remove a perfect filter
 *  @hw: pointer to hardware structure
 *  @input: input bitstream (bkt_hash identifies the filter's bucket)
 *  @soft_id: software index of the filter to remove
 *
 *  Queries the hardware for the filter and, when it is present, issues a
 *  REMOVE_FLOW command for it.  Returns IXGBE_SUCCESS even when the
 *  filter was not present.
 **/
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	/* fdircmd is updated with the completed command word */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return IXGBE_SUCCESS;
}
   2003 
   2004 /**
   2005  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
   2006  *  @hw: pointer to hardware structure
   2007  *  @input: input bitstream
   2008  *  @input_mask: mask for the input bitstream
   2009  *  @soft_id: software index for the filters
   2010  *  @queue: queue index to direct traffic to
   2011  *
   2012  *  Note that the caller to this function must lock before calling, since the
   2013  *  hardware writes must be protected from one another.
   2014  **/
   2015 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
   2016 					union ixgbe_atr_input *input,
   2017 					union ixgbe_atr_input *input_mask,
   2018 					u16 soft_id, u8 queue, bool cloud_mode)
   2019 {
   2020 	s32 err = IXGBE_ERR_CONFIG;
   2021 
   2022 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
   2023 
   2024 	/*
   2025 	 * Check flow_type formatting, and bail out before we touch the hardware
   2026 	 * if there's a configuration issue
   2027 	 */
   2028 	switch (input->formatted.flow_type) {
   2029 	case IXGBE_ATR_FLOW_TYPE_IPV4:
   2030 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
   2031 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
   2032 		if (input->formatted.dst_port || input->formatted.src_port) {
   2033 			DEBUGOUT(" Error on src/dst port\n");
   2034 			return IXGBE_ERR_CONFIG;
   2035 		}
   2036 		break;
   2037 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   2038 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
   2039 		if (input->formatted.dst_port || input->formatted.src_port) {
   2040 			DEBUGOUT(" Error on src/dst port\n");
   2041 			return IXGBE_ERR_CONFIG;
   2042 		}
   2043 		/* fall through */
   2044 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   2045 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
   2046 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   2047 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
   2048 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
   2049 						  IXGBE_ATR_L4TYPE_MASK;
   2050 		break;
   2051 	default:
   2052 		DEBUGOUT(" Error on flow type input\n");
   2053 		return err;
   2054 	}
   2055 
   2056 	/* program input mask into the HW */
   2057 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
   2058 	if (err)
   2059 		return err;
   2060 
   2061 	/* apply mask and compute/store hash */
   2062 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
   2063 
   2064 	/* program filters to filter memory */
   2065 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
   2066 						     soft_id, queue, cloud_mode);
   2067 }
   2068 
   2069 /**
   2070  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
   2071  *  @hw: pointer to hardware structure
   2072  *  @reg: analog register to read
   2073  *  @val: read value
   2074  *
   2075  *  Performs read operation to Omer analog register specified.
   2076  **/
   2077 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
   2078 {
   2079 	u32  core_ctl;
   2080 
   2081 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
   2082 
   2083 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
   2084 			(reg << 8));
   2085 	IXGBE_WRITE_FLUSH(hw);
   2086 	usec_delay(10);
   2087 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
   2088 	*val = (u8)core_ctl;
   2089 
   2090 	return IXGBE_SUCCESS;
   2091 }
   2092 
   2093 /**
   2094  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
   2095  *  @hw: pointer to hardware structure
   2096  *  @reg: atlas register to write
   2097  *  @val: value to write
   2098  *
   2099  *  Performs write operation to Omer analog register specified.
   2100  **/
   2101 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
   2102 {
   2103 	u32  core_ctl;
   2104 
   2105 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
   2106 
   2107 	core_ctl = (reg << 8) | val;
   2108 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
   2109 	IXGBE_WRITE_FLUSH(hw);
   2110 	usec_delay(10);
   2111 
   2112 	return IXGBE_SUCCESS;
   2113 }
   2114 
   2115 /**
   2116  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
   2117  *  @hw: pointer to hardware structure
   2118  *
   2119  *  Starts the hardware using the generic start_hw function
   2120  *  and the generation start_hw function.
   2121  *  Then performs revision-specific operations, if any.
   2122  **/
   2123 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
   2124 {
   2125 	s32 ret_val = IXGBE_SUCCESS;
   2126 
   2127 	DEBUGFUNC("ixgbe_start_hw_82599");
   2128 
   2129 	ret_val = ixgbe_start_hw_generic(hw);
   2130 	if (ret_val != IXGBE_SUCCESS)
   2131 		goto out;
   2132 
   2133 	ret_val = ixgbe_start_hw_gen2(hw);
   2134 	if (ret_val != IXGBE_SUCCESS)
   2135 		goto out;
   2136 
   2137 	/* We need to run link autotry after the driver loads */
   2138 	hw->mac.autotry_restart = TRUE;
   2139 
   2140 	if (ret_val == IXGBE_SUCCESS)
   2141 		ret_val = ixgbe_verify_fw_version_82599(hw);
   2142 out:
   2143 	return ret_val;
   2144 }
   2145 
   2146 /**
   2147  *  ixgbe_identify_phy_82599 - Get physical layer module
   2148  *  @hw: pointer to hardware structure
   2149  *
   2150  *  Determines the physical layer module found on the current adapter.
   2151  *  If PHY already detected, maintains current PHY type in hw struct,
   2152  *  otherwise executes the PHY detection routine.
   2153  **/
   2154 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
   2155 {
   2156 	s32 status;
   2157 
   2158 	DEBUGFUNC("ixgbe_identify_phy_82599");
   2159 
   2160 	/* Detect PHY if not unknown - returns success if already detected. */
   2161 	status = ixgbe_identify_phy_generic(hw);
   2162 	if (status != IXGBE_SUCCESS) {
   2163 		/* 82599 10GBASE-T requires an external PHY */
   2164 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
   2165 			return status;
   2166 		else
   2167 			status = ixgbe_identify_module_generic(hw);
   2168 	}
   2169 
   2170 	/* Set PHY type none if no PHY detected */
   2171 	if (hw->phy.type == ixgbe_phy_unknown) {
   2172 		hw->phy.type = ixgbe_phy_none;
   2173 		return IXGBE_SUCCESS;
   2174 	}
   2175 
   2176 	/* Return error if SFP module has been detected but is not supported */
   2177 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
   2178 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
   2179 
   2180 	return status;
   2181 }
   2182 
   2183 /**
   2184  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
   2185  *  @hw: pointer to hardware structure
   2186  *
   2187  *  Determines physical layer capabilities of the current configuration.
   2188  **/
   2189 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
   2190 {
   2191 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   2192 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   2193 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   2194 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
   2195 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
   2196 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
   2197 	u16 ext_ability = 0;
   2198 
   2199 	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
   2200 
   2201 	hw->phy.ops.identify(hw);
   2202 
   2203 	switch (hw->phy.type) {
   2204 	case ixgbe_phy_tn:
   2205 	case ixgbe_phy_cu_unknown:
   2206 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
   2207 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
   2208 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
   2209 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
   2210 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
   2211 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
   2212 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
   2213 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
   2214 		goto out;
   2215 	default:
   2216 		break;
   2217 	}
   2218 
   2219 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
   2220 	case IXGBE_AUTOC_LMS_1G_AN:
   2221 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
   2222 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
   2223 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
   2224 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
   2225 			goto out;
   2226 		} else
   2227 			/* SFI mode so read SFP module */
   2228 			goto sfp_check;
   2229 		break;
   2230 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
   2231 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
   2232 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
   2233 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
   2234 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2235 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
   2236 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
   2237 		goto out;
   2238 		break;
   2239 	case IXGBE_AUTOC_LMS_10G_SERIAL:
   2240 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
   2241 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2242 			goto out;
   2243 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
   2244 			goto sfp_check;
   2245 		break;
   2246 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
   2247 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
   2248 		if (autoc & IXGBE_AUTOC_KX_SUPP)
   2249 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
   2250 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
   2251 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2252 		if (autoc & IXGBE_AUTOC_KR_SUPP)
   2253 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2254 		goto out;
   2255 		break;
   2256 	default:
   2257 		goto out;
   2258 		break;
   2259 	}
   2260 
   2261 sfp_check:
   2262 	/* SFP check must be done last since DA modules are sometimes used to
   2263 	 * test KR mode -  we need to id KR mode correctly before SFP module.
   2264 	 * Call identify_sfp because the pluggable module may have changed */
   2265 	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
   2266 out:
   2267 	return physical_layer;
   2268 }
   2269 
   2270 /**
   2271  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
   2272  *  @hw: pointer to hardware structure
   2273  *  @regval: register value to write to RXCTRL
   2274  *
   2275  *  Enables the Rx DMA unit for 82599
   2276  **/
   2277 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
   2278 {
   2279 
   2280 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
   2281 
   2282 	/*
   2283 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
   2284 	 * If traffic is incoming before we enable the Rx unit, it could hang
   2285 	 * the Rx DMA unit.  Therefore, make sure the security engine is
   2286 	 * completely disabled prior to enabling the Rx unit.
   2287 	 */
   2288 
   2289 	hw->mac.ops.disable_sec_rx_path(hw);
   2290 
   2291 	if (regval & IXGBE_RXCTRL_RXEN)
   2292 		ixgbe_enable_rx(hw);
   2293 	else
   2294 		ixgbe_disable_rx(hw);
   2295 
   2296 	hw->mac.ops.enable_sec_rx_path(hw);
   2297 
   2298 	return IXGBE_SUCCESS;
   2299 }
   2300 
   2301 /**
   2302  *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
   2303  *  @hw: pointer to hardware structure
   2304  *
   2305  *  Verifies that installed the firmware version is 0.6 or higher
   2306  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
   2307  *
   2308  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
   2309  *  if the FW version is not supported.
   2310  **/
   2311 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
   2312 {
   2313 	s32 status = IXGBE_ERR_EEPROM_VERSION;
   2314 	u16 fw_offset, fw_ptp_cfg_offset;
   2315 	u16 fw_version;
   2316 
   2317 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
   2318 
   2319 	/* firmware check is only necessary for SFI devices */
   2320 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
   2321 		status = IXGBE_SUCCESS;
   2322 		goto fw_version_out;
   2323 	}
   2324 
   2325 	/* get the offset to the Firmware Module block */
   2326 	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
   2327 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2328 			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
   2329 		return IXGBE_ERR_EEPROM_VERSION;
   2330 	}
   2331 
   2332 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
   2333 		goto fw_version_out;
   2334 
   2335 	/* get the offset to the Pass Through Patch Configuration block */
   2336 	if (hw->eeprom.ops.read(hw, (fw_offset +
   2337 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
   2338 				 &fw_ptp_cfg_offset)) {
   2339 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2340 			      "eeprom read at offset %d failed",
   2341 			      fw_offset +
   2342 			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
   2343 		return IXGBE_ERR_EEPROM_VERSION;
   2344 	}
   2345 
   2346 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
   2347 		goto fw_version_out;
   2348 
   2349 	/* get the firmware version */
   2350 	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
   2351 			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
   2352 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   2353 			      "eeprom read at offset %d failed",
   2354 			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
   2355 		return IXGBE_ERR_EEPROM_VERSION;
   2356 	}
   2357 
   2358 	if (fw_version > 0x5)
   2359 		status = IXGBE_SUCCESS;
   2360 
   2361 fw_version_out:
   2362 	return status;
   2363 }
   2364 
   2365 /**
   2366  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
   2367  *  @hw: pointer to hardware structure
   2368  *
   2369  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
   2370  *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
   2371  **/
   2372 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
   2373 {
   2374 	bool lesm_enabled = FALSE;
   2375 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
   2376 	s32 status;
   2377 
   2378 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
   2379 
   2380 	/* get the offset to the Firmware Module block */
   2381 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
   2382 
   2383 	if ((status != IXGBE_SUCCESS) ||
   2384 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
   2385 		goto out;
   2386 
   2387 	/* get the offset to the LESM Parameters block */
   2388 	status = hw->eeprom.ops.read(hw, (fw_offset +
   2389 				     IXGBE_FW_LESM_PARAMETERS_PTR),
   2390 				     &fw_lesm_param_offset);
   2391 
   2392 	if ((status != IXGBE_SUCCESS) ||
   2393 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
   2394 		goto out;
   2395 
   2396 	/* get the LESM state word */
   2397 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
   2398 				     IXGBE_FW_LESM_STATE_1),
   2399 				     &fw_lesm_state);
   2400 
   2401 	if ((status == IXGBE_SUCCESS) &&
   2402 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
   2403 		lesm_enabled = TRUE;
   2404 
   2405 out:
   2406 	return lesm_enabled;
   2407 }
   2408 
   2409 /**
   2410  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
   2411  *  fastest available method
   2412  *
   2413  *  @hw: pointer to hardware structure
   2414  *  @offset: offset of  word in EEPROM to read
   2415  *  @words: number of words
   2416  *  @data: word(s) read from the EEPROM
   2417  *
   2418  *  Retrieves 16 bit word(s) read from EEPROM
   2419  **/
   2420 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
   2421 					  u16 words, u16 *data)
   2422 {
   2423 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2424 	s32 ret_val = IXGBE_ERR_CONFIG;
   2425 
   2426 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
   2427 
   2428 	/*
   2429 	 * If EEPROM is detected and can be addressed using 14 bits,
   2430 	 * use EERD otherwise use bit bang
   2431 	 */
   2432 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2433 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
   2434 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
   2435 							 data);
   2436 	else
   2437 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
   2438 								    words,
   2439 								    data);
   2440 
   2441 	return ret_val;
   2442 }
   2443 
   2444 /**
   2445  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
   2446  *  fastest available method
   2447  *
   2448  *  @hw: pointer to hardware structure
   2449  *  @offset: offset of  word in the EEPROM to read
   2450  *  @data: word read from the EEPROM
   2451  *
   2452  *  Reads a 16 bit word from the EEPROM
   2453  **/
   2454 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
   2455 				   u16 offset, u16 *data)
   2456 {
   2457 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2458 	s32 ret_val = IXGBE_ERR_CONFIG;
   2459 
   2460 	DEBUGFUNC("ixgbe_read_eeprom_82599");
   2461 
   2462 	/*
   2463 	 * If EEPROM is detected and can be addressed using 14 bits,
   2464 	 * use EERD otherwise use bit bang
   2465 	 */
   2466 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2467 	    (offset <= IXGBE_EERD_MAX_ADDR))
   2468 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
   2469 	else
   2470 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
   2471 
   2472 	return ret_val;
   2473 }
   2474 
   2475 /**
   2476  * ixgbe_reset_pipeline_82599 - perform pipeline reset
   2477  *
   2478  *  @hw: pointer to hardware structure
   2479  *
   2480  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
   2481  * full pipeline reset.  This function assumes the SW/FW lock is held.
   2482  **/
   2483 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
   2484 {
   2485 	s32 ret_val;
   2486 	u32 anlp1_reg = 0;
   2487 	u32 i, autoc_reg, autoc2_reg;
   2488 
   2489 	/* Enable link if disabled in NVM */
   2490 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   2491 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
   2492 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
   2493 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
   2494 		IXGBE_WRITE_FLUSH(hw);
   2495 	}
   2496 
   2497 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   2498 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
   2499 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
   2500 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
   2501 			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
   2502 	/* Wait for AN to leave state 0 */
   2503 	for (i = 0; i < 10; i++) {
   2504 		msec_delay(4);
   2505 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
   2506 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
   2507 			break;
   2508 	}
   2509 
   2510 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
   2511 		DEBUGOUT("auto negotiation not completed\n");
   2512 		ret_val = IXGBE_ERR_RESET_FAILED;
   2513 		goto reset_pipeline_out;
   2514 	}
   2515 
   2516 	ret_val = IXGBE_SUCCESS;
   2517 
   2518 reset_pipeline_out:
   2519 	/* Write AUTOC register with original LMS field and Restart_AN */
   2520 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
   2521 	IXGBE_WRITE_FLUSH(hw);
   2522 
   2523 	return ret_val;
   2524 }
   2525 
   2526 /**
   2527  *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
   2528  *  @hw: pointer to hardware structure
   2529  *  @byte_offset: byte offset to read
   2530  *  @data: value read
   2531  *
   2532  *  Performs byte read operation to SFP module's EEPROM over I2C interface at
   2533  *  a specified device address.
   2534  **/
   2535 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
   2536 				u8 dev_addr, u8 *data)
   2537 {
   2538 	u32 esdp;
   2539 	s32 status;
   2540 	s32 timeout = 200;
   2541 
   2542 	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
   2543 
   2544 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2545 		/* Acquire I2C bus ownership. */
   2546 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2547 		esdp |= IXGBE_ESDP_SDP0;
   2548 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2549 		IXGBE_WRITE_FLUSH(hw);
   2550 
   2551 		while (timeout) {
   2552 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2553 			if (esdp & IXGBE_ESDP_SDP1)
   2554 				break;
   2555 
   2556 			msec_delay(5);
   2557 			timeout--;
   2558 		}
   2559 
   2560 		if (!timeout) {
   2561 			DEBUGOUT("Driver can't access resource,"
   2562 				 " acquiring I2C bus timeout.\n");
   2563 			status = IXGBE_ERR_I2C;
   2564 			goto release_i2c_access;
   2565 		}
   2566 	}
   2567 
   2568 	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
   2569 
   2570 release_i2c_access:
   2571 
   2572 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2573 		/* Release I2C bus ownership. */
   2574 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2575 		esdp &= ~IXGBE_ESDP_SDP0;
   2576 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2577 		IXGBE_WRITE_FLUSH(hw);
   2578 	}
   2579 
   2580 	return status;
   2581 }
   2582 
   2583 /**
   2584  *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
   2585  *  @hw: pointer to hardware structure
   2586  *  @byte_offset: byte offset to write
   2587  *  @data: value to write
   2588  *
   2589  *  Performs byte write operation to SFP module's EEPROM over I2C interface at
   2590  *  a specified device address.
   2591  **/
   2592 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
   2593 				 u8 dev_addr, u8 data)
   2594 {
   2595 	u32 esdp;
   2596 	s32 status;
   2597 	s32 timeout = 200;
   2598 
   2599 	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
   2600 
   2601 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2602 		/* Acquire I2C bus ownership. */
   2603 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2604 		esdp |= IXGBE_ESDP_SDP0;
   2605 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2606 		IXGBE_WRITE_FLUSH(hw);
   2607 
   2608 		while (timeout) {
   2609 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2610 			if (esdp & IXGBE_ESDP_SDP1)
   2611 				break;
   2612 
   2613 			msec_delay(5);
   2614 			timeout--;
   2615 		}
   2616 
   2617 		if (!timeout) {
   2618 			DEBUGOUT("Driver can't access resource,"
   2619 				 " acquiring I2C bus timeout.\n");
   2620 			status = IXGBE_ERR_I2C;
   2621 			goto release_i2c_access;
   2622 		}
   2623 	}
   2624 
   2625 	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
   2626 
   2627 release_i2c_access:
   2628 
   2629 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
   2630 		/* Release I2C bus ownership. */
   2631 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   2632 		esdp &= ~IXGBE_ESDP_SDP0;
   2633 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   2634 		IXGBE_WRITE_FLUSH(hw);
   2635 	}
   2636 
   2637 	return status;
   2638 }
   2639