      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2012, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.6 2011/01/19 19:36:27 jfv Exp $*/
     34 /*$NetBSD: ixgbe_82599.c,v 1.6 2015/03/27 05:57:28 msaitoh Exp $*/
     35 
     36 #include "ixgbe_type.h"
     37 #include "ixgbe_82599.h"
     38 #include "ixgbe_api.h"
     39 #include "ixgbe_common.h"
     40 #include "ixgbe_phy.h"
     41 
     42 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
     43 					 ixgbe_link_speed speed,
     44 					 bool autoneg,
     45 					 bool autoneg_wait_to_complete);
     46 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
     47 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
     48 				   u16 offset, u16 *data);
     49 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
     50 					  u16 words, u16 *data);
     51 
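         /**
          *  ixgbe_init_mac_link_ops_82599 - Initialize MAC link function pointers
          *  @hw: pointer to hardware structure
          *
          *  Points the Tx laser control ops at the SFP+ multispeed fiber handlers
          *  when the media type is fiber, and selects the multispeed fiber,
          *  SmartSpeed, or default 82599 link setup routine.
          **/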
     52 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
     53 {
     54 	struct ixgbe_mac_info *mac = &hw->mac;
     55 
     56 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
     57 
     58 	/* enable the laser control functions for SFP+ fiber */
     59 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
     60 		mac->ops.disable_tx_laser =
     61 				       &ixgbe_disable_tx_laser_multispeed_fiber;
     62 		mac->ops.enable_tx_laser =
     63 					&ixgbe_enable_tx_laser_multispeed_fiber;
     64 		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
     65 
     66 	} else {
     67 		mac->ops.disable_tx_laser = NULL;
     68 		mac->ops.enable_tx_laser = NULL;
     69 		mac->ops.flap_tx_laser = NULL;
     70 	}
     71 
     72 	if (hw->phy.multispeed_fiber) {
     73 		/* Set up dual speed SFP+ support */
     74 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
     75 	} else {
     76 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
     77 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
     78 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
     79 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
     80 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
     81 		} else {
     82 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
     83 		}
     84 	}
     85 }
     86 
     87 /**
     88  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
     89  *  @hw: pointer to hardware structure
     90  *
     91  *  Initialize any function pointers that were not able to be
      92  *  Initialize any function pointers that could not be set during
      93  *  init_shared_code because the PHY/SFP type was not known.  Perform
      94  *  the SFP init if necessary.
     96 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
     97 {
     98 	struct ixgbe_mac_info *mac = &hw->mac;
     99 	struct ixgbe_phy_info *phy = &hw->phy;
    100 	s32 ret_val = IXGBE_SUCCESS;
    101 
    102 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
    103 
    104 	/* Identify the PHY or SFP module */
    105 	ret_val = phy->ops.identify(hw);
    106 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
    107 		goto init_phy_ops_out;
    108 
    109 	/* Setup function pointers based on detected SFP module and speeds */
    110 	ixgbe_init_mac_link_ops_82599(hw);
    111 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
    112 		hw->phy.ops.reset = NULL;
    113 
    114 	/* If copper media, overwrite with copper function pointers */
    115 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
    116 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
    117 		mac->ops.get_link_capabilities =
    118 				  &ixgbe_get_copper_link_capabilities_generic;
    119 	}
    120 
    121 	/* Set necessary function pointers based on phy type */
    122 	switch (hw->phy.type) {
    123 	case ixgbe_phy_tn:
    124 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
    125 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
    126 		phy->ops.get_firmware_version =
    127 			     &ixgbe_get_phy_firmware_version_tnx;
    128 		break;
    129 	default:
    130 		break;
    131 	}
    132 init_phy_ops_out:
    133 	return ret_val;
    134 }
    135 
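         /**
          *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
          *  @hw: pointer to hardware structure
          *
          *  If an SFP module has been identified, write the EEPROM-provided SFP+
          *  init sequence to the CORECTL register and restart the DSP, returning
          *  the link to SFI mode.
          **/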
    136 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
    137 {
    138 	s32 ret_val = IXGBE_SUCCESS;
    139 	u32 reg_anlp1 = 0;
    140 	u32 i = 0;
    141 	u16 list_offset, data_offset, data_value;
    142 
    143 	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
    144 
    145 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
    146 		ixgbe_init_mac_link_ops_82599(hw);
    147 
    148 		hw->phy.ops.reset = NULL;
    149 
    150 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
    151 							      &data_offset);
    152 		if (ret_val != IXGBE_SUCCESS)
    153 			goto setup_sfp_out;
    154 
    155 		/* PHY config will finish before releasing the semaphore */
    156 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
    157 							IXGBE_GSSR_MAC_CSR_SM);
    158 		if (ret_val != IXGBE_SUCCESS) {
    159 			ret_val = IXGBE_ERR_SWFW_SYNC;
    160 			goto setup_sfp_out;
    161 		}
    162 
    163 		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
    164 		while (data_value != 0xffff) {
    165 			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
    166 			IXGBE_WRITE_FLUSH(hw);
    167 			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
    168 		}
    169 
    170 		/* Release the semaphore */
    171 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
    172 		/* Delay obtaining semaphore again to allow FW access */
    173 		msec_delay(hw->eeprom.semaphore_delay);
    174 
    175 		/* Now restart DSP by setting Restart_AN and clearing LMS */
    176 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
    177 				IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
    178 				IXGBE_AUTOC_AN_RESTART));
    179 
    180 		/* Wait for AN to leave state 0 */
    181 		for (i = 0; i < 10; i++) {
    182 			msec_delay(4);
    183 			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
    184 			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
    185 				break;
    186 		}
    187 		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
    188 			DEBUGOUT("sfp module setup not complete\n");
    189 			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
    190 			goto setup_sfp_out;
    191 		}
    192 
    193 		/* Restart DSP by setting Restart_AN and return to SFI mode */
    194 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
    195 				IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
    196 				IXGBE_AUTOC_AN_RESTART));
    197 	}
    198 
    199 setup_sfp_out:
    200 	return ret_val;
    201 }
    202 
    203 /**
    204  *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
    205  *  @hw: pointer to hardware structure
    206  *
    207  *  Initialize the function pointers and assign the MAC type for 82599.
    208  *  Does not touch the hardware.
    209  **/
    211 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
    212 {
    213 	struct ixgbe_mac_info *mac = &hw->mac;
    214 	struct ixgbe_phy_info *phy = &hw->phy;
    215 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
    216 	s32 ret_val;
    217 
    218 	DEBUGFUNC("ixgbe_init_ops_82599");
    219 
    220 	ret_val = ixgbe_init_phy_ops_generic(hw);
    221 	ret_val = ixgbe_init_ops_generic(hw);
    222 
    223 	/* PHY */
    224 	phy->ops.identify = &ixgbe_identify_phy_82599;
    225 	phy->ops.init = &ixgbe_init_phy_ops_82599;
    226 
    227 	/* MAC */
    228 	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
    229 	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
    230 	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
    231 	mac->ops.get_supported_physical_layer =
    232 				    &ixgbe_get_supported_physical_layer_82599;
    233 	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
    234 	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
    235 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
    236 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
    237 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
    238 	mac->ops.start_hw = &ixgbe_start_hw_82599;
    239 	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
    240 	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
    241 	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
    242 	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
    243 	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
    244 
    245 	/* RAR, Multicast, VLAN */
    246 	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
    247 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
    248 	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
    249 	mac->rar_highwater = 1;
    250 	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
    251 	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
    252 	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
    253 	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
    254 	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
    255 	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
    256 	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
    257 
    258 	/* Link */
    259 	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
    260 	mac->ops.check_link = &ixgbe_check_mac_link_generic;
    261 	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
    262 	ixgbe_init_mac_link_ops_82599(hw);
    263 
    264 	mac->mcft_size		= 128;
    265 	mac->vft_size		= 128;
    266 	mac->num_rar_entries	= 128;
    267 	mac->rx_pb_size		= 512;
    268 	mac->max_tx_queues	= 128;
    269 	mac->max_rx_queues	= 128;
    270 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
    271 
    272 	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
    273 				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
    274 
    275 	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
    276 
    277 	/* EEPROM */
    278 	eeprom->ops.read = &ixgbe_read_eeprom_82599;
    279 	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
    280 
    281 	/* Manageability interface */
    282 	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
     283 
    285 	return ret_val;
    286 }
    287 
    288 /**
    289  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
    290  *  @hw: pointer to hardware structure
    291  *  @speed: pointer to link speed
    292  *  @negotiation: TRUE when autoneg or autotry is enabled
    293  *
    294  *  Determines the link capabilities by reading the AUTOC register.
    295  **/
    296 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
    297 				      ixgbe_link_speed *speed,
    298 				      bool *negotiation)
    299 {
    300 	s32 status = IXGBE_SUCCESS;
    301 	u32 autoc = 0;
    302 
    303 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
     304 
    306 	/* Check if 1G SFP module. */
    307 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
    308 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
    309 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    310 		*negotiation = TRUE;
    311 		goto out;
    312 	}
    313 
    314 	/*
    315 	 * Determine link capabilities based on the stored value of AUTOC,
    316 	 * which represents EEPROM defaults.  If AUTOC value has not
    317 	 * been stored, use the current register values.
    318 	 */
    319 	if (hw->mac.orig_link_settings_stored)
    320 		autoc = hw->mac.orig_autoc;
    321 	else
    322 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    323 
    324 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
    325 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
    326 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    327 		*negotiation = FALSE;
    328 		break;
    329 
    330 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
    331 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    332 		*negotiation = FALSE;
    333 		break;
    334 
    335 	case IXGBE_AUTOC_LMS_1G_AN:
    336 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    337 		*negotiation = TRUE;
    338 		break;
    339 
    340 	case IXGBE_AUTOC_LMS_10G_SERIAL:
    341 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    342 		*negotiation = FALSE;
    343 		break;
    344 
    345 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
    346 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
    347 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
    348 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    349 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    350 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    351 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    352 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    353 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    354 		*negotiation = TRUE;
    355 		break;
    356 
    357 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
    358 		*speed = IXGBE_LINK_SPEED_100_FULL;
    359 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    360 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    361 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    362 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    363 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    364 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    365 		*negotiation = TRUE;
    366 		break;
    367 
    368 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
    369 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
    370 		*negotiation = FALSE;
    371 		break;
    372 
    373 	default:
    374 		status = IXGBE_ERR_LINK_SETUP;
    375 		goto out;
    376 		break;
    377 	}
    378 
    379 	if (hw->phy.multispeed_fiber) {
    380 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
    381 			  IXGBE_LINK_SPEED_1GB_FULL;
    382 		*negotiation = TRUE;
    383 	}
    384 
    385 out:
    386 	return status;
    387 }
    388 
    389 /**
    390  *  ixgbe_get_media_type_82599 - Get media type
    391  *  @hw: pointer to hardware structure
    392  *
    393  *  Returns the media type (fiber, copper, backplane)
    394  **/
    395 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
    396 {
    397 	enum ixgbe_media_type media_type;
    398 
    399 	DEBUGFUNC("ixgbe_get_media_type_82599");
    400 
    401 	/* Detect if there is a copper PHY attached. */
    402 	switch (hw->phy.type) {
    403 	case ixgbe_phy_cu_unknown:
    404 	case ixgbe_phy_tn:
    405 		media_type = ixgbe_media_type_copper;
    406 		goto out;
    407 	default:
    408 		break;
    409 	}
    410 
    411 	switch (hw->device_id) {
    412 	case IXGBE_DEV_ID_82599_KX4:
    413 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
    414 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
    415 	case IXGBE_DEV_ID_82599_KR:
    416 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
    417 	case IXGBE_DEV_ID_82599_XAUI_LOM:
    418 		/* Default device ID is mezzanine card KX/KX4 */
    419 		media_type = ixgbe_media_type_backplane;
    420 		break;
    421 	case IXGBE_DEV_ID_82599_SFP:
    422 	case IXGBE_DEV_ID_82599_SFP_FCOE:
    423 	case IXGBE_DEV_ID_82599_SFP_SF2:
    424 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
    425 	case IXGBE_DEV_ID_82599_SFP_EM:
    426 	case IXGBE_DEV_ID_82599EN_SFP:
    427 		media_type = ixgbe_media_type_fiber;
    428 		break;
    429 	case IXGBE_DEV_ID_82599_CX4:
    430 		media_type = ixgbe_media_type_cx4;
    431 		break;
    432 	case IXGBE_DEV_ID_82599_T3_LOM:
    433 		media_type = ixgbe_media_type_copper;
    434 		break;
    435 	default:
    436 		media_type = ixgbe_media_type_unknown;
    437 		break;
    438 	}
    439 out:
    440 	return media_type;
    441 }
    442 
    443 /**
    444  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
    445  *  @hw: pointer to hardware structure
    446  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    447  *
    448  *  Configures link settings based on values in the ixgbe_hw struct.
    449  *  Restarts the link.  Performs autonegotiation if needed.
    450  **/
    451 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
    452 			       bool autoneg_wait_to_complete)
    453 {
    454 	u32 autoc_reg;
    455 	u32 links_reg;
    456 	u32 i;
    457 	s32 status = IXGBE_SUCCESS;
    458 
    459 	DEBUGFUNC("ixgbe_start_mac_link_82599");
     460 
    462 	/* Restart link */
    463 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    464 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
    465 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
    466 
    467 	/* Only poll for autoneg to complete if specified to do so */
    468 	if (autoneg_wait_to_complete) {
    469 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
    470 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
    471 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
    472 		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    473 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
    474 		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    475 			links_reg = 0; /* Just in case Autoneg time = 0 */
    476 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
    477 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
    478 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
    479 					break;
    480 				msec_delay(100);
    481 			}
    482 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
    483 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
    484 				DEBUGOUT("Autoneg did not complete.\n");
    485 			}
    486 		}
    487 	}
    488 
     489 	/* Add delay to filter out noise during initial link setup */
    490 	msec_delay(50);
    491 
    492 	return status;
    493 }
    494 
    495 /**
    496  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
    497  *  @hw: pointer to hardware structure
    498  *
    499  *  The base drivers may require better control over SFP+ module
    500  *  PHY states.  This includes selectively shutting down the Tx
    501  *  laser on the PHY, effectively halting physical link.
    502  **/
    503 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    504 {
    505 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    506 
    507 	/* Disable tx laser; allow 100us to go dark per spec */
    508 	esdp_reg |= IXGBE_ESDP_SDP3;
    509 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    510 	IXGBE_WRITE_FLUSH(hw);
    511 	usec_delay(100);
    512 }
    513 
    514 /**
    515  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
    516  *  @hw: pointer to hardware structure
    517  *
    518  *  The base drivers may require better control over SFP+ module
    519  *  PHY states.  This includes selectively turning on the Tx
    520  *  laser on the PHY, effectively starting physical link.
    521  **/
    522 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    523 {
    524 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    525 
    526 	/* Enable tx laser; allow 100ms to light up */
    527 	esdp_reg &= ~IXGBE_ESDP_SDP3;
    528 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    529 	IXGBE_WRITE_FLUSH(hw);
    530 	msec_delay(100);
    531 }
    532 
    533 /**
    534  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
    535  *  @hw: pointer to hardware structure
    536  *
    537  *  When the driver changes the link speeds that it can support,
    538  *  it sets autotry_restart to TRUE to indicate that we need to
    539  *  initiate a new autotry session with the link partner.  To do
    540  *  so, we set the speed then disable and re-enable the tx laser, to
    541  *  alert the link partner that it also needs to restart autotry on its
     542  *  end.  This is consistent with true clause 37 autoneg, which also
    543  *  involves a loss of signal.
    544  **/
    545 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    546 {
    547 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
    548 
    549 	if (hw->mac.autotry_restart) {
    550 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
    551 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
    552 		hw->mac.autotry_restart = FALSE;
    553 	}
    554 }
    555 
    556 /**
    557  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
    558  *  @hw: pointer to hardware structure
    559  *  @speed: new link speed
    560  *  @autoneg: TRUE if autonegotiation enabled
    561  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    562  *
    563  *  Set the link speed in the AUTOC register and restarts link.
    564  **/
    565 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
    566 				     ixgbe_link_speed speed, bool autoneg,
    567 				     bool autoneg_wait_to_complete)
    568 {
    569 	s32 status = IXGBE_SUCCESS;
    570 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
    571 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
    572 	u32 speedcnt = 0;
    573 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    574 	u32 i = 0;
    575 	bool link_up = FALSE;
    576 	bool negotiation;
    577 
    578 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
    579 
    580 	/* Mask off requested but non-supported speeds */
    581 	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
    582 	if (status != IXGBE_SUCCESS)
    583 		return status;
    584 
    585 	speed &= link_speed;
    586 
    587 	/*
    588 	 * Try each speed one by one, highest priority first.  We do this in
    589 	 * software because 10gb fiber doesn't support speed autonegotiation.
    590 	 */
    591 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
    592 		speedcnt++;
    593 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
    594 
    595 		/* If we already have link at this speed, just jump out */
    596 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
    597 		if (status != IXGBE_SUCCESS)
    598 			return status;
    599 
    600 		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
    601 			goto out;
    602 
    603 		/* Set the module link speed */
    604 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
    605 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    606 		IXGBE_WRITE_FLUSH(hw);
    607 
    608 		/* Allow module to change analog characteristics (1G->10G) */
    609 		msec_delay(40);
    610 
    611 		status = ixgbe_setup_mac_link_82599(hw,
    612 						    IXGBE_LINK_SPEED_10GB_FULL,
    613 						    autoneg,
    614 						    autoneg_wait_to_complete);
    615 		if (status != IXGBE_SUCCESS)
    616 			return status;
    617 
    618 		/* Flap the tx laser if it has not already been done */
    619 		ixgbe_flap_tx_laser(hw);
    620 
    621 		/*
    622 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
    623 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
    624 		 * attempted.  82599 uses the same timing for 10g SFI.
    625 		 */
    626 		for (i = 0; i < 5; i++) {
    627 			/* Wait for the link partner to also set speed */
    628 			msec_delay(100);
    629 
    630 			/* If we have link, just jump out */
    631 			status = ixgbe_check_link(hw, &link_speed,
    632 						  &link_up, FALSE);
    633 			if (status != IXGBE_SUCCESS)
    634 				return status;
    635 
    636 			if (link_up)
    637 				goto out;
    638 		}
    639 	}
    640 
    641 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
    642 		speedcnt++;
    643 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
    644 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
    645 
    646 		/* If we already have link at this speed, just jump out */
    647 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
    648 		if (status != IXGBE_SUCCESS)
    649 			return status;
    650 
    651 		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
    652 			goto out;
    653 
    654 		/* Set the module link speed */
    655 		esdp_reg &= ~IXGBE_ESDP_SDP5;
    656 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
    657 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    658 		IXGBE_WRITE_FLUSH(hw);
    659 
    660 		/* Allow module to change analog characteristics (10G->1G) */
    661 		msec_delay(40);
    662 
    663 		status = ixgbe_setup_mac_link_82599(hw,
    664 						    IXGBE_LINK_SPEED_1GB_FULL,
    665 						    autoneg,
    666 						    autoneg_wait_to_complete);
    667 		if (status != IXGBE_SUCCESS)
    668 			return status;
    669 
    670 		/* Flap the tx laser if it has not already been done */
    671 		ixgbe_flap_tx_laser(hw);
    672 
    673 		/* Wait for the link partner to also set speed */
    674 		msec_delay(100);
    675 
    676 		/* If we have link, just jump out */
    677 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
    678 		if (status != IXGBE_SUCCESS)
    679 			return status;
    680 
    681 		if (link_up)
    682 			goto out;
    683 	}
    684 
    685 	/*
     686 	 * We didn't get link.  Configure back to the highest speed we tried
    687 	 * (if there was more than one).  We call ourselves back with just the
    688 	 * single highest speed that the user requested.
    689 	 */
    690 	if (speedcnt > 1)
    691 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
    692 			highest_link_speed, autoneg, autoneg_wait_to_complete);
    693 
    694 out:
    695 	/* Set autoneg_advertised value based on input link speed */
    696 	hw->phy.autoneg_advertised = 0;
    697 
    698 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
    699 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
    700 
    701 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    702 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
    703 
    704 	return status;
    705 }
    706 
    707 /**
    708  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
    709  *  @hw: pointer to hardware structure
    710  *  @speed: new link speed
    711  *  @autoneg: TRUE if autonegotiation enabled
    712  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    713  *
    714  *  Implements the Intel SmartSpeed algorithm.
    715  **/
    716 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
    717 				    ixgbe_link_speed speed, bool autoneg,
    718 				    bool autoneg_wait_to_complete)
    719 {
    720 	s32 status = IXGBE_SUCCESS;
    721 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
    722 	s32 i, j;
    723 	bool link_up = FALSE;
    724 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    725 
    726 	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
    727 
    728 	 /* Set autoneg_advertised value based on input link speed */
    729 	hw->phy.autoneg_advertised = 0;
    730 
    731 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
    732 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
    733 
    734 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    735 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
    736 
    737 	if (speed & IXGBE_LINK_SPEED_100_FULL)
    738 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
    739 
    740 	/*
    741 	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
    742 	 * autoneg advertisement if link is unable to be established at the
    743 	 * highest negotiated rate.  This can sometimes happen due to integrity
    744 	 * issues with the physical media connection.
    745 	 */
    746 
    747 	/* First, try to get link with full advertisement */
    748 	hw->phy.smart_speed_active = FALSE;
    749 	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
    750 		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
    751 						    autoneg_wait_to_complete);
    752 		if (status != IXGBE_SUCCESS)
    753 			goto out;
    754 
    755 		/*
    756 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
    757 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
    758 		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
    759 		 * Table 9 in the AN MAS.
    760 		 */
    761 		for (i = 0; i < 5; i++) {
    762 			msec_delay(100);
    763 
    764 			/* If we have link, just jump out */
    765 			status = ixgbe_check_link(hw, &link_speed, &link_up,
    766 						  FALSE);
    767 			if (status != IXGBE_SUCCESS)
    768 				goto out;
    769 
    770 			if (link_up)
    771 				goto out;
    772 		}
    773 	}
    774 
    775 	/*
    776 	 * We didn't get link.  If we advertised KR plus one of KX4/KX
    777 	 * (or BX4/BX), then disable KR and try again.
    778 	 */
    779 	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
    780 	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
    781 		goto out;
    782 
    783 	/* Turn SmartSpeed on to disable KR support */
    784 	hw->phy.smart_speed_active = TRUE;
    785 	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
    786 					    autoneg_wait_to_complete);
    787 	if (status != IXGBE_SUCCESS)
    788 		goto out;
    789 
    790 	/*
    791 	 * Wait for the controller to acquire link.  600ms will allow for
     792 	 * the AN link_fail_inhibit_timer as well as for multiple cycles of
    793 	 * parallel detect, both 10g and 1g. This allows for the maximum
    794 	 * connect attempts as defined in the AN MAS table 73-7.
    795 	 */
    796 	for (i = 0; i < 6; i++) {
    797 		msec_delay(100);
    798 
    799 		/* If we have link, just jump out */
    800 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
    801 		if (status != IXGBE_SUCCESS)
    802 			goto out;
    803 
    804 		if (link_up)
    805 			goto out;
    806 	}
    807 
    808 	/* We didn't get link.  Turn SmartSpeed back off. */
    809 	hw->phy.smart_speed_active = FALSE;
    810 	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
    811 					    autoneg_wait_to_complete);
    812 
    813 out:
    814 	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
    815 		DEBUGOUT("Smartspeed has downgraded the link speed "
    816 		"from the maximum advertised\n");
    817 	return status;
    818 }
    819 
    820 /**
    821  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
    822  *  @hw: pointer to hardware structure
    823  *  @speed: new link speed
    824  *  @autoneg: TRUE if autonegotiation enabled
    825  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    826  *
    827  *  Set the link speed in the AUTOC register and restarts link.
    828  **/
    829 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
    830 			       ixgbe_link_speed speed, bool autoneg,
    831 			       bool autoneg_wait_to_complete)
    832 {
    833 	s32 status = IXGBE_SUCCESS;
    834 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    835 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
    836 	u32 start_autoc = autoc;
    837 	u32 orig_autoc = 0;
    838 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
    839 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
    840 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
    841 	u32 links_reg;
    842 	u32 i;
    843 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
    844 
    845 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
    846 
    847 	/* Check to see if speed passed in is supported. */
    848 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
    849 	if (status != IXGBE_SUCCESS)
    850 		goto out;
    851 
    852 	speed &= link_capabilities;
    853 
    854 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
    855 		status = IXGBE_ERR_LINK_SETUP;
    856 		goto out;
    857 	}
    858 
     859 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support */
    860 	if (hw->mac.orig_link_settings_stored)
    861 		orig_autoc = hw->mac.orig_autoc;
    862 	else
    863 		orig_autoc = autoc;
    864 
    865 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    866 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    867 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    868 		/* Set KX4/KX/KR support according to speed requested */
    869 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
     870 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
     871 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
     872 				autoc |= IXGBE_AUTOC_KX4_SUPP;
     873 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
     874 			    (hw->phy.smart_speed_active == FALSE))
     875 				autoc |= IXGBE_AUTOC_KR_SUPP;
         		}
    876 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    877 			autoc |= IXGBE_AUTOC_KX_SUPP;
    878 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
    879 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
    880 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
    881 		/* Switch from 1G SFI to 10G SFI if requested */
    882 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
    883 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
    884 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    885 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
    886 		}
    887 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
    888 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
    889 		/* Switch from 10G SFI to 1G SFI if requested */
    890 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
    891 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
    892 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    893 			if (autoneg)
    894 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
    895 			else
    896 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
    897 		}
    898 	}
    899 
    900 	if (autoc != start_autoc) {
    901 		/* Restart link */
    902 		autoc |= IXGBE_AUTOC_AN_RESTART;
    903 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
    904 
    905 		/* Only poll for autoneg to complete if specified to do so */
    906 		if (autoneg_wait_to_complete) {
    907 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    908 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    909 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    910 				links_reg = 0; /*Just in case Autoneg time=0*/
    911 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
    912 					links_reg =
    913 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
    914 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
    915 						break;
    916 					msec_delay(100);
    917 				}
    918 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
    919 					status =
    920 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
    921 					DEBUGOUT("Autoneg did not complete.\n");
    922 				}
    923 			}
    924 		}
    925 
     926 		/* Add delay to filter out noise during initial link setup */
    927 		msec_delay(50);
    928 	}
    929 
    930 out:
    931 	return status;
    932 }
    933 
    934 /**
    935  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
    936  *  @hw: pointer to hardware structure
    937  *  @speed: new link speed
    938  *  @autoneg: TRUE if autonegotiation enabled
    939  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
    940  *
    941  *  Restarts link on PHY and MAC based on settings passed in.
    942  **/
    943 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
    944 					 ixgbe_link_speed speed,
    945 					 bool autoneg,
    946 					 bool autoneg_wait_to_complete)
    947 {
    948 	s32 status;
    949 
    950 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
    951 
    952 	/* Setup the PHY according to input speed */
    953 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
    954 					      autoneg_wait_to_complete);
    955 	/* Set up MAC */
    956 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
    957 
    958 	return status;
    959 }
    960 
    961 /**
    962  *  ixgbe_reset_hw_82599 - Perform hardware reset
    963  *  @hw: pointer to hardware structure
    964  *
    965  *  Resets the hardware by resetting the transmit and receive units, masks
     966  *  and clears all interrupts, performs a PHY reset, and performs a link (MAC)
    967  *  reset.
    968  **/
    969 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
    970 {
    971 	ixgbe_link_speed link_speed;
    972 	s32 status;
    973 	u32 ctrl, i, autoc, autoc2;
    974 	bool link_up = FALSE;
    975 
    976 	DEBUGFUNC("ixgbe_reset_hw_82599");
    977 
    978 	/* Call adapter stop to disable tx/rx and clear interrupts */
    979 	status = hw->mac.ops.stop_adapter(hw);
    980 	if (status != IXGBE_SUCCESS)
    981 		goto reset_hw_out;
    982 
    983 	/* flush pending Tx transactions */
    984 	ixgbe_clear_tx_pending(hw);
    985 
    986 	/* PHY ops must be identified and initialized prior to reset */
    987 
    988 	/* Identify PHY and related function pointers */
    989 	status = hw->phy.ops.init(hw);
    990 
    991 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
    992 		goto reset_hw_out;
    993 
    994 	/* Setup SFP module if there is one present. */
    995 	if (hw->phy.sfp_setup_needed) {
    996 		status = hw->mac.ops.setup_sfp(hw);
    997 		hw->phy.sfp_setup_needed = FALSE;
    998 	}
    999 
   1000 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
   1001 		goto reset_hw_out;
   1002 
   1003 	/* Reset PHY */
   1004 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
   1005 		hw->phy.ops.reset(hw);
   1006 
   1007 mac_reset_top:
   1008 	/*
   1009 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
   1010 	 * If link reset is used when link is up, it might reset the PHY when
   1011 	 * mng is using it.  If link is down or the flag to force full link
   1012 	 * reset is set, then perform link reset.
   1013 	 */
   1014 	ctrl = IXGBE_CTRL_LNK_RST;
   1015 	if (!hw->force_full_reset) {
   1016 		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
   1017 		if (link_up)
   1018 			ctrl = IXGBE_CTRL_RST;
   1019 	}
   1020 
   1021 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
   1022 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
   1023 	IXGBE_WRITE_FLUSH(hw);
   1024 
   1025 	/* Poll for reset bit to self-clear indicating reset is complete */
   1026 	for (i = 0; i < 10; i++) {
   1027 		usec_delay(1);
   1028 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
   1029 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
   1030 			break;
   1031 	}
   1032 
   1033 	if (ctrl & IXGBE_CTRL_RST_MASK) {
   1034 		status = IXGBE_ERR_RESET_FAILED;
   1035 		DEBUGOUT("Reset polling failed to complete.\n");
   1036 	}
   1037 
   1038 	msec_delay(50);
   1039 
   1040 	/*
   1041 	 * Double resets are required for recovery from certain error
   1042 	 * conditions.  Between resets, it is necessary to stall to allow time
   1043 	 * for any pending HW events to complete.
   1044 	 */
   1045 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
   1046 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
   1047 		goto mac_reset_top;
   1048 	}
   1049 
   1050 	/*
   1051 	 * Store the original AUTOC/AUTOC2 values if they have not been
   1052 	 * stored off yet.  Otherwise restore the stored original
   1053 	 * values since the reset operation sets back to defaults.
   1054 	 */
   1055 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   1056 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   1057 	if (hw->mac.orig_link_settings_stored == FALSE) {
   1058 		hw->mac.orig_autoc = autoc;
   1059 		hw->mac.orig_autoc2 = autoc2;
   1060 		hw->mac.orig_link_settings_stored = TRUE;
   1061 	} else {
   1062 		if (autoc != hw->mac.orig_autoc)
   1063 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
   1064 					IXGBE_AUTOC_AN_RESTART));
   1065 
   1066 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
   1067 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
   1068 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
   1069 			autoc2 |= (hw->mac.orig_autoc2 &
   1070 				   IXGBE_AUTOC2_UPPER_MASK);
   1071 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
   1072 		}
   1073 	}
   1074 
   1075 	/* Store the permanent mac address */
   1076 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
   1077 
   1078 	/*
   1079 	 * Store MAC address from RAR0, clear receive address registers, and
   1080 	 * clear the multicast table.  Also reset num_rar_entries to 128,
   1081 	 * since we modify this value when programming the SAN MAC address.
   1082 	 */
   1083 	hw->mac.num_rar_entries = 128;
   1084 	hw->mac.ops.init_rx_addrs(hw);
   1085 
   1086 	/* Store the permanent SAN mac address */
   1087 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
   1088 
   1089 	/* Add the SAN MAC address to the RAR only if it's a valid address */
   1090 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
   1091 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
   1092 				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
   1093 
   1094 		/* Reserve the last RAR for the SAN MAC address */
   1095 		hw->mac.num_rar_entries--;
   1096 	}
   1097 
   1098 	/* Store the alternative WWNN/WWPN prefix */
   1099 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
   1100 				   &hw->mac.wwpn_prefix);
   1101 
   1102 reset_hw_out:
   1103 	return status;
   1104 }
   1105 
   1106 /**
   1107  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
   1108  *  @hw: pointer to hardware structure
   1109  **/
   1110 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
   1111 {
   1112 	int i;
   1113 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
   1114 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
   1115 
   1116 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
   1117 
   1118 	/*
   1119 	 * Before starting reinitialization process,
   1120 	 * FDIRCMD.CMD must be zero.
   1121 	 */
   1122 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
   1123 		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1124 		      IXGBE_FDIRCMD_CMD_MASK))
   1125 			break;
   1126 		usec_delay(10);
   1127 	}
   1128 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
   1129 		DEBUGOUT("Flow Director previous command isn't complete, "
   1130 			 "aborting table re-initialization.\n");
   1131 		return IXGBE_ERR_FDIR_REINIT_FAILED;
   1132 	}
   1133 
   1134 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
   1135 	IXGBE_WRITE_FLUSH(hw);
   1136 	/*
    1137 	 * The 82599 Flow Director init flow cannot simply be restarted.
    1138 	 * Work around the 82599 silicon errata by performing the following
    1139 	 * steps before re-writing the FDIRCTRL register with the same value:
    1140 	 * - write 1 to bit 8 of the FDIRCMD register, then
    1141 	 * - write 0 to bit 8 of the FDIRCMD register
   1142 	 */
   1143 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1144 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
   1145 			 IXGBE_FDIRCMD_CLEARHT));
   1146 	IXGBE_WRITE_FLUSH(hw);
   1147 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1148 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1149 			 ~IXGBE_FDIRCMD_CLEARHT));
   1150 	IXGBE_WRITE_FLUSH(hw);
   1151 	/*
   1152 	 * Clear FDIR Hash register to clear any leftover hashes
   1153 	 * waiting to be programmed.
   1154 	 */
   1155 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
   1156 	IXGBE_WRITE_FLUSH(hw);
   1157 
   1158 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1159 	IXGBE_WRITE_FLUSH(hw);
   1160 
   1161 	/* Poll init-done after we write FDIRCTRL register */
   1162 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1163 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1164 				   IXGBE_FDIRCTRL_INIT_DONE)
   1165 			break;
   1166 		usec_delay(10);
   1167 	}
   1168 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
   1169 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
   1170 		return IXGBE_ERR_FDIR_REINIT_FAILED;
   1171 	}
   1172 
   1173 	/* Clear FDIR statistics registers (read to clear) */
   1174 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
   1175 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
   1176 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
   1177 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
   1178 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
   1179 
   1180 	return IXGBE_SUCCESS;
   1181 }
   1182 
   1183 /**
   1184  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
   1185  *  @hw: pointer to hardware structure
   1186  *  @fdirctrl: value to write to flow director control register
   1187  **/
   1188 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1189 {
   1190 	int i;
   1191 
   1192 	DEBUGFUNC("ixgbe_fdir_enable_82599");
   1193 
   1194 	/* Prime the keys for hashing */
   1195 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
   1196 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
   1197 
   1198 	/*
   1199 	 * Poll init-done after we write the register.  Estimated times:
   1200 	 *      10G: PBALLOC = 11b, timing is 60us
   1201 	 *       1G: PBALLOC = 11b, timing is 600us
   1202 	 *     100M: PBALLOC = 11b, timing is 6ms
   1203 	 *
    1204 	 *     Multiply these timings by 4 if under full Rx load
   1205 	 *
   1206 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
   1207 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
   1208 	 * this might not finish in our poll time, but we can live with that
   1209 	 * for now.
   1210 	 */
   1211 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1212 	IXGBE_WRITE_FLUSH(hw);
   1213 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1214 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1215 				   IXGBE_FDIRCTRL_INIT_DONE)
   1216 			break;
   1217 		msec_delay(1);
   1218 	}
   1219 
   1220 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
   1221 		DEBUGOUT("Flow Director poll time exceeded!\n");
   1222 }
   1223 
   1224 /**
   1225  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
   1226  *  @hw: pointer to hardware structure
   1227  *  @fdirctrl: value to write to flow director control register, initially
   1228  *	     contains just the value of the Rx packet buffer allocation
   1229  **/
   1230 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1231 {
   1232 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
   1233 
   1234 	/*
   1235 	 * Continue setup of fdirctrl register bits:
   1236 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1237 	 *  Set the maximum length per hash bucket to 0xA filters
    1238 	 *  Send interrupt when 64 (0x4 * 16) filters are left
   1239 	 */
   1240 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1241 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1242 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1243 
   1244 	/* write hashes and fdirctrl register, poll for completion */
   1245 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1246 
   1247 	return IXGBE_SUCCESS;
   1248 }
   1249 
   1250 /**
   1251  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
   1252  *  @hw: pointer to hardware structure
   1253  *  @fdirctrl: value to write to flow director control register, initially
   1254  *	     contains just the value of the Rx packet buffer allocation
   1255  **/
   1256 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1257 {
   1258 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
   1259 
   1260 	/*
   1261 	 * Continue setup of fdirctrl register bits:
   1262 	 *  Turn perfect match filtering on
   1263 	 *  Report hash in RSS field of Rx wb descriptor
   1264 	 *  Initialize the drop queue
   1265 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1266 	 *  Set the maximum length per hash bucket to 0xA filters
   1267 	 *  Send interrupt when 64 (0x4 * 16) filters are left
   1268 	 */
   1269 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
   1270 		    IXGBE_FDIRCTRL_REPORT_STATUS |
   1271 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
   1272 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1273 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1274 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1275 
   1276 	/* write hashes and fdirctrl register, poll for completion */
   1277 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1278 
   1279 	return IXGBE_SUCCESS;
   1280 }
   1281 
   1282 /*
   1283  * These defines allow us to quickly generate all of the necessary instructions
   1284  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
   1285  * for values 0 through 15
   1286  */
   1287 #define IXGBE_ATR_COMMON_HASH_KEY \
   1288 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
   1289 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
   1290 do { \
   1291 	u32 n = (_n); \
   1292 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
   1293 		common_hash ^= lo_hash_dword >> n; \
   1294 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
   1295 		bucket_hash ^= lo_hash_dword >> n; \
   1296 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
   1297 		sig_hash ^= lo_hash_dword << (16 - n); \
   1298 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
   1299 		common_hash ^= hi_hash_dword >> n; \
   1300 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
   1301 		bucket_hash ^= hi_hash_dword >> n; \
   1302 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
   1303 		sig_hash ^= hi_hash_dword << (16 - n); \
    1304 } while (0)
   1305 
   1306 /**
   1307  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
    1308  *  @input: unique input dword
    1309  *  @common: compressed common input dword
    1310  *
    1311  *  Computes the ATR signature hash with all of the loops unwound,
    1312  *  letting the compiler work out all of the conditional ifs since the
    1313  *  keys are static defines, and computing the bucket and signature
    1314  *  hashes at once since the hashed dword stream is the same for both.
   1315  **/
   1316 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
   1317 				     union ixgbe_atr_hash_dword common)
   1318 {
   1319 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
   1320 	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
   1321 
   1322 	/* record the flow_vm_vlan bits as they are a key part to the hash */
   1323 	flow_vm_vlan = IXGBE_NTOHL(input.dword);
   1324 
   1325 	/* generate common hash dword */
   1326 	hi_hash_dword = IXGBE_NTOHL(common.dword);
   1327 
   1328 	/* low dword is word swapped version of common */
   1329 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
   1330 
   1331 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
   1332 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
   1333 
   1334 	/* Process bits 0 and 16 */
   1335 	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
   1336 
   1337 	/*
    1338 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword; this is
    1339 	 * delayed because bit 0 of the stream must be processed without the
    1340 	 * VLAN bits, so the VLAN is only added after bit 0 was processed
   1341 	 */
   1342 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
   1343 
    1344 	/* Process remaining 30 bits of the key */
   1345 	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
   1346 	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
   1347 	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
   1348 	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
   1349 	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
   1350 	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
   1351 	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
   1352 	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
   1353 	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
   1354 	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
   1355 	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
   1356 	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
   1357 	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
   1358 	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
   1359 	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
   1360 
   1361 	/* combine common_hash result with signature and bucket hashes */
   1362 	bucket_hash ^= common_hash;
   1363 	bucket_hash &= IXGBE_ATR_HASH_MASK;
   1364 
   1365 	sig_hash ^= common_hash << 16;
   1366 	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
   1367 
   1368 	/* return completed signature hash */
   1369 	return sig_hash ^ bucket_hash;
   1370 }
   1371 
   1372 /**
    1373  *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
   1374  *  @hw: pointer to hardware structure
   1375  *  @input: unique input dword
   1376  *  @common: compressed common input dword
   1377  *  @queue: queue index to direct traffic to
   1378  **/
   1379 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
   1380 					  union ixgbe_atr_hash_dword input,
   1381 					  union ixgbe_atr_hash_dword common,
   1382 					  u8 queue)
   1383 {
   1384 	u64  fdirhashcmd;
   1385 	u32  fdircmd;
   1386 
   1387 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
   1388 
   1389 	/*
    1390 	 * Get the flow_type in order to program FDIRCMD properly.  The lowest
    1391 	 * 2 bits are FDIRCMD.L4TYPE, the third lowest bit is FDIRCMD.IPV6.
   1392 	 */
   1393 	switch (input.formatted.flow_type) {
   1394 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   1395 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   1396 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   1397 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
   1398 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
   1399 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
   1400 		break;
   1401 	default:
   1402 		DEBUGOUT(" Error on flow type input\n");
   1403 		return IXGBE_ERR_CONFIG;
   1404 	}
   1405 
   1406 	/* configure FDIRCMD register */
   1407 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
   1408 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
   1409 	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
   1410 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
   1411 
   1412 	/*
    1413 	 * The lower 32 bits of fdirhashcmd are for FDIRHASH, the upper 32 bits
    1414 	 * are for FDIRCMD.  Then do a single 64-bit register write from FDIRHASH.
   1415 	 */
   1416 	fdirhashcmd = (u64)fdircmd << 32;
   1417 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
   1418 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
   1419 
   1420 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
   1421 
   1422 	return IXGBE_SUCCESS;
   1423 }
   1424 
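         /*
          * Like IXGBE_COMPUTE_SIG_HASH_ITERATION above, but only the bucket hash
          * is computed since perfect filters do not use the signature hash.
          */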
   1425 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
   1426 do { \
   1427 	u32 n = (_n); \
   1428 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
   1429 		bucket_hash ^= lo_hash_dword >> n; \
   1430 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
   1431 		bucket_hash ^= hi_hash_dword >> n; \
    1432 } while (0)
   1433 
   1434 /**
   1435  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
    1436  *  @input: input bitstream to compute the hash on
    1437  *  @input_mask: mask for the input bitstream
    1438  *
    1439  *  This function serves two main purposes.  First it applies the
    1440  *  input_mask to the input, resulting in a cleaned up data stream.
   1441  *  Secondly it computes the hash and stores it in the bkt_hash field at
   1442  *  the end of the input byte stream.  This way it will be available for
   1443  *  future use without needing to recompute the hash.
   1444  **/
   1445 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
   1446 					  union ixgbe_atr_input *input_mask)
   1447 {
   1448 
   1449 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
   1450 	u32 bucket_hash = 0;
   1451 
   1452 	/* Apply masks to input data */
   1453 	input->dword_stream[0]  &= input_mask->dword_stream[0];
   1454 	input->dword_stream[1]  &= input_mask->dword_stream[1];
   1455 	input->dword_stream[2]  &= input_mask->dword_stream[2];
   1456 	input->dword_stream[3]  &= input_mask->dword_stream[3];
   1457 	input->dword_stream[4]  &= input_mask->dword_stream[4];
   1458 	input->dword_stream[5]  &= input_mask->dword_stream[5];
   1459 	input->dword_stream[6]  &= input_mask->dword_stream[6];
   1460 	input->dword_stream[7]  &= input_mask->dword_stream[7];
   1461 	input->dword_stream[8]  &= input_mask->dword_stream[8];
   1462 	input->dword_stream[9]  &= input_mask->dword_stream[9];
   1463 	input->dword_stream[10] &= input_mask->dword_stream[10];
   1464 
   1465 	/* record the flow_vm_vlan bits as they are a key part to the hash */
   1466 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
   1467 
   1468 	/* generate common hash dword */
   1469 	hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
   1470 				    input->dword_stream[2] ^
   1471 				    input->dword_stream[3] ^
   1472 				    input->dword_stream[4] ^
   1473 				    input->dword_stream[5] ^
   1474 				    input->dword_stream[6] ^
   1475 				    input->dword_stream[7] ^
   1476 				    input->dword_stream[8] ^
   1477 				    input->dword_stream[9] ^
   1478 				    input->dword_stream[10]);
   1479 
   1480 	/* low dword is word swapped version of common */
   1481 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
   1482 
   1483 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
   1484 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
   1485 
   1486 	/* Process bits 0 and 16 */
   1487 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
   1488 
   1489 	/*
   1490 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
   1491 	 * delay this because bit 0 of the stream should not be processed
   1492 	 * so we do not add the vlan until after bit 0 was processed
   1493 	 */
   1494 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
   1495 
    1496 	/* Process the remaining 30 bits of the key */
   1497 	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
   1498 	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
   1499 	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
   1500 	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
   1501 	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
   1502 	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
   1503 	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
   1504 	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
   1505 	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
   1506 	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
   1507 	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
   1508 	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
   1509 	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
   1510 	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
   1511 	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
   1512 
   1513 	/*
   1514 	 * Limit hash to 13 bits since max bucket count is 8K.
   1515 	 * Store result at the end of the input stream.
   1516 	 */
   1517 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
   1518 }
   1519 
   1520 /**
    1521  *  ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
   1522  *  @input_mask: mask to be bit swapped
   1523  *
   1524  *  The source and destination port masks for flow director are bit swapped
    1525  *  in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2,
    1526  *  and so on.  In order to generate a correctly swapped value we need to bit
    1527  *  swap the mask, and that is what this function accomplishes.
   1528  **/
   1529 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
   1530 {
   1531 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
   1532 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
   1533 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
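         	/*
         	 * reverse the bits within each 16-bit half of the mask: the
         	 * destination port mask is in the upper half and the source
         	 * port mask is in the lower half
         	 */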
   1534 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
   1535 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
   1536 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
   1537 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
   1538 }
   1539 
   1540 /*
   1541  * These two macros are meant to address the fact that we have registers
   1542  * that are either all or in part big-endian.  As a result on big-endian
   1543  * systems we will end up byte swapping the value to little-endian before
   1544  * it is byte swapped again and written to the hardware in the original
   1545  * big-endian format.
   1546  */
   1547 #define IXGBE_STORE_AS_BE32(_value) \
   1548 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
   1549 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
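         /* for example, IXGBE_STORE_AS_BE32(0x11223344) evaluates to 0x44332211 */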
   1550 
   1551 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
   1552 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
   1553 
   1554 #define IXGBE_STORE_AS_BE16(_value) \
   1555 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
   1556 
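         /**
          *  ixgbe_fdir_set_input_mask_82599 - Set the Flow Director input masks
          *  @hw: pointer to hardware structure
          *  @input_mask: mask to apply when programming the Flow Director filters
          **/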
   1557 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
   1558 				    union ixgbe_atr_input *input_mask)
   1559 {
   1560 	/* mask IPv6 since it is currently not supported */
   1561 	u32 fdirm = IXGBE_FDIRM_DIPv6;
   1562 	u32 fdirtcpm;
   1563 
    1564 	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
   1565 
   1566 	/*
   1567 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
   1568 	 * are zero, then assume a full mask for that field.  Also assume that
   1569 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
   1570 	 * cannot be masked out in this implementation.
   1571 	 *
   1572 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
   1573 	 * point in time.
   1574 	 */
   1575 
    1576 	/* verify the bucket hash field of the mask is cleared */
   1577 	if (input_mask->formatted.bkt_hash)
   1578 		DEBUGOUT(" bucket hash should always be 0 in mask\n");
   1579 
   1580 	/* Program FDIRM and verify partial masks */
   1581 	switch (input_mask->formatted.vm_pool & 0x7F) {
   1582 	case 0x0:
   1583 		fdirm |= IXGBE_FDIRM_POOL;
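         		/* FALLTHROUGH */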
   1584 	case 0x7F:
   1585 		break;
   1586 	default:
   1587 		DEBUGOUT(" Error on vm pool mask\n");
   1588 		return IXGBE_ERR_CONFIG;
   1589 	}
   1590 
   1591 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
   1592 	case 0x0:
   1593 		fdirm |= IXGBE_FDIRM_L4P;
   1594 		if (input_mask->formatted.dst_port ||
   1595 		    input_mask->formatted.src_port) {
   1596 			DEBUGOUT(" Error on src/dst port mask\n");
   1597 			return IXGBE_ERR_CONFIG;
   1598 		}
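         		/* FALLTHROUGH */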
   1599 	case IXGBE_ATR_L4TYPE_MASK:
   1600 		break;
   1601 	default:
   1602 		DEBUGOUT(" Error on flow type mask\n");
   1603 		return IXGBE_ERR_CONFIG;
   1604 	}
   1605 
   1606 	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
   1607 	case 0x0000:
   1608 		/* mask VLAN ID, fall through to mask VLAN priority */
   1609 		fdirm |= IXGBE_FDIRM_VLANID;
   1610 	case 0x0FFF:
   1611 		/* mask VLAN priority */
   1612 		fdirm |= IXGBE_FDIRM_VLANP;
   1613 		break;
   1614 	case 0xE000:
   1615 		/* mask VLAN ID only, fall through */
   1616 		fdirm |= IXGBE_FDIRM_VLANID;
   1617 	case 0xEFFF:
   1618 		/* no VLAN fields masked */
   1619 		break;
   1620 	default:
   1621 		DEBUGOUT(" Error on VLAN mask\n");
   1622 		return IXGBE_ERR_CONFIG;
   1623 	}
   1624 
   1625 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
   1626 	case 0x0000:
   1627 		/* Mask Flex Bytes, fall through */
   1628 		fdirm |= IXGBE_FDIRM_FLEX;
   1629 	case 0xFFFF:
   1630 		break;
   1631 	default:
   1632 		DEBUGOUT(" Error on flexible byte mask\n");
   1633 		return IXGBE_ERR_CONFIG;
   1634 	}
   1635 
   1636 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
   1637 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
   1638 
   1639 	/* store the TCP/UDP port masks, bit reversed from port layout */
   1640 	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
   1641 
   1642 	/* write both the same so that UDP and TCP use the same mask */
   1643 	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
   1644 	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
   1645 
    1646 	/* store source and destination IP masks (big-endian) */
   1647 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
   1648 			     ~input_mask->formatted.src_ip[0]);
   1649 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
   1650 			     ~input_mask->formatted.dst_ip[0]);
   1651 
   1652 	return IXGBE_SUCCESS;
   1653 }
   1654 
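         /**
          *  ixgbe_fdir_write_perfect_filter_82599 - Write a perfect filter to hardware
          *  @hw: pointer to hardware structure
          *  @input: input bitstream (mask already applied, bkt_hash already computed)
          *  @soft_id: software index for the filter
          *  @queue: queue index to direct traffic to
          **/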
   1655 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
   1656 					  union ixgbe_atr_input *input,
   1657 					  u16 soft_id, u8 queue)
   1658 {
   1659 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
   1660 
   1661 	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
   1662 
   1663 	/* currently IPv6 is not supported, must be programmed with 0 */
   1664 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
   1665 			     input->formatted.src_ip[0]);
   1666 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
   1667 			     input->formatted.src_ip[1]);
   1668 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
   1669 			     input->formatted.src_ip[2]);
   1670 
   1671 	/* record the source address (big-endian) */
   1672 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
   1673 
   1674 	/* record the first 32 bits of the destination address (big-endian) */
   1675 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
   1676 
    1677 	/* record source and destination port (little-endian) */
   1678 	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
   1679 	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
   1680 	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
   1681 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
   1682 
    1683 	/* record VLAN (little-endian) and flex_bytes (big-endian) */
   1684 	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
   1685 	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
   1686 	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
   1687 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
   1688 
   1689 	/* configure FDIRHASH register */
   1690 	fdirhash = input->formatted.bkt_hash;
   1691 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
   1692 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
   1693 
   1694 	/*
   1695 	 * flush all previous writes to make certain registers are
   1696 	 * programmed prior to issuing the command
   1697 	 */
   1698 	IXGBE_WRITE_FLUSH(hw);
   1699 
   1700 	/* configure FDIRCMD register */
   1701 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
   1702 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
   1703 	if (queue == IXGBE_FDIR_DROP_QUEUE)
   1704 		fdircmd |= IXGBE_FDIRCMD_DROP;
   1705 	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
   1706 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
   1707 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
   1708 
   1709 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
   1710 
   1711 	return IXGBE_SUCCESS;
   1712 }
   1713 
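         /**
          *  ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter
          *  @hw: pointer to hardware structure
          *  @input: input bitstream (only the bkt_hash field is used)
          *  @soft_id: software index of the filter to remove
          **/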
   1714 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
   1715 					  union ixgbe_atr_input *input,
   1716 					  u16 soft_id)
   1717 {
   1718 	u32 fdirhash;
   1719 	u32 fdircmd = 0;
   1720 	u32 retry_count;
   1721 	s32 err = IXGBE_SUCCESS;
   1722 
   1723 	/* configure FDIRHASH register */
   1724 	fdirhash = input->formatted.bkt_hash;
   1725 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
   1726 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
   1727 
   1728 	/* flush hash to HW */
   1729 	IXGBE_WRITE_FLUSH(hw);
   1730 
   1731 	/* Query if filter is present */
   1732 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
   1733 
   1734 	for (retry_count = 10; retry_count; retry_count--) {
   1735 		/* allow 10us for query to process */
   1736 		usec_delay(10);
   1737 		/* verify query completed successfully */
   1738 		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
   1739 		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
   1740 			break;
   1741 	}
   1742 
   1743 	if (!retry_count)
   1744 		err = IXGBE_ERR_FDIR_REINIT_FAILED;
   1745 
   1746 	/* if filter exists in hardware then remove it */
   1747 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
   1748 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
   1749 		IXGBE_WRITE_FLUSH(hw);
   1750 		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1751 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
   1752 	}
   1753 
   1754 	return err;
   1755 }
   1756 
   1757 /**
   1758  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
   1759  *  @hw: pointer to hardware structure
   1760  *  @input: input bitstream
   1761  *  @input_mask: mask for the input bitstream
   1762  *  @soft_id: software index for the filters
   1763  *  @queue: queue index to direct traffic to
   1764  *
    1765  *  Note that the caller of this function must hold a lock, since the
   1766  *  hardware writes must be protected from one another.
   1767  **/
   1768 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
   1769 					union ixgbe_atr_input *input,
   1770 					union ixgbe_atr_input *input_mask,
   1771 					u16 soft_id, u8 queue)
   1772 {
   1773 	s32 err = IXGBE_ERR_CONFIG;
   1774 
   1775 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
   1776 
   1777 	/*
   1778 	 * Check flow_type formatting, and bail out before we touch the hardware
   1779 	 * if there's a configuration issue
   1780 	 */
   1781 	switch (input->formatted.flow_type) {
   1782 	case IXGBE_ATR_FLOW_TYPE_IPV4:
   1783 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
   1784 		if (input->formatted.dst_port || input->formatted.src_port) {
   1785 			DEBUGOUT(" Error on src/dst port\n");
   1786 			return IXGBE_ERR_CONFIG;
   1787 		}
   1788 		break;
   1789 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   1790 		if (input->formatted.dst_port || input->formatted.src_port) {
   1791 			DEBUGOUT(" Error on src/dst port\n");
   1792 			return IXGBE_ERR_CONFIG;
   1793 		}
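         		/* FALLTHROUGH */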
   1794 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   1795 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   1796 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
   1797 						  IXGBE_ATR_L4TYPE_MASK;
   1798 		break;
   1799 	default:
   1800 		DEBUGOUT(" Error on flow type input\n");
   1801 		return err;
   1802 	}
   1803 
   1804 	/* program input mask into the HW */
   1805 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
   1806 	if (err)
   1807 		return err;
   1808 
   1809 	/* apply mask and compute/store hash */
   1810 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
   1811 
   1812 	/* program filters to filter memory */
   1813 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
   1814 						     soft_id, queue);
   1815 }
   1816 
   1817 /**
   1818  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
   1819  *  @hw: pointer to hardware structure
   1820  *  @reg: analog register to read
   1821  *  @val: read value
   1822  *
   1823  *  Performs read operation to Omer analog register specified.
   1824  **/
   1825 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
   1826 {
   1827 	u32  core_ctl;
   1828 
   1829 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
   1830 
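         	/*
         	 * Select the analog register (bits 15:8 of CORECTL), wait for
         	 * the access to complete, then read the value back from the
         	 * low byte of CORECTL.
         	 */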
   1831 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
   1832 			(reg << 8));
   1833 	IXGBE_WRITE_FLUSH(hw);
   1834 	usec_delay(10);
   1835 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
   1836 	*val = (u8)core_ctl;
   1837 
   1838 	return IXGBE_SUCCESS;
   1839 }
   1840 
   1841 /**
   1842  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
   1843  *  @hw: pointer to hardware structure
   1844  *  @reg: atlas register to write
   1845  *  @val: value to write
   1846  *
   1847  *  Performs write operation to Omer analog register specified.
   1848  **/
   1849 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
   1850 {
   1851 	u32  core_ctl;
   1852 
   1853 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
   1854 
   1855 	core_ctl = (reg << 8) | val;
   1856 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
   1857 	IXGBE_WRITE_FLUSH(hw);
   1858 	usec_delay(10);
   1859 
   1860 	return IXGBE_SUCCESS;
   1861 }
   1862 
   1863 /**
   1864  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
   1865  *  @hw: pointer to hardware structure
   1866  *
    1867  *  Starts the hardware using the generic start_hw function
    1868  *  and the generation-specific (gen2) start_hw function,
    1869  *  then performs revision-specific operations, if any.
   1870  **/
   1871 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
   1872 {
   1873 	s32 ret_val = IXGBE_SUCCESS;
   1874 
   1875 	DEBUGFUNC("ixgbe_start_hw_82599");
   1876 
   1877 	ret_val = ixgbe_start_hw_generic(hw);
   1878 	if (ret_val != IXGBE_SUCCESS)
   1879 		goto out;
   1880 
   1881 	ret_val = ixgbe_start_hw_gen2(hw);
   1882 	if (ret_val != IXGBE_SUCCESS)
   1883 		goto out;
   1884 
   1885 	/* We need to run link autotry after the driver loads */
   1886 	hw->mac.autotry_restart = TRUE;
   1887 
   1888 	if (ret_val == IXGBE_SUCCESS)
   1889 		ret_val = ixgbe_verify_fw_version_82599(hw);
   1890 out:
   1891 	return ret_val;
   1892 }
   1893 
   1894 /**
    1895  *  ixgbe_identify_phy_82599 - Identify the PHY or SFP module
   1896  *  @hw: pointer to hardware structure
   1897  *
   1898  *  Determines the physical layer module found on the current adapter.
   1899  *  If PHY already detected, maintains current PHY type in hw struct,
   1900  *  otherwise executes the PHY detection routine.
   1901  **/
   1902 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
   1903 {
   1904 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
   1905 
   1906 	DEBUGFUNC("ixgbe_identify_phy_82599");
   1907 
   1908 	/* Detect PHY if not unknown - returns success if already detected. */
   1909 	status = ixgbe_identify_phy_generic(hw);
   1910 	if (status != IXGBE_SUCCESS) {
   1911 		/* 82599 10GBASE-T requires an external PHY */
   1912 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
   1913 			goto out;
   1914 		else
   1915 			status = ixgbe_identify_module_generic(hw);
   1916 	}
   1917 
   1918 	/* Set PHY type none if no PHY detected */
   1919 	if (hw->phy.type == ixgbe_phy_unknown) {
   1920 		hw->phy.type = ixgbe_phy_none;
   1921 		status = IXGBE_SUCCESS;
   1922 	}
   1923 
   1924 	/* Return error if SFP module has been detected but is not supported */
   1925 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
   1926 		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
   1927 
   1928 out:
   1929 	return status;
   1930 }
   1931 
   1932 /**
   1933  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
   1934  *  @hw: pointer to hardware structure
   1935  *
   1936  *  Determines physical layer capabilities of the current configuration.
   1937  **/
   1938 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
   1939 {
   1940 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   1941 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   1942 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   1943 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
   1944 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
   1945 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
   1946 	u16 ext_ability = 0;
   1947 	u8 comp_codes_10g = 0;
   1948 	u8 comp_codes_1g = 0;
   1949 
    1950 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
   1951 
   1952 	hw->phy.ops.identify(hw);
   1953 
   1954 	switch (hw->phy.type) {
   1955 	case ixgbe_phy_tn:
   1956 	case ixgbe_phy_cu_unknown:
   1957 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
   1958 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
   1959 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
   1960 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
   1961 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
   1962 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
   1963 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
   1964 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
   1965 		goto out;
   1966 	default:
   1967 		break;
   1968 	}
   1969 
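         	/*
         	 * not a copper PHY, so derive the physical layer from the
         	 * MAC link mode select field (AUTOC.LMS)
         	 */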
   1970 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
   1971 	case IXGBE_AUTOC_LMS_1G_AN:
   1972 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
   1973 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
   1974 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
   1975 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
   1976 			goto out;
   1977 		} else
   1978 			/* SFI mode so read SFP module */
   1979 			goto sfp_check;
   1980 		break;
   1981 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
   1982 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
   1983 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
   1984 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
   1985 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   1986 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
   1987 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
   1988 		goto out;
   1989 		break;
   1990 	case IXGBE_AUTOC_LMS_10G_SERIAL:
   1991 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
   1992 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   1993 			goto out;
   1994 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
   1995 			goto sfp_check;
   1996 		break;
   1997 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
   1998 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
   1999 		if (autoc & IXGBE_AUTOC_KX_SUPP)
   2000 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
   2001 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
   2002 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2003 		if (autoc & IXGBE_AUTOC_KR_SUPP)
   2004 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2005 		goto out;
   2006 		break;
   2007 	default:
   2008 		goto out;
   2009 		break;
   2010 	}
   2011 
   2012 sfp_check:
    2013 	/* SFP check must be done last since DA modules are sometimes used to
    2014 	 * test KR mode - we need to identify KR mode correctly before the SFP
    2015 	 * module check.  Call identify_sfp because the pluggable module may have changed */
   2016 	hw->phy.ops.identify_sfp(hw);
   2017 	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
   2018 		goto out;
   2019 
   2020 	switch (hw->phy.type) {
   2021 	case ixgbe_phy_sfp_passive_tyco:
   2022 	case ixgbe_phy_sfp_passive_unknown:
   2023 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
   2024 		break;
   2025 	case ixgbe_phy_sfp_ftl_active:
   2026 	case ixgbe_phy_sfp_active_unknown:
   2027 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
   2028 		break;
   2029 	case ixgbe_phy_sfp_avago:
   2030 	case ixgbe_phy_sfp_ftl:
   2031 	case ixgbe_phy_sfp_intel:
   2032 	case ixgbe_phy_sfp_unknown:
   2033 		hw->phy.ops.read_i2c_eeprom(hw,
   2034 		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
   2035 		hw->phy.ops.read_i2c_eeprom(hw,
   2036 		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
   2037 		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
   2038 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
   2039 		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
   2040 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
   2041 		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
   2042 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
   2043 		break;
   2044 	default:
   2045 		break;
   2046 	}
   2047 
   2048 out:
   2049 	return physical_layer;
   2050 }
   2051 
   2052 /**
   2053  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
   2054  *  @hw: pointer to hardware structure
   2055  *  @regval: register value to write to RXCTRL
   2056  *
   2057  *  Enables the Rx DMA unit for 82599
   2058  **/
   2059 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
   2060 {
   2061 
   2062 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
   2063 
   2064 	/*
   2065 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
   2066 	 * If traffic is incoming before we enable the Rx unit, it could hang
   2067 	 * the Rx DMA unit.  Therefore, make sure the security engine is
   2068 	 * completely disabled prior to enabling the Rx unit.
   2069 	 */
   2070 
   2071 	hw->mac.ops.disable_sec_rx_path(hw);
   2072 
   2073 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
   2074 
   2075 	hw->mac.ops.enable_sec_rx_path(hw);
   2076 
   2077 	return IXGBE_SUCCESS;
   2078 }
   2079 
   2080 /**
   2081  *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
   2082  *  @hw: pointer to hardware structure
   2083  *
    2084  *  Verifies that the installed firmware version is 0.6 or higher
   2085  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
   2086  *
   2087  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
   2088  *  if the FW version is not supported.
   2089  **/
   2090 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
   2091 {
   2092 	s32 status = IXGBE_ERR_EEPROM_VERSION;
   2093 	u16 fw_offset, fw_ptp_cfg_offset;
   2094 	u16 fw_version = 0;
   2095 
   2096 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
   2097 
   2098 	/* firmware check is only necessary for SFI devices */
   2099 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
   2100 		status = IXGBE_SUCCESS;
   2101 		goto fw_version_out;
   2102 	}
   2103 
   2104 	/* get the offset to the Firmware Module block */
   2105 	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
   2106 
   2107 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
   2108 		goto fw_version_out;
   2109 
   2110 	/* get the offset to the Pass Through Patch Configuration block */
   2111 	hw->eeprom.ops.read(hw, (fw_offset +
   2112 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
   2113 				 &fw_ptp_cfg_offset);
   2114 
   2115 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
   2116 		goto fw_version_out;
   2117 
   2118 	/* get the firmware version */
   2119 	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
   2120 			    IXGBE_FW_PATCH_VERSION_4), &fw_version);
   2121 
   2122 	if (fw_version > 0x5)
   2123 		status = IXGBE_SUCCESS;
   2124 
   2125 fw_version_out:
   2126 	return status;
   2127 }
   2128 
   2129 /**
   2130  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
   2131  *  @hw: pointer to hardware structure
   2132  *
   2133  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
    2134  *  returns FALSE.  Smart Speed must be disabled if the LESM FW module is enabled.
   2135  **/
   2136 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
   2137 {
   2138 	bool lesm_enabled = FALSE;
   2139 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
   2140 	s32 status;
   2141 
   2142 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
   2143 
   2144 	/* get the offset to the Firmware Module block */
   2145 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
   2146 
   2147 	if ((status != IXGBE_SUCCESS) ||
   2148 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
   2149 		goto out;
   2150 
   2151 	/* get the offset to the LESM Parameters block */
   2152 	status = hw->eeprom.ops.read(hw, (fw_offset +
   2153 				     IXGBE_FW_LESM_PARAMETERS_PTR),
   2154 				     &fw_lesm_param_offset);
   2155 
   2156 	if ((status != IXGBE_SUCCESS) ||
   2157 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
   2158 		goto out;
   2159 
   2160 	/* get the lesm state word */
   2161 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
   2162 				     IXGBE_FW_LESM_STATE_1),
   2163 				     &fw_lesm_state);
   2164 
   2165 	if ((status == IXGBE_SUCCESS) &&
   2166 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
   2167 		lesm_enabled = TRUE;
   2168 
   2169 out:
   2170 	return lesm_enabled;
   2171 }
   2172 
   2173 /**
   2174  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
   2175  *  fastest available method
   2176  *
   2177  *  @hw: pointer to hardware structure
    2178  *  @offset: offset of word in EEPROM to read
   2179  *  @words: number of words
   2180  *  @data: word(s) read from the EEPROM
   2181  *
   2182  *  Retrieves 16 bit word(s) read from EEPROM
   2183  **/
   2184 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
   2185 					  u16 words, u16 *data)
   2186 {
   2187 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2188 	s32 ret_val = IXGBE_ERR_CONFIG;
   2189 
   2190 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
   2191 
   2192 	/*
   2193 	 * If EEPROM is detected and can be addressed using 14 bits,
    2194 	 * use EERD, otherwise use bit bang
   2195 	 */
   2196 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2197 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
   2198 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
   2199 							 data);
   2200 	else
   2201 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
   2202 								    words,
   2203 								    data);
   2204 
   2205 	return ret_val;
   2206 }
   2207 
   2208 /**
   2209  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
   2210  *  fastest available method
   2211  *
   2212  *  @hw: pointer to hardware structure
    2213  *  @offset: offset of word in the EEPROM to read
   2214  *  @data: word read from the EEPROM
   2215  *
   2216  *  Reads a 16 bit word from the EEPROM
   2217  **/
   2218 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
   2219 				   u16 offset, u16 *data)
   2220 {
   2221 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2222 	s32 ret_val = IXGBE_ERR_CONFIG;
   2223 
   2224 	DEBUGFUNC("ixgbe_read_eeprom_82599");
   2225 
   2226 	/*
   2227 	 * If EEPROM is detected and can be addressed using 14 bits,
    2228 	 * use EERD, otherwise use bit bang
   2229 	 */
   2230 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2231 	    (offset <= IXGBE_EERD_MAX_ADDR))
   2232 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
   2233 	else
   2234 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
   2235 
   2236 	return ret_val;
   2237 }
   2238 
   2239