      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2012, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 238149 2012-07-05 20:51:44Z jfv $*/
     34 /*$NetBSD: ixgbe_82599.c,v 1.7 2015/04/02 09:26:55 msaitoh Exp $*/
     35 
     36 #include "ixgbe_type.h"
     37 #include "ixgbe_82599.h"
     38 #include "ixgbe_api.h"
     39 #include "ixgbe_common.h"
     40 #include "ixgbe_phy.h"
     41 
     42 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
     43 					 ixgbe_link_speed speed,
     44 					 bool autoneg,
     45 					 bool autoneg_wait_to_complete);
     46 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
     47 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
     48 				   u16 offset, u16 *data);
     49 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
     50 					  u16 words, u16 *data);
     51 
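         /**
          *  ixgbe_init_mac_link_ops_82599 - Set MAC link function pointers
          *  @hw: pointer to hardware structure
          *
          *  Selects the laser control and setup_link handlers according to the
          *  detected media type and SFP+ capabilities.
          **/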
     52 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
     53 {
     54 	struct ixgbe_mac_info *mac = &hw->mac;
     55 
     56 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
     57 
     58 	/* enable the laser control functions for SFP+ fiber */
     59 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
     60 		mac->ops.disable_tx_laser =
     61 				       &ixgbe_disable_tx_laser_multispeed_fiber;
     62 		mac->ops.enable_tx_laser =
     63 					&ixgbe_enable_tx_laser_multispeed_fiber;
     64 		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
     65 
     66 	} else {
     67 		mac->ops.disable_tx_laser = NULL;
     68 		mac->ops.enable_tx_laser = NULL;
     69 		mac->ops.flap_tx_laser = NULL;
     70 	}
     71 
     72 	if (hw->phy.multispeed_fiber) {
     73 		/* Set up dual speed SFP+ support */
     74 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
     75 	} else {
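         		/*
         		 * Use SmartSpeed only on backplane links when smart_speed
         		 * is auto or on and the LESM firmware is not handling the
         		 * link.
         		 */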
     76 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
     77 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
     78 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
     79 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
     80 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
     81 		} else {
     82 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
     83 		}
     84 	}
     85 }
     86 
     87 /**
     88  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
     89  *  @hw: pointer to hardware structure
     90  *
     91  *  Initialize any function pointers that were not able to be
     92  *  set during init_shared_code because the PHY/SFP type was
     93  *  not known.  Perform the SFP init if necessary.
     94  *
     95  **/
     96 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
     97 {
     98 	struct ixgbe_mac_info *mac = &hw->mac;
     99 	struct ixgbe_phy_info *phy = &hw->phy;
    100 	s32 ret_val = IXGBE_SUCCESS;
    101 
    102 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
    103 
    104 	/* Identify the PHY or SFP module */
    105 	ret_val = phy->ops.identify(hw);
    106 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
    107 		goto init_phy_ops_out;
    108 
    109 	/* Setup function pointers based on detected SFP module and speeds */
    110 	ixgbe_init_mac_link_ops_82599(hw);
    111 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
    112 		hw->phy.ops.reset = NULL;
    113 
    114 	/* If copper media, overwrite with copper function pointers */
    115 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
    116 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
    117 		mac->ops.get_link_capabilities =
    118 				  &ixgbe_get_copper_link_capabilities_generic;
    119 	}
    120 
    121 	/* Set necessary function pointers based on phy type */
    122 	switch (hw->phy.type) {
    123 	case ixgbe_phy_tn:
    124 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
    125 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
    126 		phy->ops.get_firmware_version =
    127 			     &ixgbe_get_phy_firmware_version_tnx;
    128 		break;
    129 	default:
    130 		break;
    131 	}
    132 init_phy_ops_out:
    133 	return ret_val;
    134 }
    135 
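         /**
          *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
          *  @hw: pointer to hardware structure
          *
          *  Loads the SFP+ PHY init sequence from the EEPROM and restarts
          *  autonegotiation so the new settings take effect.
          **/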
    136 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
    137 {
    138 	s32 ret_val = IXGBE_SUCCESS;
    139 	u32 reg_anlp1 = 0;
    140 	u32 i = 0;
    141 	u16 list_offset, data_offset, data_value;
    142 
    143 	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
    144 
    145 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
    146 		ixgbe_init_mac_link_ops_82599(hw);
    147 
    148 		hw->phy.ops.reset = NULL;
    149 
    150 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
    151 							      &data_offset);
    152 		if (ret_val != IXGBE_SUCCESS)
    153 			goto setup_sfp_out;
    154 
    155 		/* PHY config will finish before releasing the semaphore */
    156 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
    157 							IXGBE_GSSR_MAC_CSR_SM);
    158 		if (ret_val != IXGBE_SUCCESS) {
    159 			ret_val = IXGBE_ERR_SWFW_SYNC;
    160 			goto setup_sfp_out;
    161 		}
    162 
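         		/*
         		 * Write each word of the EEPROM init sequence to CORECTL
         		 * until the 0xffff end-of-list marker is reached.
         		 */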
    163 		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
    164 		while (data_value != 0xffff) {
    165 			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
    166 			IXGBE_WRITE_FLUSH(hw);
    167 			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
    168 		}
    169 
    170 		/* Release the semaphore */
    171 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
    172 		/* Delay obtaining semaphore again to allow FW access */
    173 		msec_delay(hw->eeprom.semaphore_delay);
    174 
    175 		/* Now restart DSP by setting Restart_AN and clearing LMS */
    176 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
    177 				IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
    178 				IXGBE_AUTOC_AN_RESTART));
    179 
    180 		/* Wait for AN to leave state 0 */
    181 		for (i = 0; i < 10; i++) {
    182 			msec_delay(4);
    183 			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
    184 			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
    185 				break;
    186 		}
    187 		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
    188 			DEBUGOUT("sfp module setup not complete\n");
    189 			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
    190 			goto setup_sfp_out;
    191 		}
    192 
    193 		/* Restart DSP by setting Restart_AN and return to SFI mode */
    194 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
    195 				IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
    196 				IXGBE_AUTOC_AN_RESTART));
    197 	}
    198 
    199 setup_sfp_out:
    200 	return ret_val;
    201 }
    202 
    203 /**
    204  *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
    205  *  @hw: pointer to hardware structure
    206  *
    207  *  Initialize the function pointers and assign the MAC type for 82599.
    208  *  Does not touch the hardware.
    209  **/
    211 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
    212 {
    213 	struct ixgbe_mac_info *mac = &hw->mac;
    214 	struct ixgbe_phy_info *phy = &hw->phy;
    215 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
    216 	s32 ret_val;
    217 
    218 	DEBUGFUNC("ixgbe_init_ops_82599");
    219 
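         	/*
         	 * Install the generic function tables first; the 82599-specific
         	 * handlers assigned below override them.
         	 */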
    220 	ret_val = ixgbe_init_phy_ops_generic(hw);
    221 	ret_val = ixgbe_init_ops_generic(hw);
    222 
    223 	/* PHY */
    224 	phy->ops.identify = &ixgbe_identify_phy_82599;
    225 	phy->ops.init = &ixgbe_init_phy_ops_82599;
    226 
    227 	/* MAC */
    228 	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
    229 	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
    230 	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
    231 	mac->ops.get_supported_physical_layer =
    232 				    &ixgbe_get_supported_physical_layer_82599;
    233 	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
    234 	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
    235 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
    236 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
    237 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
    238 	mac->ops.start_hw = &ixgbe_start_hw_82599;
    239 	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
    240 	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
    241 	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
    242 	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
    243 	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
    244 
    245 	/* RAR, Multicast, VLAN */
    246 	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
    247 	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
    248 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
    249 	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
    250 	mac->rar_highwater = 1;
    251 	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
    252 	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
    253 	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
    254 	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
    255 	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
    256 	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
    257 	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
    258 
    259 	/* Link */
    260 	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
    261 	mac->ops.check_link = &ixgbe_check_mac_link_generic;
    262 	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
    263 	ixgbe_init_mac_link_ops_82599(hw);
    264 
    265 	mac->mcft_size		= 128;
    266 	mac->vft_size		= 128;
    267 	mac->num_rar_entries	= 128;
    268 	mac->rx_pb_size		= 512;
    269 	mac->max_tx_queues	= 128;
    270 	mac->max_rx_queues	= 128;
    271 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
    272 
    273 	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
    274 				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
    275 
    276 	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
    277 
    278 	/* EEPROM */
    279 	eeprom->ops.read = &ixgbe_read_eeprom_82599;
    280 	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
    281 
    282 	/* Manageability interface */
    283 	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
    284 
    285 
    286 	return ret_val;
    287 }
    288 
    289 /**
    290  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
    291  *  @hw: pointer to hardware structure
    292  *  @speed: pointer to link speed
    293  *  @negotiation: TRUE when autoneg or autotry is enabled
    294  *
    295  *  Determines the link capabilities by reading the AUTOC register.
    296  **/
    297 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
    298 				      ixgbe_link_speed *speed,
    299 				      bool *negotiation)
    300 {
    301 	s32 status = IXGBE_SUCCESS;
    302 	u32 autoc = 0;
    303 
    304 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
    305 
    306 
    307 	/* Check if 1G SFP module. */
    308 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
    309 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
    310 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
    311 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
    312 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    313 		*negotiation = TRUE;
    314 		goto out;
    315 	}
    316 
    317 	/*
    318 	 * Determine link capabilities based on the stored value of AUTOC,
    319 	 * which represents EEPROM defaults.  If AUTOC value has not
    320 	 * been stored, use the current register values.
    321 	 */
    322 	if (hw->mac.orig_link_settings_stored)
    323 		autoc = hw->mac.orig_autoc;
    324 	else
    325 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    326 
    327 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
    328 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
    329 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    330 		*negotiation = FALSE;
    331 		break;
    332 
    333 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
    334 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    335 		*negotiation = FALSE;
    336 		break;
    337 
    338 	case IXGBE_AUTOC_LMS_1G_AN:
    339 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    340 		*negotiation = TRUE;
    341 		break;
    342 
    343 	case IXGBE_AUTOC_LMS_10G_SERIAL:
    344 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    345 		*negotiation = FALSE;
    346 		break;
    347 
    348 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
    349 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
    350 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
    351 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    352 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    353 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    354 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    355 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    356 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    357 		*negotiation = TRUE;
    358 		break;
    359 
    360 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
    361 		*speed = IXGBE_LINK_SPEED_100_FULL;
    362 		if (autoc & IXGBE_AUTOC_KR_SUPP)
    363 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    364 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    365 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    366 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    367 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    368 		*negotiation = TRUE;
    369 		break;
    370 
    371 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
    372 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
    373 		*negotiation = FALSE;
    374 		break;
    375 
    376 	default:
    377 		status = IXGBE_ERR_LINK_SETUP;
    378 		goto out;
    379 		break;
    380 	}
    381 
    382 	if (hw->phy.multispeed_fiber) {
    383 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
    384 			  IXGBE_LINK_SPEED_1GB_FULL;
    385 		*negotiation = TRUE;
    386 	}
    387 
    388 out:
    389 	return status;
    390 }
    391 
    392 /**
    393  *  ixgbe_get_media_type_82599 - Get media type
    394  *  @hw: pointer to hardware structure
    395  *
    396  *  Returns the media type (fiber, copper, backplane)
    397  **/
    398 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
    399 {
    400 	enum ixgbe_media_type media_type;
    401 
    402 	DEBUGFUNC("ixgbe_get_media_type_82599");
    403 
    404 	/* Detect if there is a copper PHY attached. */
    405 	switch (hw->phy.type) {
    406 	case ixgbe_phy_cu_unknown:
    407 	case ixgbe_phy_tn:
    408 		media_type = ixgbe_media_type_copper;
    409 		goto out;
    410 	default:
    411 		break;
    412 	}
    413 
    414 	switch (hw->device_id) {
    415 	case IXGBE_DEV_ID_82599_KX4:
    416 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
    417 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
    418 	case IXGBE_DEV_ID_82599_KR:
    419 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
    420 	case IXGBE_DEV_ID_82599_XAUI_LOM:
    421 		/* Default device ID is mezzanine card KX/KX4 */
    422 		media_type = ixgbe_media_type_backplane;
    423 		break;
    424 	case IXGBE_DEV_ID_82599_SFP:
    425 	case IXGBE_DEV_ID_82599_SFP_FCOE:
    426 	case IXGBE_DEV_ID_82599_SFP_EM:
    427 	case IXGBE_DEV_ID_82599_SFP_SF2:
    428 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
    429 	case IXGBE_DEV_ID_82599EN_SFP:
    430 		media_type = ixgbe_media_type_fiber;
    431 		break;
    432 	case IXGBE_DEV_ID_82599_CX4:
    433 		media_type = ixgbe_media_type_cx4;
    434 		break;
    435 	case IXGBE_DEV_ID_82599_T3_LOM:
    436 		media_type = ixgbe_media_type_copper;
    437 		break;
    438 	default:
    439 		media_type = ixgbe_media_type_unknown;
    440 		break;
    441 	}
    442 out:
    443 	return media_type;
    444 }
    445 
    446 /**
    447  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
    448  *  @hw: pointer to hardware structure
    449  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    450  *
    451  *  Configures link settings based on values in the ixgbe_hw struct.
    452  *  Restarts the link.  Performs autonegotiation if needed.
    453  **/
    454 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
    455 			       bool autoneg_wait_to_complete)
    456 {
    457 	u32 autoc_reg;
    458 	u32 links_reg;
    459 	u32 i;
    460 	s32 status = IXGBE_SUCCESS;
    461 
    462 	DEBUGFUNC("ixgbe_start_mac_link_82599");
    463 
    464 
    465 	/* Restart link */
    466 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    467 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
    468 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
    469 
    470 	/* Only poll for autoneg to complete if specified to do so */
    471 	if (autoneg_wait_to_complete) {
    472 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
    473 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
    474 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
    475 		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    476 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
    477 		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    478 			links_reg = 0; /* Just in case Autoneg time = 0 */
    479 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
    480 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
    481 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
    482 					break;
    483 				msec_delay(100);
    484 			}
    485 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
    486 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
    487 				DEBUGOUT("Autoneg did not complete.\n");
    488 			}
    489 		}
    490 	}
    491 
     492 	/* Add delay to filter out noise during initial link setup */
    493 	msec_delay(50);
    494 
    495 	return status;
    496 }
    497 
    498 /**
    499  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
    500  *  @hw: pointer to hardware structure
    501  *
    502  *  The base drivers may require better control over SFP+ module
    503  *  PHY states.  This includes selectively shutting down the Tx
    504  *  laser on the PHY, effectively halting physical link.
    505  **/
    506 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    507 {
    508 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    509 
    510 	/* Disable tx laser; allow 100us to go dark per spec */
    511 	esdp_reg |= IXGBE_ESDP_SDP3;
    512 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    513 	IXGBE_WRITE_FLUSH(hw);
    514 	usec_delay(100);
    515 }
    516 
    517 /**
    518  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
    519  *  @hw: pointer to hardware structure
    520  *
    521  *  The base drivers may require better control over SFP+ module
    522  *  PHY states.  This includes selectively turning on the Tx
    523  *  laser on the PHY, effectively starting physical link.
    524  **/
    525 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    526 {
    527 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    528 
    529 	/* Enable tx laser; allow 100ms to light up */
    530 	esdp_reg &= ~IXGBE_ESDP_SDP3;
    531 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    532 	IXGBE_WRITE_FLUSH(hw);
    533 	msec_delay(100);
    534 }
    535 
    536 /**
    537  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
    538  *  @hw: pointer to hardware structure
    539  *
    540  *  When the driver changes the link speeds that it can support,
    541  *  it sets autotry_restart to TRUE to indicate that we need to
    542  *  initiate a new autotry session with the link partner.  To do
    543  *  so, we set the speed then disable and re-enable the tx laser, to
    544  *  alert the link partner that it also needs to restart autotry on its
     545  *  end.  This is consistent with true clause 37 autoneg, which also
    546  *  involves a loss of signal.
    547  **/
    548 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    549 {
    550 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
    551 
    552 	if (hw->mac.autotry_restart) {
    553 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
    554 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
    555 		hw->mac.autotry_restart = FALSE;
    556 	}
    557 }
    558 
    559 /**
    560  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
    561  *  @hw: pointer to hardware structure
    562  *  @speed: new link speed
    563  *  @autoneg: TRUE if autonegotiation enabled
    564  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    565  *
    566  *  Set the link speed in the AUTOC register and restarts link.
    567  **/
    568 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
    569 				     ixgbe_link_speed speed, bool autoneg,
    570 				     bool autoneg_wait_to_complete)
    571 {
    572 	s32 status = IXGBE_SUCCESS;
    573 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
    574 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
    575 	u32 speedcnt = 0;
    576 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
    577 	u32 i = 0;
    578 	bool link_up = FALSE;
    579 	bool negotiation;
    580 
    581 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
    582 
    583 	/* Mask off requested but non-supported speeds */
    584 	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
    585 	if (status != IXGBE_SUCCESS)
    586 		return status;
    587 
    588 	speed &= link_speed;
    589 
    590 	/*
    591 	 * Try each speed one by one, highest priority first.  We do this in
    592 	 * software because 10gb fiber doesn't support speed autonegotiation.
    593 	 */
    594 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
    595 		speedcnt++;
    596 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
    597 
    598 		/* If we already have link at this speed, just jump out */
    599 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
    600 		if (status != IXGBE_SUCCESS)
    601 			return status;
    602 
    603 		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
    604 			goto out;
    605 
    606 		/* Set the module link speed */
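         		/*
         		 * Driving SDP5 high (as an output) selects the module's
         		 * 10G rate; the 1G path below clears the same pin.
         		 */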
    607 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
    608 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    609 		IXGBE_WRITE_FLUSH(hw);
    610 
    611 		/* Allow module to change analog characteristics (1G->10G) */
    612 		msec_delay(40);
    613 
    614 		status = ixgbe_setup_mac_link_82599(hw,
    615 						    IXGBE_LINK_SPEED_10GB_FULL,
    616 						    autoneg,
    617 						    autoneg_wait_to_complete);
    618 		if (status != IXGBE_SUCCESS)
    619 			return status;
    620 
    621 		/* Flap the tx laser if it has not already been done */
    622 		ixgbe_flap_tx_laser(hw);
    623 
    624 		/*
    625 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
    626 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
    627 		 * attempted.  82599 uses the same timing for 10g SFI.
    628 		 */
    629 		for (i = 0; i < 5; i++) {
    630 			/* Wait for the link partner to also set speed */
    631 			msec_delay(100);
    632 
    633 			/* If we have link, just jump out */
    634 			status = ixgbe_check_link(hw, &link_speed,
    635 						  &link_up, FALSE);
    636 			if (status != IXGBE_SUCCESS)
    637 				return status;
    638 
    639 			if (link_up)
    640 				goto out;
    641 		}
    642 	}
    643 
    644 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
    645 		speedcnt++;
    646 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
    647 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
    648 
    649 		/* If we already have link at this speed, just jump out */
    650 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
    651 		if (status != IXGBE_SUCCESS)
    652 			return status;
    653 
    654 		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
    655 			goto out;
    656 
    657 		/* Set the module link speed */
    658 		esdp_reg &= ~IXGBE_ESDP_SDP5;
    659 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
    660 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
    661 		IXGBE_WRITE_FLUSH(hw);
    662 
    663 		/* Allow module to change analog characteristics (10G->1G) */
    664 		msec_delay(40);
    665 
    666 		status = ixgbe_setup_mac_link_82599(hw,
    667 						    IXGBE_LINK_SPEED_1GB_FULL,
    668 						    autoneg,
    669 						    autoneg_wait_to_complete);
    670 		if (status != IXGBE_SUCCESS)
    671 			return status;
    672 
    673 		/* Flap the tx laser if it has not already been done */
    674 		ixgbe_flap_tx_laser(hw);
    675 
    676 		/* Wait for the link partner to also set speed */
    677 		msec_delay(100);
    678 
    679 		/* If we have link, just jump out */
    680 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
    681 		if (status != IXGBE_SUCCESS)
    682 			return status;
    683 
    684 		if (link_up)
    685 			goto out;
    686 	}
    687 
    688 	/*
     689 	 * We didn't get link.  Configure back to the highest speed we tried
     690 	 * (if there was more than one).  We call ourselves back with just the
    691 	 * single highest speed that the user requested.
    692 	 */
    693 	if (speedcnt > 1)
    694 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
    695 			highest_link_speed, autoneg, autoneg_wait_to_complete);
    696 
    697 out:
    698 	/* Set autoneg_advertised value based on input link speed */
    699 	hw->phy.autoneg_advertised = 0;
    700 
    701 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
    702 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
    703 
    704 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    705 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
    706 
    707 	return status;
    708 }
    709 
    710 /**
    711  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
    712  *  @hw: pointer to hardware structure
    713  *  @speed: new link speed
    714  *  @autoneg: TRUE if autonegotiation enabled
    715  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    716  *
    717  *  Implements the Intel SmartSpeed algorithm.
    718  **/
    719 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
    720 				    ixgbe_link_speed speed, bool autoneg,
    721 				    bool autoneg_wait_to_complete)
    722 {
    723 	s32 status = IXGBE_SUCCESS;
    724 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
    725 	s32 i, j;
    726 	bool link_up = FALSE;
    727 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    728 
    729 	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
    730 
    731 	 /* Set autoneg_advertised value based on input link speed */
    732 	hw->phy.autoneg_advertised = 0;
    733 
    734 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
    735 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
    736 
    737 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    738 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
    739 
    740 	if (speed & IXGBE_LINK_SPEED_100_FULL)
    741 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
    742 
    743 	/*
    744 	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
    745 	 * autoneg advertisement if link is unable to be established at the
    746 	 * highest negotiated rate.  This can sometimes happen due to integrity
    747 	 * issues with the physical media connection.
    748 	 */
    749 
    750 	/* First, try to get link with full advertisement */
    751 	hw->phy.smart_speed_active = FALSE;
    752 	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
    753 		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
    754 						    autoneg_wait_to_complete);
    755 		if (status != IXGBE_SUCCESS)
    756 			goto out;
    757 
    758 		/*
    759 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
    760 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
    761 		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
    762 		 * Table 9 in the AN MAS.
    763 		 */
    764 		for (i = 0; i < 5; i++) {
    765 			msec_delay(100);
    766 
    767 			/* If we have link, just jump out */
    768 			status = ixgbe_check_link(hw, &link_speed, &link_up,
    769 						  FALSE);
    770 			if (status != IXGBE_SUCCESS)
    771 				goto out;
    772 
    773 			if (link_up)
    774 				goto out;
    775 		}
    776 	}
    777 
    778 	/*
    779 	 * We didn't get link.  If we advertised KR plus one of KX4/KX
    780 	 * (or BX4/BX), then disable KR and try again.
    781 	 */
    782 	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
    783 	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
    784 		goto out;
    785 
    786 	/* Turn SmartSpeed on to disable KR support */
    787 	hw->phy.smart_speed_active = TRUE;
    788 	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
    789 					    autoneg_wait_to_complete);
    790 	if (status != IXGBE_SUCCESS)
    791 		goto out;
    792 
    793 	/*
    794 	 * Wait for the controller to acquire link.  600ms will allow for
    795 	 * the AN link_fail_inhibit_timer as well for multiple cycles of
    796 	 * parallel detect, both 10g and 1g. This allows for the maximum
    797 	 * connect attempts as defined in the AN MAS table 73-7.
    798 	 */
    799 	for (i = 0; i < 6; i++) {
    800 		msec_delay(100);
    801 
    802 		/* If we have link, just jump out */
    803 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
    804 		if (status != IXGBE_SUCCESS)
    805 			goto out;
    806 
    807 		if (link_up)
    808 			goto out;
    809 	}
    810 
    811 	/* We didn't get link.  Turn SmartSpeed back off. */
    812 	hw->phy.smart_speed_active = FALSE;
    813 	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
    814 					    autoneg_wait_to_complete);
    815 
    816 out:
    817 	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
    818 		DEBUGOUT("Smartspeed has downgraded the link speed "
    819 		"from the maximum advertised\n");
    820 	return status;
    821 }
    822 
    823 /**
    824  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
    825  *  @hw: pointer to hardware structure
    826  *  @speed: new link speed
    827  *  @autoneg: TRUE if autonegotiation enabled
    828  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    829  *
    830  *  Set the link speed in the AUTOC register and restarts link.
    831  **/
    832 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
    833 			       ixgbe_link_speed speed, bool autoneg,
    834 			       bool autoneg_wait_to_complete)
    835 {
    836 	s32 status = IXGBE_SUCCESS;
    837 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    838 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
    839 	u32 start_autoc = autoc;
    840 	u32 orig_autoc = 0;
    841 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
    842 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
    843 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
    844 	u32 links_reg;
    845 	u32 i;
    846 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
    847 
    848 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
    849 
    850 	/* Check to see if speed passed in is supported. */
    851 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
    852 	if (status != IXGBE_SUCCESS)
    853 		goto out;
    854 
    855 	speed &= link_capabilities;
    856 
    857 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
    858 		status = IXGBE_ERR_LINK_SETUP;
    859 		goto out;
    860 	}
    861 
    862 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
    863 	if (hw->mac.orig_link_settings_stored)
    864 		orig_autoc = hw->mac.orig_autoc;
    865 	else
    866 		orig_autoc = autoc;
    867 
    868 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    869 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    870 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    871 		/* Set KX4/KX/KR support according to speed requested */
    872 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
     873 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
     874 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
     875 				autoc |= IXGBE_AUTOC_KX4_SUPP;
     876 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
     877 			    (hw->phy.smart_speed_active == FALSE))
     878 				autoc |= IXGBE_AUTOC_KR_SUPP;
         		}
    879 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    880 			autoc |= IXGBE_AUTOC_KX_SUPP;
    881 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
    882 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
    883 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
    884 		/* Switch from 1G SFI to 10G SFI if requested */
    885 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
    886 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
    887 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    888 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
    889 		}
    890 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
    891 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
    892 		/* Switch from 10G SFI to 1G SFI if requested */
    893 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
    894 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
    895 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
    896 			if (autoneg)
    897 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
    898 			else
    899 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
    900 		}
    901 	}
    902 
    903 	if (autoc != start_autoc) {
    904 		/* Restart link */
    905 		autoc |= IXGBE_AUTOC_AN_RESTART;
    906 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
    907 
    908 		/* Only poll for autoneg to complete if specified to do so */
    909 		if (autoneg_wait_to_complete) {
    910 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
    911 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
    912 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
    913 				links_reg = 0; /*Just in case Autoneg time=0*/
    914 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
    915 					links_reg =
    916 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
    917 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
    918 						break;
    919 					msec_delay(100);
    920 				}
    921 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
    922 					status =
    923 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
    924 					DEBUGOUT("Autoneg did not complete.\n");
    925 				}
    926 			}
    927 		}
    928 
     929 		/* Add delay to filter out noise during initial link setup */
    930 		msec_delay(50);
    931 	}
    932 
    933 out:
    934 	return status;
    935 }
    936 
    937 /**
    938  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
    939  *  @hw: pointer to hardware structure
    940  *  @speed: new link speed
    941  *  @autoneg: TRUE if autonegotiation enabled
    942  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
    943  *
    944  *  Restarts link on PHY and MAC based on settings passed in.
    945  **/
    946 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
    947 					 ixgbe_link_speed speed,
    948 					 bool autoneg,
    949 					 bool autoneg_wait_to_complete)
    950 {
    951 	s32 status;
    952 
    953 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
    954 
    955 	/* Setup the PHY according to input speed */
    956 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
    957 					      autoneg_wait_to_complete);
    958 	/* Set up MAC */
    959 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
    960 
    961 	return status;
    962 }
    963 
    964 /**
    965  *  ixgbe_reset_hw_82599 - Perform hardware reset
    966  *  @hw: pointer to hardware structure
    967  *
    968  *  Resets the hardware by resetting the transmit and receive units, masks
    969  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
    970  *  reset.
    971  **/
    972 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
    973 {
    974 	ixgbe_link_speed link_speed;
    975 	s32 status;
    976 	u32 ctrl, i, autoc, autoc2;
    977 	bool link_up = FALSE;
    978 
    979 	DEBUGFUNC("ixgbe_reset_hw_82599");
    980 
    981 	/* Call adapter stop to disable tx/rx and clear interrupts */
    982 	status = hw->mac.ops.stop_adapter(hw);
    983 	if (status != IXGBE_SUCCESS)
    984 		goto reset_hw_out;
    985 
    986 	/* flush pending Tx transactions */
    987 	ixgbe_clear_tx_pending(hw);
    988 
    989 	/* PHY ops must be identified and initialized prior to reset */
    990 
    991 	/* Identify PHY and related function pointers */
    992 	status = hw->phy.ops.init(hw);
    993 
    994 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
    995 		goto reset_hw_out;
    996 
    997 	/* Setup SFP module if there is one present. */
    998 	if (hw->phy.sfp_setup_needed) {
    999 		status = hw->mac.ops.setup_sfp(hw);
   1000 		hw->phy.sfp_setup_needed = FALSE;
   1001 	}
   1002 
   1003 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
   1004 		goto reset_hw_out;
   1005 
   1006 	/* Reset PHY */
   1007 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
   1008 		hw->phy.ops.reset(hw);
   1009 
   1010 mac_reset_top:
   1011 	/*
   1012 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
   1013 	 * If link reset is used when link is up, it might reset the PHY when
   1014 	 * mng is using it.  If link is down or the flag to force full link
   1015 	 * reset is set, then perform link reset.
   1016 	 */
   1017 	ctrl = IXGBE_CTRL_LNK_RST;
   1018 	if (!hw->force_full_reset) {
   1019 		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
   1020 		if (link_up)
   1021 			ctrl = IXGBE_CTRL_RST;
   1022 	}
   1023 
   1024 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
   1025 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
   1026 	IXGBE_WRITE_FLUSH(hw);
   1027 
   1028 	/* Poll for reset bit to self-clear indicating reset is complete */
   1029 	for (i = 0; i < 10; i++) {
   1030 		usec_delay(1);
   1031 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
   1032 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
   1033 			break;
   1034 	}
   1035 
   1036 	if (ctrl & IXGBE_CTRL_RST_MASK) {
   1037 		status = IXGBE_ERR_RESET_FAILED;
   1038 		DEBUGOUT("Reset polling failed to complete.\n");
   1039 	}
   1040 
   1041 	msec_delay(50);
   1042 
   1043 	/*
   1044 	 * Double resets are required for recovery from certain error
   1045 	 * conditions.  Between resets, it is necessary to stall to allow time
   1046 	 * for any pending HW events to complete.
   1047 	 */
   1048 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
   1049 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
   1050 		goto mac_reset_top;
   1051 	}
   1052 
   1053 	/*
   1054 	 * Store the original AUTOC/AUTOC2 values if they have not been
   1055 	 * stored off yet.  Otherwise restore the stored original
   1056 	 * values since the reset operation sets back to defaults.
   1057 	 */
   1058 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   1059 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   1060 	if (hw->mac.orig_link_settings_stored == FALSE) {
   1061 		hw->mac.orig_autoc = autoc;
   1062 		hw->mac.orig_autoc2 = autoc2;
   1063 		hw->mac.orig_link_settings_stored = TRUE;
   1064 	} else {
   1065 		if (autoc != hw->mac.orig_autoc)
   1066 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
   1067 					IXGBE_AUTOC_AN_RESTART));
   1068 
   1069 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
   1070 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
   1071 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
   1072 			autoc2 |= (hw->mac.orig_autoc2 &
   1073 				   IXGBE_AUTOC2_UPPER_MASK);
   1074 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
   1075 		}
   1076 	}
   1077 
   1078 	/* Store the permanent mac address */
   1079 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
   1080 
   1081 	/*
   1082 	 * Store MAC address from RAR0, clear receive address registers, and
   1083 	 * clear the multicast table.  Also reset num_rar_entries to 128,
   1084 	 * since we modify this value when programming the SAN MAC address.
   1085 	 */
   1086 	hw->mac.num_rar_entries = 128;
   1087 	hw->mac.ops.init_rx_addrs(hw);
   1088 
   1089 	/* Store the permanent SAN mac address */
   1090 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
   1091 
   1092 	/* Add the SAN MAC address to the RAR only if it's a valid address */
   1093 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
   1094 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
   1095 				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
   1096 
   1097 		/* Save the SAN MAC RAR index */
   1098 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
   1099 
   1100 		/* Reserve the last RAR for the SAN MAC address */
   1101 		hw->mac.num_rar_entries--;
   1102 	}
   1103 
   1104 	/* Store the alternative WWNN/WWPN prefix */
   1105 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
   1106 				   &hw->mac.wwpn_prefix);
   1107 
   1108 reset_hw_out:
   1109 	return status;
   1110 }
   1111 
   1112 /**
   1113  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
   1114  *  @hw: pointer to hardware structure
   1115  **/
   1116 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
   1117 {
   1118 	int i;
   1119 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
   1120 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
   1121 
   1122 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
   1123 
   1124 	/*
   1125 	 * Before starting reinitialization process,
   1126 	 * FDIRCMD.CMD must be zero.
   1127 	 */
   1128 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
   1129 		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1130 		      IXGBE_FDIRCMD_CMD_MASK))
   1131 			break;
   1132 		usec_delay(10);
   1133 	}
   1134 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
   1135 		DEBUGOUT("Flow Director previous command isn't complete, "
   1136 			 "aborting table re-initialization.\n");
   1137 		return IXGBE_ERR_FDIR_REINIT_FAILED;
   1138 	}
   1139 
   1140 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
   1141 	IXGBE_WRITE_FLUSH(hw);
   1142 	/*
    1143 	 * The 82599 flow director init flow cannot simply be restarted.
    1144 	 * Work around the silicon errata by performing the following steps
    1145 	 * before re-writing the FDIRCTRL control register with the same value:
    1146 	 * - write 1 to bit 8 of the FDIRCMD register, then
    1147 	 * - write 0 to bit 8 of the FDIRCMD register
   1148 	 */
   1149 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1150 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
   1151 			 IXGBE_FDIRCMD_CLEARHT));
   1152 	IXGBE_WRITE_FLUSH(hw);
   1153 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1154 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
   1155 			 ~IXGBE_FDIRCMD_CLEARHT));
   1156 	IXGBE_WRITE_FLUSH(hw);
   1157 	/*
   1158 	 * Clear FDIR Hash register to clear any leftover hashes
   1159 	 * waiting to be programmed.
   1160 	 */
   1161 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
   1162 	IXGBE_WRITE_FLUSH(hw);
   1163 
   1164 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1165 	IXGBE_WRITE_FLUSH(hw);
   1166 
   1167 	/* Poll init-done after we write FDIRCTRL register */
   1168 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1169 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1170 				   IXGBE_FDIRCTRL_INIT_DONE)
   1171 			break;
   1172 		usec_delay(10);
   1173 	}
   1174 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
   1175 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
   1176 		return IXGBE_ERR_FDIR_REINIT_FAILED;
   1177 	}
   1178 
   1179 	/* Clear FDIR statistics registers (read to clear) */
   1180 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
   1181 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
   1182 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
   1183 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
   1184 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
   1185 
   1186 	return IXGBE_SUCCESS;
   1187 }
   1188 
   1189 /**
   1190  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
   1191  *  @hw: pointer to hardware structure
   1192  *  @fdirctrl: value to write to flow director control register
   1193  **/
   1194 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1195 {
   1196 	int i;
   1197 
   1198 	DEBUGFUNC("ixgbe_fdir_enable_82599");
   1199 
   1200 	/* Prime the keys for hashing */
   1201 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
   1202 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
   1203 
   1204 	/*
   1205 	 * Poll init-done after we write the register.  Estimated times:
   1206 	 *      10G: PBALLOC = 11b, timing is 60us
   1207 	 *       1G: PBALLOC = 11b, timing is 600us
   1208 	 *     100M: PBALLOC = 11b, timing is 6ms
   1209 	 *
    1210 	 *     Multiply these timings by 4 if under full Rx load
   1211 	 *
   1212 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
   1213 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
   1214 	 * this might not finish in our poll time, but we can live with that
   1215 	 * for now.
   1216 	 */
   1217 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
   1218 	IXGBE_WRITE_FLUSH(hw);
   1219 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
   1220 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
   1221 				   IXGBE_FDIRCTRL_INIT_DONE)
   1222 			break;
   1223 		msec_delay(1);
   1224 	}
   1225 
   1226 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
   1227 		DEBUGOUT("Flow Director poll time exceeded!\n");
   1228 }
   1229 
   1230 /**
   1231  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
   1232  *  @hw: pointer to hardware structure
   1233  *  @fdirctrl: value to write to flow director control register, initially
   1234  *	     contains just the value of the Rx packet buffer allocation
   1235  **/
   1236 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1237 {
   1238 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
   1239 
   1240 	/*
   1241 	 * Continue setup of fdirctrl register bits:
   1242 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1243 	 *  Set the maximum length per hash bucket to 0xA filters
    1244 	 *  Send interrupt when 64 (0x4 * 16) filters are left
   1245 	 */
   1246 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1247 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1248 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1249 
   1250 	/* write hashes and fdirctrl register, poll for completion */
   1251 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1252 
   1253 	return IXGBE_SUCCESS;
   1254 }
   1255 
   1256 /**
   1257  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
   1258  *  @hw: pointer to hardware structure
   1259  *  @fdirctrl: value to write to flow director control register, initially
   1260  *	     contains just the value of the Rx packet buffer allocation
   1261  **/
   1262 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
   1263 {
   1264 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
   1265 
   1266 	/*
   1267 	 * Continue setup of fdirctrl register bits:
   1268 	 *  Turn perfect match filtering on
   1269 	 *  Report hash in RSS field of Rx wb descriptor
   1270 	 *  Initialize the drop queue
   1271 	 *  Move the flexible bytes to use the ethertype - shift 6 words
   1272 	 *  Set the maximum length per hash bucket to 0xA filters
   1273 	 *  Send interrupt when 64 (0x4 * 16) filters are left
   1274 	 */
   1275 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
   1276 		    IXGBE_FDIRCTRL_REPORT_STATUS |
   1277 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
   1278 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
   1279 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
   1280 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
   1281 
   1282 	/* write hashes and fdirctrl register, poll for completion */
   1283 	ixgbe_fdir_enable_82599(hw, fdirctrl);
   1284 
   1285 	return IXGBE_SUCCESS;
   1286 }
   1287 
   1288 /*
   1289  * These defines allow us to quickly generate all of the necessary instructions
   1290  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
   1291  * for values 0 through 15
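          *
          * Each iteration n examines bit n and bit n+16 of the hash keys and
          * conditionally XORs the shifted low/high hash dwords into the
          * running common, bucket and signature hashes.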
   1292  */
   1293 #define IXGBE_ATR_COMMON_HASH_KEY \
   1294 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
   1295 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
   1296 do { \
   1297 	u32 n = (_n); \
   1298 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
   1299 		common_hash ^= lo_hash_dword >> n; \
   1300 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
   1301 		bucket_hash ^= lo_hash_dword >> n; \
   1302 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
   1303 		sig_hash ^= lo_hash_dword << (16 - n); \
   1304 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
   1305 		common_hash ^= hi_hash_dword >> n; \
   1306 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
   1307 		bucket_hash ^= hi_hash_dword >> n; \
   1308 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
   1309 		sig_hash ^= hi_hash_dword << (16 - n); \
    1310 } while (0)
   1311 
   1312 /**
   1313  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
    1314  *  @input: input bitstream to compute the hash on
          *  @common: compressed common input dword
    1315  *
    1316  *  This function is almost identical to the function above but contains
    1317  *  several optimizations such as unwinding all of the loops, letting the
   1318  *  compiler work out all of the conditional ifs since the keys are static
   1319  *  defines, and computing two keys at once since the hashed dword stream
   1320  *  will be the same for both keys.
   1321  **/
   1322 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
   1323 				     union ixgbe_atr_hash_dword common)
   1324 {
   1325 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
   1326 	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
   1327 
   1328 	/* record the flow_vm_vlan bits as they are a key part to the hash */
   1329 	flow_vm_vlan = IXGBE_NTOHL(input.dword);
   1330 
   1331 	/* generate common hash dword */
   1332 	hi_hash_dword = IXGBE_NTOHL(common.dword);
   1333 
   1334 	/* low dword is word swapped version of common */
   1335 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
   1336 
   1337 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
   1338 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
   1339 
   1340 	/* Process bits 0 and 16 */
   1341 	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
   1342 
   1343 	/*
    1344 	 * Apply the flow ID/VM pool/VLAN ID bits to the lo hash dword.  This
    1345 	 * is delayed because bit 0 of the stream should not be processed,
    1346 	 * so the VLAN is not added until after bit 0 has been processed.
   1347 	 */
   1348 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
   1349 
    1350 	/* Process remaining 30 bits of the key */
   1351 	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
   1352 	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
   1353 	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
   1354 	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
   1355 	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
   1356 	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
   1357 	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
   1358 	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
   1359 	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
   1360 	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
   1361 	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
   1362 	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
   1363 	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
   1364 	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
   1365 	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
   1366 
   1367 	/* combine common_hash result with signature and bucket hashes */
   1368 	bucket_hash ^= common_hash;
   1369 	bucket_hash &= IXGBE_ATR_HASH_MASK;
   1370 
   1371 	sig_hash ^= common_hash << 16;
   1372 	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
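         	/*
         	 * The signature hash lands in the upper half of the result and
         	 * the bucket hash in the lower half.
         	 */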
   1373 
   1374 	/* return completed signature hash */
   1375 	return sig_hash ^ bucket_hash;
   1376 }
   1377 
   1378 /**
    1379  *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
   1380  *  @hw: pointer to hardware structure
   1381  *  @input: unique input dword
   1382  *  @common: compressed common input dword
   1383  *  @queue: queue index to direct traffic to
   1384  **/
   1385 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
   1386 					  union ixgbe_atr_hash_dword input,
   1387 					  union ixgbe_atr_hash_dword common,
   1388 					  u8 queue)
   1389 {
   1390 	u64  fdirhashcmd;
   1391 	u32  fdircmd;
   1392 
   1393 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
   1394 
   1395 	/*
   1396 	 * Get the flow_type in order to program FDIRCMD properly
   1397 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
   1398 	 */
   1399 	switch (input.formatted.flow_type) {
   1400 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   1401 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   1402 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   1403 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
   1404 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
   1405 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
   1406 		break;
   1407 	default:
   1408 		DEBUGOUT(" Error on flow type input\n");
   1409 		return IXGBE_ERR_CONFIG;
   1410 	}
   1411 
   1412 	/* configure FDIRCMD register */
   1413 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
   1414 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
   1415 	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
   1416 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
   1417 
   1418 	/*
   1419 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
   1420 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
   1421 	 */
   1422 	fdirhashcmd = (u64)fdircmd << 32;
   1423 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
   1424 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
   1425 
   1426 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
   1427 
   1428 	return IXGBE_SUCCESS;
   1429 }
   1430 
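         /*
          * Bucket-hash-only variant of the iteration above: folds bit n and
          * bit n+16 of the bucket hash key into bucket_hash.
          */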
   1431 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
   1432 do { \
   1433 	u32 n = (_n); \
   1434 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
   1435 		bucket_hash ^= lo_hash_dword >> n; \
   1436 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
   1437 		bucket_hash ^= hi_hash_dword >> n; \
    1438 } while (0)
   1439 
   1440 /**
   1441  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
   1442  *  @atr_input: input bitstream to compute the hash on
   1443  *  @input_mask: mask for the input bitstream
   1444  *
    1445  *  This function serves two main purposes.  First, it applies the input_mask
    1446  *  to the atr_input, resulting in a cleaned up atr_input data stream.
    1447  *  Second, it computes the hash and stores it in the bkt_hash field at
   1448  *  the end of the input byte stream.  This way it will be available for
   1449  *  future use without needing to recompute the hash.
   1450  **/
   1451 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
   1452 					  union ixgbe_atr_input *input_mask)
   1453 {
   1454 
   1455 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
   1456 	u32 bucket_hash = 0;
   1457 
   1458 	/* Apply masks to input data */
   1459 	input->dword_stream[0]  &= input_mask->dword_stream[0];
   1460 	input->dword_stream[1]  &= input_mask->dword_stream[1];
   1461 	input->dword_stream[2]  &= input_mask->dword_stream[2];
   1462 	input->dword_stream[3]  &= input_mask->dword_stream[3];
   1463 	input->dword_stream[4]  &= input_mask->dword_stream[4];
   1464 	input->dword_stream[5]  &= input_mask->dword_stream[5];
   1465 	input->dword_stream[6]  &= input_mask->dword_stream[6];
   1466 	input->dword_stream[7]  &= input_mask->dword_stream[7];
   1467 	input->dword_stream[8]  &= input_mask->dword_stream[8];
   1468 	input->dword_stream[9]  &= input_mask->dword_stream[9];
   1469 	input->dword_stream[10] &= input_mask->dword_stream[10];
   1470 
   1471 	/* record the flow_vm_vlan bits as they are a key part to the hash */
   1472 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
   1473 
   1474 	/* generate common hash dword */
   1475 	hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
   1476 				    input->dword_stream[2] ^
   1477 				    input->dword_stream[3] ^
   1478 				    input->dword_stream[4] ^
   1479 				    input->dword_stream[5] ^
   1480 				    input->dword_stream[6] ^
   1481 				    input->dword_stream[7] ^
   1482 				    input->dword_stream[8] ^
   1483 				    input->dword_stream[9] ^
   1484 				    input->dword_stream[10]);
   1485 
   1486 	/* low dword is word swapped version of common */
   1487 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
   1488 
   1489 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
   1490 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
   1491 
   1492 	/* Process bits 0 and 16 */
   1493 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
   1494 
   1495 	/*
    1496 	 * apply flow ID/VM pool/VLAN ID bits to the lo hash dword.  We had to
    1497 	 * delay this because the bit 0 iteration must see lo_hash_dword without
    1498 	 * the VLAN bits, so we only add them after bit 0 has been processed
   1499 	 */
   1500 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
   1501 
    1502 	/* Process the remaining 30 bits of the key */
   1503 	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
   1504 	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
   1505 	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
   1506 	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
   1507 	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
   1508 	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
   1509 	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
   1510 	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
   1511 	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
   1512 	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
   1513 	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
   1514 	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
   1515 	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
   1516 	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
   1517 	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
   1518 
   1519 	/*
   1520 	 * Limit hash to 13 bits since max bucket count is 8K.
   1521 	 * Store result at the end of the input stream.
   1522 	 */
   1523 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
   1524 }
   1525 
   1526 /**
    1527  *  ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
   1528  *  @input_mask: mask to be bit swapped
   1529  *
    1530  *  The source and destination port masks for flow director are bit swapped
    1531  *  in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2,
    1532  *  and so on.  In order to generate a correctly swapped value we need to
    1533  *  bit swap the mask, and that is what this function accomplishes.
   1534  **/
   1535 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
   1536 {
   1537 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
   1538 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
   1539 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
   1540 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
   1541 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
   1542 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
   1543 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
   1544 }
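/*
 * Worked example (illustration only, values shown after IXGBE_NTOHS, i.e.
 * host order): with a destination port mask of 0xFFFF and a source port
 * mask of 0xFFF0 (low four port bits ignored), the combined value is
 * 0xFFFFFFF0.  Reversing the bit order within each 16-bit half yields
 * 0xFFFF0FFF, with the destination mask still in the upper half; the caller
 * then writes the one's complement of this value to FDIRTCPM/FDIRUDPM.
 */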
   1545 
   1546 /*
    1547  * These macros are meant to address the fact that we have registers
    1548  * that are either entirely or in part big-endian.  As a result, on big-endian
   1549  * systems we will end up byte swapping the value to little-endian before
   1550  * it is byte swapped again and written to the hardware in the original
   1551  * big-endian format.
   1552  */
   1553 #define IXGBE_STORE_AS_BE32(_value) \
   1554 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
   1555 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
   1556 
   1557 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
   1558 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
   1559 
   1560 #define IXGBE_STORE_AS_BE16(_value) \
   1561 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
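/*
 * For illustration: IXGBE_STORE_AS_BE32(0x11223344) evaluates to 0x44332211,
 * i.e. a plain byte reversal of the 32-bit value; the surrounding macros
 * layer this with IXGBE_NTOHL/IXGBE_NTOHS so that the byte layout seen by
 * the hardware is the big-endian one described in the comment above.
 */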
   1562 
   1563 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
   1564 				    union ixgbe_atr_input *input_mask)
   1565 {
   1566 	/* mask IPv6 since it is currently not supported */
   1567 	u32 fdirm = IXGBE_FDIRM_DIPv6;
   1568 	u32 fdirtcpm;
   1569 
    1570 	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
   1571 
   1572 	/*
   1573 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
   1574 	 * are zero, then assume a full mask for that field.  Also assume that
   1575 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
   1576 	 * cannot be masked out in this implementation.
   1577 	 *
   1578 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
   1579 	 * point in time.
   1580 	 */
   1581 
   1582 	/* verify bucket hash is cleared on hash generation */
   1583 	if (input_mask->formatted.bkt_hash)
   1584 		DEBUGOUT(" bucket hash should always be 0 in mask\n");
   1585 
   1586 	/* Program FDIRM and verify partial masks */
   1587 	switch (input_mask->formatted.vm_pool & 0x7F) {
   1588 	case 0x0:
   1589 		fdirm |= IXGBE_FDIRM_POOL;
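		/* FALLTHROUGH */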
   1590 	case 0x7F:
   1591 		break;
   1592 	default:
   1593 		DEBUGOUT(" Error on vm pool mask\n");
   1594 		return IXGBE_ERR_CONFIG;
   1595 	}
   1596 
   1597 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
   1598 	case 0x0:
   1599 		fdirm |= IXGBE_FDIRM_L4P;
   1600 		if (input_mask->formatted.dst_port ||
   1601 		    input_mask->formatted.src_port) {
   1602 			DEBUGOUT(" Error on src/dst port mask\n");
   1603 			return IXGBE_ERR_CONFIG;
   1604 		}
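		/* FALLTHROUGH */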
   1605 	case IXGBE_ATR_L4TYPE_MASK:
   1606 		break;
   1607 	default:
   1608 		DEBUGOUT(" Error on flow type mask\n");
   1609 		return IXGBE_ERR_CONFIG;
   1610 	}
   1611 
   1612 	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
   1613 	case 0x0000:
   1614 		/* mask VLAN ID, fall through to mask VLAN priority */
   1615 		fdirm |= IXGBE_FDIRM_VLANID;
   1616 	case 0x0FFF:
   1617 		/* mask VLAN priority */
   1618 		fdirm |= IXGBE_FDIRM_VLANP;
   1619 		break;
   1620 	case 0xE000:
   1621 		/* mask VLAN ID only, fall through */
   1622 		fdirm |= IXGBE_FDIRM_VLANID;
   1623 	case 0xEFFF:
   1624 		/* no VLAN fields masked */
   1625 		break;
   1626 	default:
   1627 		DEBUGOUT(" Error on VLAN mask\n");
   1628 		return IXGBE_ERR_CONFIG;
   1629 	}
   1630 
   1631 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
   1632 	case 0x0000:
   1633 		/* Mask Flex Bytes, fall through */
   1634 		fdirm |= IXGBE_FDIRM_FLEX;
   1635 	case 0xFFFF:
   1636 		break;
   1637 	default:
   1638 		DEBUGOUT(" Error on flexible byte mask\n");
   1639 		return IXGBE_ERR_CONFIG;
   1640 	}
   1641 
   1642 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
   1643 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
   1644 
   1645 	/* store the TCP/UDP port masks, bit reversed from port layout */
   1646 	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
   1647 
   1648 	/* write both the same so that UDP and TCP use the same mask */
   1649 	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
   1650 	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
   1651 
    1652 	/* store source and destination IP masks (big-endian) */
   1653 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
   1654 			     ~input_mask->formatted.src_ip[0]);
   1655 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
   1656 			     ~input_mask->formatted.dst_ip[0]);
   1657 
   1658 	return IXGBE_SUCCESS;
   1659 }
   1660 
   1661 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
   1662 					  union ixgbe_atr_input *input,
   1663 					  u16 soft_id, u8 queue)
   1664 {
   1665 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
   1666 
   1667 	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
   1668 
   1669 	/* currently IPv6 is not supported, must be programmed with 0 */
   1670 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
   1671 			     input->formatted.src_ip[0]);
   1672 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
   1673 			     input->formatted.src_ip[1]);
   1674 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
   1675 			     input->formatted.src_ip[2]);
   1676 
   1677 	/* record the source address (big-endian) */
   1678 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
   1679 
   1680 	/* record the first 32 bits of the destination address (big-endian) */
   1681 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
   1682 
    1683 	/* record source and destination port (little-endian) */
   1684 	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
   1685 	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
   1686 	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
   1687 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
   1688 
    1689 	/* record VLAN (little-endian) and flex_bytes (big-endian) */
   1690 	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
   1691 	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
   1692 	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
   1693 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
   1694 
   1695 	/* configure FDIRHASH register */
   1696 	fdirhash = input->formatted.bkt_hash;
   1697 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
   1698 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
   1699 
   1700 	/*
   1701 	 * flush all previous writes to make certain registers are
   1702 	 * programmed prior to issuing the command
   1703 	 */
   1704 	IXGBE_WRITE_FLUSH(hw);
   1705 
   1706 	/* configure FDIRCMD register */
   1707 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
   1708 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
   1709 	if (queue == IXGBE_FDIR_DROP_QUEUE)
   1710 		fdircmd |= IXGBE_FDIRCMD_DROP;
   1711 	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
   1712 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
   1713 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
   1714 
   1715 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
   1716 
   1717 	return IXGBE_SUCCESS;
   1718 }
   1719 
   1720 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
   1721 					  union ixgbe_atr_input *input,
   1722 					  u16 soft_id)
   1723 {
   1724 	u32 fdirhash;
   1725 	u32 fdircmd = 0;
   1726 	u32 retry_count;
   1727 	s32 err = IXGBE_SUCCESS;
   1728 
   1729 	/* configure FDIRHASH register */
   1730 	fdirhash = input->formatted.bkt_hash;
   1731 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
   1732 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
   1733 
   1734 	/* flush hash to HW */
   1735 	IXGBE_WRITE_FLUSH(hw);
   1736 
   1737 	/* Query if filter is present */
   1738 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
   1739 
   1740 	for (retry_count = 10; retry_count; retry_count--) {
   1741 		/* allow 10us for query to process */
   1742 		usec_delay(10);
   1743 		/* verify query completed successfully */
   1744 		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
   1745 		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
   1746 			break;
   1747 	}
   1748 
   1749 	if (!retry_count)
   1750 		err = IXGBE_ERR_FDIR_REINIT_FAILED;
   1751 
   1752 	/* if filter exists in hardware then remove it */
   1753 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
   1754 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
   1755 		IXGBE_WRITE_FLUSH(hw);
   1756 		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
   1757 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
   1758 	}
   1759 
   1760 	return err;
   1761 }
   1762 
   1763 /**
   1764  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
   1765  *  @hw: pointer to hardware structure
   1766  *  @input: input bitstream
   1767  *  @input_mask: mask for the input bitstream
   1768  *  @soft_id: software index for the filters
   1769  *  @queue: queue index to direct traffic to
   1770  *
   1771  *  Note that the caller to this function must lock before calling, since the
   1772  *  hardware writes must be protected from one another.
   1773  **/
   1774 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
   1775 					union ixgbe_atr_input *input,
   1776 					union ixgbe_atr_input *input_mask,
   1777 					u16 soft_id, u8 queue)
   1778 {
   1779 	s32 err = IXGBE_ERR_CONFIG;
   1780 
   1781 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
   1782 
   1783 	/*
   1784 	 * Check flow_type formatting, and bail out before we touch the hardware
   1785 	 * if there's a configuration issue
   1786 	 */
   1787 	switch (input->formatted.flow_type) {
   1788 	case IXGBE_ATR_FLOW_TYPE_IPV4:
   1789 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
   1790 		if (input->formatted.dst_port || input->formatted.src_port) {
   1791 			DEBUGOUT(" Error on src/dst port\n");
   1792 			return IXGBE_ERR_CONFIG;
   1793 		}
   1794 		break;
   1795 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
   1796 		if (input->formatted.dst_port || input->formatted.src_port) {
   1797 			DEBUGOUT(" Error on src/dst port\n");
   1798 			return IXGBE_ERR_CONFIG;
   1799 		}
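		/* FALLTHROUGH */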
   1800 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
   1801 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
   1802 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
   1803 						  IXGBE_ATR_L4TYPE_MASK;
   1804 		break;
   1805 	default:
   1806 		DEBUGOUT(" Error on flow type input\n");
   1807 		return err;
   1808 	}
   1809 
   1810 	/* program input mask into the HW */
   1811 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
   1812 	if (err)
   1813 		return err;
   1814 
   1815 	/* apply mask and compute/store hash */
   1816 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
   1817 
   1818 	/* program filters to filter memory */
   1819 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
   1820 						     soft_id, queue);
   1821 }
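/*
 * Illustrative sketch (not compiled): adding a perfect filter that matches
 * TCPv4 traffic on a single destination port and steers it to queue 1.
 * Only the destination port is masked in (0xFFFF, the same in either byte
 * order); the VLAN, flex-bytes and IP mask fields are left at zero and are
 * therefore wildcarded by ixgbe_fdir_set_input_mask_82599.  "hw" and
 * "dst_port" (a caller-supplied value in network byte order) are assumed to
 * come from the surrounding context, and soft_id 1 is an arbitrary software
 * index chosen by the caller.
 */
#if 0
	union ixgbe_atr_input filter, filter_mask;

	memset(&filter, 0, sizeof(filter));
	memset(&filter_mask, 0, sizeof(filter_mask));

	filter.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	filter.formatted.dst_port = dst_port;		/* network byte order */
	filter_mask.formatted.dst_port = 0xFFFF;	/* match the full port */

	(void)ixgbe_fdir_add_perfect_filter_82599(hw, &filter, &filter_mask,
						  1, 1);
#endif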
   1822 
   1823 /**
   1824  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
   1825  *  @hw: pointer to hardware structure
   1826  *  @reg: analog register to read
   1827  *  @val: read value
   1828  *
   1829  *  Performs read operation to Omer analog register specified.
   1830  **/
   1831 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
   1832 {
   1833 	u32  core_ctl;
   1834 
   1835 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
   1836 
   1837 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
   1838 			(reg << 8));
   1839 	IXGBE_WRITE_FLUSH(hw);
   1840 	usec_delay(10);
   1841 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
   1842 	*val = (u8)core_ctl;
   1843 
   1844 	return IXGBE_SUCCESS;
   1845 }
   1846 
   1847 /**
   1848  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
   1849  *  @hw: pointer to hardware structure
   1850  *  @reg: atlas register to write
   1851  *  @val: value to write
   1852  *
   1853  *  Performs write operation to Omer analog register specified.
   1854  **/
   1855 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
   1856 {
   1857 	u32  core_ctl;
   1858 
   1859 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
   1860 
   1861 	core_ctl = (reg << 8) | val;
   1862 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
   1863 	IXGBE_WRITE_FLUSH(hw);
   1864 	usec_delay(10);
   1865 
   1866 	return IXGBE_SUCCESS;
   1867 }
   1868 
   1869 /**
   1870  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
   1871  *  @hw: pointer to hardware structure
   1872  *
    1873  *  Starts the hardware using the generic start_hw function
    1874  *  and the generation-2 start_hw function, then performs
    1875  *  revision-specific operations, if any.
   1876  **/
   1877 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
   1878 {
   1879 	s32 ret_val = IXGBE_SUCCESS;
   1880 
   1881 	DEBUGFUNC("ixgbe_start_hw_82599");
   1882 
   1883 	ret_val = ixgbe_start_hw_generic(hw);
   1884 	if (ret_val != IXGBE_SUCCESS)
   1885 		goto out;
   1886 
   1887 	ret_val = ixgbe_start_hw_gen2(hw);
   1888 	if (ret_val != IXGBE_SUCCESS)
   1889 		goto out;
   1890 
   1891 	/* We need to run link autotry after the driver loads */
   1892 	hw->mac.autotry_restart = TRUE;
   1893 
   1894 	if (ret_val == IXGBE_SUCCESS)
   1895 		ret_val = ixgbe_verify_fw_version_82599(hw);
   1896 out:
   1897 	return ret_val;
   1898 }
   1899 
   1900 /**
   1901  *  ixgbe_identify_phy_82599 - Get physical layer module
   1902  *  @hw: pointer to hardware structure
   1903  *
   1904  *  Determines the physical layer module found on the current adapter.
    1905  *  If the PHY is already detected, maintains the current PHY type in the
    1906  *  hw struct; otherwise executes the PHY detection routine.
   1907  **/
   1908 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
   1909 {
   1910 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
   1911 
   1912 	DEBUGFUNC("ixgbe_identify_phy_82599");
   1913 
   1914 	/* Detect PHY if not unknown - returns success if already detected. */
   1915 	status = ixgbe_identify_phy_generic(hw);
   1916 	if (status != IXGBE_SUCCESS) {
   1917 		/* 82599 10GBASE-T requires an external PHY */
   1918 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
   1919 			goto out;
   1920 		else
   1921 			status = ixgbe_identify_module_generic(hw);
   1922 	}
   1923 
   1924 	/* Set PHY type none if no PHY detected */
   1925 	if (hw->phy.type == ixgbe_phy_unknown) {
   1926 		hw->phy.type = ixgbe_phy_none;
   1927 		status = IXGBE_SUCCESS;
   1928 	}
   1929 
   1930 	/* Return error if SFP module has been detected but is not supported */
   1931 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
   1932 		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
   1933 
   1934 out:
   1935 	return status;
   1936 }
   1937 
   1938 /**
   1939  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
   1940  *  @hw: pointer to hardware structure
   1941  *
   1942  *  Determines physical layer capabilities of the current configuration.
   1943  **/
   1944 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
   1945 {
   1946 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   1947 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   1948 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
   1949 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
   1950 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
   1951 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
   1952 	u16 ext_ability = 0;
   1953 	u8 comp_codes_10g = 0;
   1954 	u8 comp_codes_1g = 0;
   1955 
    1956 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
   1957 
   1958 	hw->phy.ops.identify(hw);
   1959 
   1960 	switch (hw->phy.type) {
   1961 	case ixgbe_phy_tn:
   1962 	case ixgbe_phy_cu_unknown:
   1963 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
   1964 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
   1965 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
   1966 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
   1967 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
   1968 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
   1969 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
   1970 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
   1971 		goto out;
   1972 	default:
   1973 		break;
   1974 	}
   1975 
   1976 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
   1977 	case IXGBE_AUTOC_LMS_1G_AN:
   1978 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
   1979 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
   1980 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
   1981 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
   1982 			goto out;
   1983 		} else
   1984 			/* SFI mode so read SFP module */
   1985 			goto sfp_check;
   1986 		break;
   1987 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
   1988 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
   1989 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
   1990 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
   1991 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   1992 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
   1993 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
   1994 		goto out;
   1995 		break;
   1996 	case IXGBE_AUTOC_LMS_10G_SERIAL:
   1997 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
   1998 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   1999 			goto out;
   2000 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
   2001 			goto sfp_check;
   2002 		break;
   2003 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
   2004 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
   2005 		if (autoc & IXGBE_AUTOC_KX_SUPP)
   2006 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
   2007 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
   2008 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   2009 		if (autoc & IXGBE_AUTOC_KR_SUPP)
   2010 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
   2011 		goto out;
   2012 		break;
   2013 	default:
   2014 		goto out;
   2015 		break;
   2016 	}
   2017 
   2018 sfp_check:
    2019 	/* SFP check must be done last since DA modules are sometimes used to
    2020 	 * test KR mode - we need to identify KR mode correctly before the SFP
    2021 	 * module.  Call identify_sfp because the pluggable module may have changed. */
   2022 	hw->phy.ops.identify_sfp(hw);
   2023 	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
   2024 		goto out;
   2025 
   2026 	switch (hw->phy.type) {
   2027 	case ixgbe_phy_sfp_passive_tyco:
   2028 	case ixgbe_phy_sfp_passive_unknown:
   2029 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
   2030 		break;
   2031 	case ixgbe_phy_sfp_ftl_active:
   2032 	case ixgbe_phy_sfp_active_unknown:
   2033 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
   2034 		break;
   2035 	case ixgbe_phy_sfp_avago:
   2036 	case ixgbe_phy_sfp_ftl:
   2037 	case ixgbe_phy_sfp_intel:
   2038 	case ixgbe_phy_sfp_unknown:
   2039 		hw->phy.ops.read_i2c_eeprom(hw,
   2040 		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
   2041 		hw->phy.ops.read_i2c_eeprom(hw,
   2042 		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
   2043 		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
   2044 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
   2045 		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
   2046 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
   2047 		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
   2048 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
   2049 		else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
   2050 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
   2051 		break;
   2052 	default:
   2053 		break;
   2054 	}
   2055 
   2056 out:
   2057 	return physical_layer;
   2058 }
   2059 
   2060 /**
   2061  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
   2062  *  @hw: pointer to hardware structure
   2063  *  @regval: register value to write to RXCTRL
   2064  *
   2065  *  Enables the Rx DMA unit for 82599
   2066  **/
   2067 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
   2068 {
   2069 
   2070 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
   2071 
   2072 	/*
   2073 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
   2074 	 * If traffic is incoming before we enable the Rx unit, it could hang
   2075 	 * the Rx DMA unit.  Therefore, make sure the security engine is
   2076 	 * completely disabled prior to enabling the Rx unit.
   2077 	 */
   2078 
   2079 	hw->mac.ops.disable_sec_rx_path(hw);
   2080 
   2081 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
   2082 
   2083 	hw->mac.ops.enable_sec_rx_path(hw);
   2084 
   2085 	return IXGBE_SUCCESS;
   2086 }
   2087 
   2088 /**
   2089  *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
   2090  *  @hw: pointer to hardware structure
   2091  *
    2092  *  Verifies that the installed firmware version is 0.6 or higher
   2093  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
   2094  *
   2095  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
   2096  *  if the FW version is not supported.
   2097  **/
   2098 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
   2099 {
   2100 	s32 status = IXGBE_ERR_EEPROM_VERSION;
   2101 	u16 fw_offset, fw_ptp_cfg_offset;
   2102 	u16 fw_version = 0;
   2103 
   2104 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
   2105 
   2106 	/* firmware check is only necessary for SFI devices */
   2107 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
   2108 		status = IXGBE_SUCCESS;
   2109 		goto fw_version_out;
   2110 	}
   2111 
   2112 	/* get the offset to the Firmware Module block */
   2113 	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
   2114 
   2115 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
   2116 		goto fw_version_out;
   2117 
   2118 	/* get the offset to the Pass Through Patch Configuration block */
   2119 	hw->eeprom.ops.read(hw, (fw_offset +
   2120 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
   2121 				 &fw_ptp_cfg_offset);
   2122 
   2123 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
   2124 		goto fw_version_out;
   2125 
   2126 	/* get the firmware version */
   2127 	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
   2128 			    IXGBE_FW_PATCH_VERSION_4), &fw_version);
   2129 
   2130 	if (fw_version > 0x5)
   2131 		status = IXGBE_SUCCESS;
   2132 
   2133 fw_version_out:
   2134 	return status;
   2135 }
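/*
 * EEPROM pointer chain walked above (all offsets are 16-bit EEPROM words):
 *
 *   IXGBE_FW_PTR                                      -> fw_offset
 *   fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR -> fw_ptp_cfg_offset
 *   fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4      -> fw_version
 *
 * A value greater than 0x5 is accepted, matching the "0.6 or higher"
 * requirement described in the function comment.
 */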
   2136 
   2137 /**
   2138  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
   2139  *  @hw: pointer to hardware structure
   2140  *
   2141  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
    2142  *  returns FALSE.  Smart Speed must be disabled if the LESM FW module is enabled.
   2143  **/
   2144 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
   2145 {
   2146 	bool lesm_enabled = FALSE;
   2147 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
   2148 	s32 status;
   2149 
   2150 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
   2151 
   2152 	/* get the offset to the Firmware Module block */
   2153 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
   2154 
   2155 	if ((status != IXGBE_SUCCESS) ||
   2156 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
   2157 		goto out;
   2158 
   2159 	/* get the offset to the LESM Parameters block */
   2160 	status = hw->eeprom.ops.read(hw, (fw_offset +
   2161 				     IXGBE_FW_LESM_PARAMETERS_PTR),
   2162 				     &fw_lesm_param_offset);
   2163 
   2164 	if ((status != IXGBE_SUCCESS) ||
   2165 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
   2166 		goto out;
   2167 
   2168 	/* get the lesm state word */
   2169 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
   2170 				     IXGBE_FW_LESM_STATE_1),
   2171 				     &fw_lesm_state);
   2172 
   2173 	if ((status == IXGBE_SUCCESS) &&
   2174 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
   2175 		lesm_enabled = TRUE;
   2176 
   2177 out:
   2178 	return lesm_enabled;
   2179 }
   2180 
   2181 /**
   2182  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
   2183  *  fastest available method
   2184  *
   2185  *  @hw: pointer to hardware structure
    2186  *  @offset: offset of word in EEPROM to read
   2187  *  @words: number of words
   2188  *  @data: word(s) read from the EEPROM
   2189  *
    2190  *  Retrieves 16-bit word(s) from the EEPROM
   2191  **/
   2192 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
   2193 					  u16 words, u16 *data)
   2194 {
   2195 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2196 	s32 ret_val = IXGBE_ERR_CONFIG;
   2197 
   2198 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
   2199 
   2200 	/*
   2201 	 * If EEPROM is detected and can be addressed using 14 bits,
    2202 	 * use EERD; otherwise use bit bang
   2203 	 */
   2204 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2205 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
   2206 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
   2207 							 data);
   2208 	else
   2209 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
   2210 								    words,
   2211 								    data);
   2212 
   2213 	return ret_val;
   2214 }
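/*
 * For example, assuming an SPI EEPROM was detected, a read of 8 words
 * starting at offset IXGBE_EERD_MAX_ADDR - 7 still fits the EERD window
 * (offset + words - 1 == IXGBE_EERD_MAX_ADDR), while a buffer ending past
 * that address falls back to the slower bit-banged path above.
 */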
   2215 
   2216 /**
   2217  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
   2218  *  fastest available method
   2219  *
   2220  *  @hw: pointer to hardware structure
    2221  *  @offset: offset of word in the EEPROM to read
   2222  *  @data: word read from the EEPROM
   2223  *
   2224  *  Reads a 16 bit word from the EEPROM
   2225  **/
   2226 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
   2227 				   u16 offset, u16 *data)
   2228 {
   2229 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
   2230 	s32 ret_val = IXGBE_ERR_CONFIG;
   2231 
   2232 	DEBUGFUNC("ixgbe_read_eeprom_82599");
   2233 
   2234 	/*
   2235 	 * If EEPROM is detected and can be addressed using 14 bits,
    2236 	 * use EERD; otherwise use bit bang
   2237 	 */
   2238 	if ((eeprom->type == ixgbe_eeprom_spi) &&
   2239 	    (offset <= IXGBE_EERD_MAX_ADDR))
   2240 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
   2241 	else
   2242 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
   2243 
   2244 	return ret_val;
   2245 }
   2246 
   2247 
   2248