ixgbe_82598.c revision 1.3
      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2012, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 238149 2012-07-05 20:51:44Z jfv $*/
     34 /*$NetBSD: ixgbe_82598.c,v 1.3 2015/04/02 09:26:55 msaitoh Exp $*/
     35 
     36 #include "ixgbe_type.h"
     37 #include "ixgbe_82598.h"
     38 #include "ixgbe_api.h"
     39 #include "ixgbe_common.h"
     40 #include "ixgbe_phy.h"
     41 
     42 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
     43 					     ixgbe_link_speed *speed,
     44 					     bool *autoneg);
     45 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
     46 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
     47 				      bool autoneg_wait_to_complete);
     48 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
     49 				      ixgbe_link_speed *speed, bool *link_up,
     50 				      bool link_up_wait_to_complete);
     51 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
     52 				      ixgbe_link_speed speed,
     53 				      bool autoneg,
     54 				      bool autoneg_wait_to_complete);
     55 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
     56 					 ixgbe_link_speed speed,
     57 					 bool autoneg,
     58 					 bool autoneg_wait_to_complete);
     59 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
     60 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
     61 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
     62 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
     63 				  u32 headroom, int strategy);
     64 
     65 /**
     66  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
     67  *  @hw: pointer to the HW structure
     68  *
      69  *  The completion timeout defaults for 82598 should be in the range of 50us
      70  *  to 50ms; however, the hardware default for these parts is 500us to 1ms,
      71  *  which is less than the 10ms recommended by the PCIe spec.  To address this
      72  *  we need to increase the value to either 10ms to 250ms for capability
      73  *  version 1 devices, or 16ms to 55ms for version 2.
     74  **/
     75 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
     76 {
     77 	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
     78 	u16 pcie_devctl2;
     79 
     80 	/* only take action if timeout value is defaulted to 0 */
     81 	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
     82 		goto out;
     83 
     84 	/*
      85 	 * if the capability version is 1 we can write the
     86 	 * timeout of 10ms to 250ms through the GCR register
     87 	 */
     88 	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
     89 		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
     90 		goto out;
     91 	}
     92 
     93 	/*
     94 	 * for version 2 capabilities we need to write the config space
      95 	 * directly in order to set the completion timeout to a value in
      96 	 * the 16ms to 55ms range
     97 	 */
     98 	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
     99 	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
    100 	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
    101 out:
    102 	/* disable completion timeout resend */
    103 	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
    104 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
    105 }
    106 
    107 /**
    108  *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
    109  *  @hw: pointer to hardware structure
    110  *
    111  *  Initialize the function pointers and assign the MAC type for 82598.
    112  *  Does not touch the hardware.
    113  **/
    114 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
    115 {
    116 	struct ixgbe_mac_info *mac = &hw->mac;
    117 	struct ixgbe_phy_info *phy = &hw->phy;
    118 	s32 ret_val;
    119 
    120 	DEBUGFUNC("ixgbe_init_ops_82598");
    121 
    122 	ret_val = ixgbe_init_phy_ops_generic(hw);
    123 	ret_val = ixgbe_init_ops_generic(hw);
    124 
    125 	/* PHY */
    126 	phy->ops.init = &ixgbe_init_phy_ops_82598;
    127 
    128 	/* MAC */
    129 	mac->ops.start_hw = &ixgbe_start_hw_82598;
    130 	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
    131 	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
    132 	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
    133 	mac->ops.get_supported_physical_layer =
    134 				&ixgbe_get_supported_physical_layer_82598;
    135 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
    136 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
    137 	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
    138 
    139 	/* RAR, Multicast, VLAN */
    140 	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
    141 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
    142 	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
    143 	mac->ops.set_vlvf = NULL;
    144 	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
    145 
    146 	/* Flow Control */
    147 	mac->ops.fc_enable = &ixgbe_fc_enable_82598;
    148 
    149 	mac->mcft_size		= 128;
    150 	mac->vft_size		= 128;
    151 	mac->num_rar_entries	= 16;
    152 	mac->rx_pb_size		= 512;
    153 	mac->max_tx_queues	= 32;
    154 	mac->max_rx_queues	= 64;
    155 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
    156 
    157 	/* SFP+ Module */
    158 	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
    159 
    160 	/* Link */
    161 	mac->ops.check_link = &ixgbe_check_mac_link_82598;
    162 	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
    163 	mac->ops.flap_tx_laser = NULL;
    164 	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
    165 	mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
    166 
    167 	/* Manageability interface */
    168 	mac->ops.set_fw_drv_ver = NULL;
    169 
    170 	return ret_val;
    171 }
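
/*
 * Illustrative call sequence (a sketch, not part of the driver): the shared
 * code entry point (e.g. ixgbe_init_shared_code() in ixgbe_api.c) detects an
 * 82598 MAC and calls ixgbe_init_ops_82598() to fill in hw->mac.ops and
 * hw->phy.ops; the attach path then drives the device only through those
 * pointers, e.g. hw->mac.ops.reset_hw(hw) followed by hw->mac.ops.start_hw(hw).
 */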
    172 
    173 /**
    174  *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
    175  *  @hw: pointer to hardware structure
    176  *
    177  *  Initialize any function pointers that were not able to be
    178  *  set during init_shared_code because the PHY/SFP type was
    179  *  not known.  Perform the SFP init if necessary.
    180  *
    181  **/
    182 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
    183 {
    184 	struct ixgbe_mac_info *mac = &hw->mac;
    185 	struct ixgbe_phy_info *phy = &hw->phy;
    186 	s32 ret_val = IXGBE_SUCCESS;
    187 	u16 list_offset, data_offset;
    188 
    189 	DEBUGFUNC("ixgbe_init_phy_ops_82598");
    190 
    191 	/* Identify the PHY */
    192 	phy->ops.identify(hw);
    193 
    194 	/* Overwrite the link function pointers if copper PHY */
    195 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
    196 		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
    197 		mac->ops.get_link_capabilities =
    198 				&ixgbe_get_copper_link_capabilities_generic;
    199 	}
    200 
    201 	switch (hw->phy.type) {
    202 	case ixgbe_phy_tn:
    203 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
    204 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
    205 		phy->ops.get_firmware_version =
    206 					&ixgbe_get_phy_firmware_version_tnx;
    207 		break;
    208 	case ixgbe_phy_nl:
    209 		phy->ops.reset = &ixgbe_reset_phy_nl;
    210 
    211 		/* Call SFP+ identify routine to get the SFP+ module type */
    212 		ret_val = phy->ops.identify_sfp(hw);
    213 		if (ret_val != IXGBE_SUCCESS)
    214 			goto out;
    215 		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
    216 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
    217 			goto out;
    218 		}
    219 
    220 		/* Check to see if SFP+ module is supported */
    221 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
    222 							      &list_offset,
    223 							      &data_offset);
    224 		if (ret_val != IXGBE_SUCCESS) {
    225 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
    226 			goto out;
    227 		}
    228 		break;
    229 	default:
    230 		break;
    231 	}
    232 
    233 out:
    234 	return ret_val;
    235 }
    236 
    237 /**
    238  *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
    239  *  @hw: pointer to hardware structure
    240  *
    241  *  Starts the hardware using the generic start_hw function.
     242  *  Disables relaxed ordering, then sets the PCIe completion timeout.
    243  *
    244  **/
    245 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
    246 {
    247 	u32 regval;
    248 	u32 i;
    249 	s32 ret_val = IXGBE_SUCCESS;
    250 
    251 	DEBUGFUNC("ixgbe_start_hw_82598");
    252 
    253 	ret_val = ixgbe_start_hw_generic(hw);
    254 
    255 	/* Disable relaxed ordering */
    256 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
    257 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
    258 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
    259 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    260 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
    261 	}
    262 
    263 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
    264 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
    265 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
    266 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
    267 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
    268 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
    269 	}
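
	/*
	 * Note: the write-relaxed-ordering (WRO) bits cleared above can be
	 * turned back on later through mac->ops.enable_relaxed_ordering,
	 * which ixgbe_init_ops_82598() points at
	 * ixgbe_enable_relaxed_ordering_82598() below.
	 */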
    270 
    271 	/* set the completion timeout for interface */
    272 	if (ret_val == IXGBE_SUCCESS)
    273 		ixgbe_set_pcie_completion_timeout(hw);
    274 
    275 	return ret_val;
    276 }
    277 
    278 /**
    279  *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
    280  *  @hw: pointer to hardware structure
    281  *  @speed: pointer to link speed
     282  *  @autoneg: pointer to boolean; set TRUE if autonegotiation is used
    283  *
    284  *  Determines the link capabilities by reading the AUTOC register.
    285  **/
    286 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
    287 					     ixgbe_link_speed *speed,
    288 					     bool *autoneg)
    289 {
    290 	s32 status = IXGBE_SUCCESS;
    291 	u32 autoc = 0;
    292 
    293 	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
    294 
    295 	/*
    296 	 * Determine link capabilities based on the stored value of AUTOC,
    297 	 * which represents EEPROM defaults.  If AUTOC value has not been
    298 	 * stored, use the current register value.
    299 	 */
    300 	if (hw->mac.orig_link_settings_stored)
    301 		autoc = hw->mac.orig_autoc;
    302 	else
    303 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    304 
    305 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
    306 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
    307 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    308 		*autoneg = FALSE;
    309 		break;
    310 
    311 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
    312 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    313 		*autoneg = FALSE;
    314 		break;
    315 
    316 	case IXGBE_AUTOC_LMS_1G_AN:
    317 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    318 		*autoneg = TRUE;
    319 		break;
    320 
    321 	case IXGBE_AUTOC_LMS_KX4_AN:
    322 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
    323 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
    324 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
    325 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
    326 		if (autoc & IXGBE_AUTOC_KX_SUPP)
    327 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
    328 		*autoneg = TRUE;
    329 		break;
    330 
    331 	default:
    332 		status = IXGBE_ERR_LINK_SETUP;
    333 		break;
    334 	}
    335 
    336 	return status;
    337 }
    338 
    339 /**
    340  *  ixgbe_get_media_type_82598 - Determines media type
    341  *  @hw: pointer to hardware structure
    342  *
    343  *  Returns the media type (fiber, copper, backplane)
    344  **/
    345 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
    346 {
    347 	enum ixgbe_media_type media_type;
    348 
    349 	DEBUGFUNC("ixgbe_get_media_type_82598");
    350 
    351 	/* Detect if there is a copper PHY attached. */
    352 	switch (hw->phy.type) {
    353 	case ixgbe_phy_cu_unknown:
    354 	case ixgbe_phy_tn:
    355 		media_type = ixgbe_media_type_copper;
    356 		goto out;
    357 	default:
    358 		break;
    359 	}
    360 
    361 	/* Media type for I82598 is based on device ID */
    362 	switch (hw->device_id) {
    363 	case IXGBE_DEV_ID_82598:
    364 	case IXGBE_DEV_ID_82598_BX:
    365 		/* Default device ID is mezzanine card KX/KX4 */
    366 		media_type = ixgbe_media_type_backplane;
    367 		break;
    368 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
    369 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
    370 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
    371 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
    372 	case IXGBE_DEV_ID_82598EB_XF_LR:
    373 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
    374 		media_type = ixgbe_media_type_fiber;
    375 		break;
    376 	case IXGBE_DEV_ID_82598EB_CX4:
    377 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
    378 		media_type = ixgbe_media_type_cx4;
    379 		break;
    380 	case IXGBE_DEV_ID_82598AT:
    381 	case IXGBE_DEV_ID_82598AT2:
    382 		media_type = ixgbe_media_type_copper;
    383 		break;
    384 	default:
    385 		media_type = ixgbe_media_type_unknown;
    386 		break;
    387 	}
    388 out:
    389 	return media_type;
    390 }
    391 
    392 /**
    393  *  ixgbe_fc_enable_82598 - Enable flow control
    394  *  @hw: pointer to hardware structure
    395  *
    396  *  Enable flow control according to the current settings.
    397  **/
    398 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
    399 {
    400 	s32 ret_val = IXGBE_SUCCESS;
    401 	u32 fctrl_reg;
    402 	u32 rmcs_reg;
    403 	u32 reg;
    404 	u32 fcrtl, fcrth;
    405 	u32 link_speed = 0;
    406 	int i;
    407 	bool link_up;
    408 
    409 	DEBUGFUNC("ixgbe_fc_enable_82598");
    410 
    411 	/* Validate the water mark configuration */
    412 	if (!hw->fc.pause_time) {
    413 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
    414 		goto out;
    415 	}
    416 
    417 	/* Low water mark of zero causes XOFF floods */
    418 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
    419 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
    420 		    hw->fc.high_water[i]) {
    421 			if (!hw->fc.low_water[i] ||
    422 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
    423 				DEBUGOUT("Invalid water mark configuration\n");
    424 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
    425 				goto out;
    426 			}
    427 		}
    428 	}
    429 
    430 	/*
     431 	 * On 82598, having Rx flow control on causes resets while running
     432 	 * at 1G, so if it is on, turn it off once link_speed is known.  For
     433 	 * more details see the 82598 Specification Update.
    434 	 */
    435 	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
    436 	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
    437 		switch (hw->fc.requested_mode) {
    438 		case ixgbe_fc_full:
    439 			hw->fc.requested_mode = ixgbe_fc_tx_pause;
    440 			break;
    441 		case ixgbe_fc_rx_pause:
    442 			hw->fc.requested_mode = ixgbe_fc_none;
    443 			break;
    444 		default:
    445 			/* no change */
    446 			break;
    447 		}
    448 	}
    449 
    450 	/* Negotiate the fc mode to use */
    451 	ixgbe_fc_autoneg(hw);
    452 
    453 	/* Disable any previous flow control settings */
    454 	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    455 	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
    456 
    457 	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
    458 	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
    459 
    460 	/*
    461 	 * The possible values of fc.current_mode are:
    462 	 * 0: Flow control is completely disabled
    463 	 * 1: Rx flow control is enabled (we can receive pause frames,
    464 	 *    but not send pause frames).
    465 	 * 2: Tx flow control is enabled (we can send pause frames but
    466 	 *     we do not support receiving pause frames).
    467 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
    468 	 * other: Invalid.
    469 	 */
    470 	switch (hw->fc.current_mode) {
    471 	case ixgbe_fc_none:
    472 		/*
    473 		 * Flow control is disabled by software override or autoneg.
    474 		 * The code below will actually disable it in the HW.
    475 		 */
    476 		break;
    477 	case ixgbe_fc_rx_pause:
    478 		/*
    479 		 * Rx Flow control is enabled and Tx Flow control is
    480 		 * disabled by software override. Since there really
    481 		 * isn't a way to advertise that we are capable of RX
    482 		 * Pause ONLY, we will advertise that we support both
    483 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
    484 		 * disable the adapter's ability to send PAUSE frames.
    485 		 */
    486 		fctrl_reg |= IXGBE_FCTRL_RFCE;
    487 		break;
    488 	case ixgbe_fc_tx_pause:
    489 		/*
    490 		 * Tx Flow control is enabled, and Rx Flow control is
    491 		 * disabled by software override.
    492 		 */
    493 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
    494 		break;
    495 	case ixgbe_fc_full:
    496 		/* Flow control (both Rx and Tx) is enabled by SW override. */
    497 		fctrl_reg |= IXGBE_FCTRL_RFCE;
    498 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
    499 		break;
    500 	default:
    501 		DEBUGOUT("Flow control param set incorrectly\n");
    502 		ret_val = IXGBE_ERR_CONFIG;
    503 		goto out;
    504 		break;
    505 	}
    506 
    507 	/* Set 802.3x based flow control settings. */
    508 	fctrl_reg |= IXGBE_FCTRL_DPF;
    509 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
    510 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
    511 
    512 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
    513 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
    514 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
    515 		    hw->fc.high_water[i]) {
    516 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
    517 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
    518 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
    519 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
    520 		} else {
    521 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
    522 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
    523 		}
    524 
    525 	}
    526 
    527 	/* Configure pause time (2 TCs per register) */
    528 	reg = hw->fc.pause_time * 0x00010001;
    529 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
    530 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
    531 
    532 	/* Configure flow control refresh threshold value */
    533 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
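
	/*
	 * Illustrative encoding (a sketch with hypothetical values, assuming
	 * the water marks are kept in KB units): low_water[0] = 0x40 and
	 * high_water[0] = 0x80 give FCRTL(0) = (0x40 << 10) | IXGBE_FCRTL_XONE
	 * and FCRTH(0) = (0x80 << 10) | IXGBE_FCRTH_FCEN (64 KB / 128 KB in
	 * bytes); pause_time = 0x0680 makes each FCTTV word 0x06800680 (the
	 * same 16-bit time for both TCs) and FCRTV = 0x0340.
	 */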
    534 
    535 out:
    536 	return ret_val;
    537 }
    538 
    539 /**
    540  *  ixgbe_start_mac_link_82598 - Configures MAC link settings
    541  *  @hw: pointer to hardware structure
    542  *
    543  *  Configures link settings based on values in the ixgbe_hw struct.
    544  *  Restarts the link.  Performs autonegotiation if needed.
    545  **/
    546 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
    547 				      bool autoneg_wait_to_complete)
    548 {
    549 	u32 autoc_reg;
    550 	u32 links_reg;
    551 	u32 i;
    552 	s32 status = IXGBE_SUCCESS;
    553 
    554 	DEBUGFUNC("ixgbe_start_mac_link_82598");
    555 
    556 	/* Restart link */
    557 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    558 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
    559 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
    560 
    561 	/* Only poll for autoneg to complete if specified to do so */
    562 	if (autoneg_wait_to_complete) {
    563 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
    564 		     IXGBE_AUTOC_LMS_KX4_AN ||
    565 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
    566 		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
    567 			links_reg = 0; /* Just in case Autoneg time = 0 */
    568 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
    569 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
    570 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
    571 					break;
    572 				msec_delay(100);
    573 			}
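			/*
			 * Each pass above waits 100 ms, so IXGBE_AUTO_NEG_TIME
			 * iterations bound the wait (a few seconds with the
			 * stock value of the constant).
			 */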
    574 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
    575 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
    576 				DEBUGOUT("Autonegotiation did not complete.\n");
    577 			}
    578 		}
    579 	}
    580 
     581 	/* Add delay to filter out noise during initial link setup */
    582 	msec_delay(50);
    583 
    584 	return status;
    585 }
    586 
    587 /**
    588  *  ixgbe_validate_link_ready - Function looks for phy link
    589  *  @hw: pointer to hardware structure
    590  *
    591  *  Function indicates success when phy link is available. If phy is not ready
     592  *  within 5 seconds of the MAC indicating link, the function returns an error.
    593  **/
    594 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
    595 {
    596 	u32 timeout;
    597 	u16 an_reg;
    598 
    599 	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
    600 		return IXGBE_SUCCESS;
    601 
    602 	for (timeout = 0;
    603 	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
    604 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
    605 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
    606 
    607 		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
    608 		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
    609 			break;
    610 
    611 		msec_delay(100);
    612 	}
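
	/*
	 * Each pass above waits 100 ms, so IXGBE_VALIDATE_LINK_READY_TIMEOUT
	 * iterations (presumably 50) give the roughly 5 second window
	 * described in the function header.
	 */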
    613 
    614 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
    615 		DEBUGOUT("Link was indicated but link is down\n");
    616 		return IXGBE_ERR_LINK_SETUP;
    617 	}
    618 
    619 	return IXGBE_SUCCESS;
    620 }
    621 
    622 /**
    623  *  ixgbe_check_mac_link_82598 - Get link/speed status
    624  *  @hw: pointer to hardware structure
    625  *  @speed: pointer to link speed
     626  *  @link_up: TRUE if link is up, FALSE otherwise
    627  *  @link_up_wait_to_complete: bool used to wait for link up or not
    628  *
    629  *  Reads the links register to determine if link is up and the current speed
    630  **/
    631 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
    632 				      ixgbe_link_speed *speed, bool *link_up,
    633 				      bool link_up_wait_to_complete)
    634 {
    635 	u32 links_reg;
    636 	u32 i;
    637 	u16 link_reg, adapt_comp_reg;
    638 
    639 	DEBUGFUNC("ixgbe_check_mac_link_82598");
    640 
    641 	/*
    642 	 * SERDES PHY requires us to read link status from undocumented
    643 	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
     644 	 * indicates link down.  0xC00C is read to check that the XAUI lanes
    645 	 * are active.  Bit 0 clear indicates active; set indicates inactive.
    646 	 */
    647 	if (hw->phy.type == ixgbe_phy_nl) {
    648 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
    649 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
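		/*
		 * 0xC79F is read twice on the assumption that the first read
		 * returns a latched status; only the second value is used.
		 */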
    650 		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
    651 				     &adapt_comp_reg);
    652 		if (link_up_wait_to_complete) {
    653 			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
    654 				if ((link_reg & 1) &&
    655 				    ((adapt_comp_reg & 1) == 0)) {
    656 					*link_up = TRUE;
    657 					break;
    658 				} else {
    659 					*link_up = FALSE;
    660 				}
    661 				msec_delay(100);
    662 				hw->phy.ops.read_reg(hw, 0xC79F,
    663 						     IXGBE_TWINAX_DEV,
    664 						     &link_reg);
    665 				hw->phy.ops.read_reg(hw, 0xC00C,
    666 						     IXGBE_TWINAX_DEV,
    667 						     &adapt_comp_reg);
    668 			}
    669 		} else {
    670 			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
    671 				*link_up = TRUE;
    672 			else
    673 				*link_up = FALSE;
    674 		}
    675 
    676 		if (*link_up == FALSE)
    677 			goto out;
    678 	}
    679 
    680 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
    681 	if (link_up_wait_to_complete) {
    682 		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
    683 			if (links_reg & IXGBE_LINKS_UP) {
    684 				*link_up = TRUE;
    685 				break;
    686 			} else {
    687 				*link_up = FALSE;
    688 			}
    689 			msec_delay(100);
    690 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
    691 		}
    692 	} else {
    693 		if (links_reg & IXGBE_LINKS_UP)
    694 			*link_up = TRUE;
    695 		else
    696 			*link_up = FALSE;
    697 	}
    698 
    699 	if (links_reg & IXGBE_LINKS_SPEED)
    700 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
    701 	else
    702 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
    703 
    704 	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
    705 	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
    706 		*link_up = FALSE;
    707 
    708 out:
    709 	return IXGBE_SUCCESS;
    710 }
    711 
    712 /**
    713  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
    714  *  @hw: pointer to hardware structure
    715  *  @speed: new link speed
    716  *  @autoneg: TRUE if autonegotiation enabled
    717  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
    718  *
    719  *  Set the link speed in the AUTOC register and restarts link.
    720  **/
    721 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
    722 				      ixgbe_link_speed speed, bool autoneg,
    723 				      bool autoneg_wait_to_complete)
    724 {
    725 	s32 status = IXGBE_SUCCESS;
    726 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
    727 	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    728 	u32 autoc = curr_autoc;
    729 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
    730 
    731 	DEBUGFUNC("ixgbe_setup_mac_link_82598");
    732 
    733 	/* Check to see if speed passed in is supported. */
    734 	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
    735 	speed &= link_capabilities;
    736 
    737 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
    738 		status = IXGBE_ERR_LINK_SETUP;
    739 
    740 	/* Set KX4/KX support according to speed requested */
    741 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
    742 		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
    743 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
    744 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
    745 			autoc |= IXGBE_AUTOC_KX4_SUPP;
    746 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
    747 			autoc |= IXGBE_AUTOC_KX_SUPP;
    748 		if (autoc != curr_autoc)
    749 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
    750 	}
    751 
    752 	if (status == IXGBE_SUCCESS) {
    753 		/*
    754 		 * Setup and restart the link based on the new values in
     755 		 * ixgbe_hw.  This will write the AUTOC register based on the
     756 		 * new stored values.
    757 		 */
    758 		status = ixgbe_start_mac_link_82598(hw,
    759 						    autoneg_wait_to_complete);
    760 	}
    761 
    762 	return status;
    763 }
    764 
    765 
    766 /**
    767  *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
    768  *  @hw: pointer to hardware structure
    769  *  @speed: new link speed
    770  *  @autoneg: TRUE if autonegotiation enabled
    771  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
    772  *
    773  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
    774  **/
    775 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
    776 					 ixgbe_link_speed speed,
    777 					 bool autoneg,
    778 					 bool autoneg_wait_to_complete)
    779 {
    780 	s32 status;
    781 
    782 	DEBUGFUNC("ixgbe_setup_copper_link_82598");
    783 
    784 	/* Setup the PHY according to input speed */
    785 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
    786 					      autoneg_wait_to_complete);
    787 	/* Set up MAC */
    788 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
    789 
    790 	return status;
    791 }
    792 
    793 /**
    794  *  ixgbe_reset_hw_82598 - Performs hardware reset
    795  *  @hw: pointer to hardware structure
    796  *
     797  *  Resets the hardware by resetting the transmit and receive units, masking
     798  *  and clearing all interrupts, performing a PHY reset, and performing a
     799  *  link (MAC) reset.
    800  **/
    801 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
    802 {
    803 	s32 status = IXGBE_SUCCESS;
    804 	s32 phy_status = IXGBE_SUCCESS;
    805 	u32 ctrl;
    806 	u32 gheccr;
    807 	u32 i;
    808 	u32 autoc;
    809 	u8  analog_val;
    810 
    811 	DEBUGFUNC("ixgbe_reset_hw_82598");
    812 
    813 	/* Call adapter stop to disable tx/rx and clear interrupts */
    814 	status = hw->mac.ops.stop_adapter(hw);
    815 	if (status != IXGBE_SUCCESS)
    816 		goto reset_hw_out;
    817 
    818 	/*
    819 	 * Power up the Atlas Tx lanes if they are currently powered down.
    820 	 * Atlas Tx lanes are powered down for MAC loopback tests, but
    821 	 * they are not automatically restored on reset.
    822 	 */
    823 	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
    824 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
    825 		/* Enable Tx Atlas so packets can be transmitted again */
    826 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
    827 					     &analog_val);
    828 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
    829 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
    830 					      analog_val);
    831 
    832 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
    833 					     &analog_val);
    834 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
    835 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
    836 					      analog_val);
    837 
    838 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
    839 					     &analog_val);
    840 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
    841 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
    842 					      analog_val);
    843 
    844 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
    845 					     &analog_val);
    846 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
    847 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
    848 					      analog_val);
    849 	}
    850 
    851 	/* Reset PHY */
    852 	if (hw->phy.reset_disable == FALSE) {
    853 		/* PHY ops must be identified and initialized prior to reset */
    854 
    855 		/* Init PHY and function pointers, perform SFP setup */
    856 		phy_status = hw->phy.ops.init(hw);
    857 		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
    858 			goto reset_hw_out;
    859 		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
    860 			goto mac_reset_top;
    861 
    862 		hw->phy.ops.reset(hw);
    863 	}
    864 
    865 mac_reset_top:
    866 	/*
    867 	 * Issue global reset to the MAC.  This needs to be a SW reset.
    868 	 * If link reset is used, it might reset the MAC when mng is using it
    869 	 */
    870 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
    871 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
    872 	IXGBE_WRITE_FLUSH(hw);
    873 
    874 	/* Poll for reset bit to self-clear indicating reset is complete */
    875 	for (i = 0; i < 10; i++) {
    876 		usec_delay(1);
    877 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
    878 		if (!(ctrl & IXGBE_CTRL_RST))
    879 			break;
    880 	}
    881 	if (ctrl & IXGBE_CTRL_RST) {
    882 		status = IXGBE_ERR_RESET_FAILED;
    883 		DEBUGOUT("Reset polling failed to complete.\n");
    884 	}
    885 
    886 	msec_delay(50);
    887 
    888 	/*
    889 	 * Double resets are required for recovery from certain error
    890 	 * conditions.  Between resets, it is necessary to stall to allow time
    891 	 * for any pending HW events to complete.
    892 	 */
    893 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
    894 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
    895 		goto mac_reset_top;
    896 	}
    897 
    898 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
    899 	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
    900 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
    901 
    902 	/*
    903 	 * Store the original AUTOC value if it has not been
    904 	 * stored off yet.  Otherwise restore the stored original
     905 	 * AUTOC value since the reset operation sets it back to defaults.
    906 	 */
    907 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    908 	if (hw->mac.orig_link_settings_stored == FALSE) {
    909 		hw->mac.orig_autoc = autoc;
    910 		hw->mac.orig_link_settings_stored = TRUE;
    911 	} else if (autoc != hw->mac.orig_autoc) {
    912 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
    913 	}
    914 
    915 	/* Store the permanent mac address */
    916 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
    917 
    918 	/*
    919 	 * Store MAC address from RAR0, clear receive address registers, and
    920 	 * clear the multicast table
    921 	 */
    922 	hw->mac.ops.init_rx_addrs(hw);
    923 
    924 reset_hw_out:
    925 	if (phy_status != IXGBE_SUCCESS)
    926 		status = phy_status;
    927 
    928 	return status;
    929 }
    930 
    931 /**
    932  *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
    933  *  @hw: pointer to hardware struct
    934  *  @rar: receive address register index to associate with a VMDq index
    935  *  @vmdq: VMDq set index
    936  **/
    937 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
    938 {
    939 	u32 rar_high;
    940 	u32 rar_entries = hw->mac.num_rar_entries;
    941 
    942 	DEBUGFUNC("ixgbe_set_vmdq_82598");
    943 
    944 	/* Make sure we are using a valid rar index range */
    945 	if (rar >= rar_entries) {
    946 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
    947 		return IXGBE_ERR_INVALID_ARGUMENT;
    948 	}
    949 
    950 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
    951 	rar_high &= ~IXGBE_RAH_VIND_MASK;
    952 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
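	/*
	 * Illustrative result (a sketch, assuming the usual VIND field at
	 * bits 21:18 of RAH): vmdq = 2 ORs 0x00080000 into RAH(rar), i.e.
	 * the receive address is associated with VMDq index 2.
	 */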
    953 	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
    954 	return IXGBE_SUCCESS;
    955 }
    956 
    957 /**
    958  *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
    959  *  @hw: pointer to hardware struct
    960  *  @rar: receive address register index to associate with a VMDq index
    961  *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
    962  **/
    963 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
    964 {
    965 	u32 rar_high;
    966 	u32 rar_entries = hw->mac.num_rar_entries;
    967 
    968 	UNREFERENCED_1PARAMETER(vmdq);
    969 
    970 	/* Make sure we are using a valid rar index range */
    971 	if (rar >= rar_entries) {
    972 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
    973 		return IXGBE_ERR_INVALID_ARGUMENT;
    974 	}
    975 
    976 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
    977 	if (rar_high & IXGBE_RAH_VIND_MASK) {
    978 		rar_high &= ~IXGBE_RAH_VIND_MASK;
    979 		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
    980 	}
    981 
    982 	return IXGBE_SUCCESS;
    983 }
    984 
    985 /**
    986  *  ixgbe_set_vfta_82598 - Set VLAN filter table
    987  *  @hw: pointer to hardware structure
    988  *  @vlan: VLAN id to write to VLAN filter
    989  *  @vind: VMDq output index that maps queue to VLAN id in VFTA
    990  *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
    991  *
    992  *  Turn on/off specified VLAN in the VLAN filter table.
    993  **/
    994 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
    995 			 bool vlan_on)
    996 {
    997 	u32 regindex;
    998 	u32 bitindex;
    999 	u32 bits;
   1000 	u32 vftabyte;
   1001 
   1002 	DEBUGFUNC("ixgbe_set_vfta_82598");
   1003 
   1004 	if (vlan > 4095)
   1005 		return IXGBE_ERR_PARAM;
   1006 
   1007 	/* Determine 32-bit word position in array */
   1008 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
   1009 
   1010 	/* Determine the location of the (VMD) queue index */
   1011 	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
   1012 	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
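
	/*
	 * Worked example (illustrative): for vlan = 100 (0x064),
	 * regindex = (100 >> 5) & 0x7F = 3, vftabyte = (100 >> 3) & 0x03 = 0
	 * and bitindex = (100 & 0x7) << 2 = 16, so the 4-bit VMDq index lands
	 * in bits 19:16 of VFTAVIND(0, 3); the VLAN-enable bit written below
	 * is bit (100 & 0x1F) = 4 of VFTA(3).
	 */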
   1013 
   1014 	/* Set the nibble for VMD queue index */
   1015 	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
   1016 	bits &= (~(0x0F << bitindex));
   1017 	bits |= (vind << bitindex);
   1018 	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
   1019 
   1020 	/* Determine the location of the bit for this VLAN id */
   1021 	bitindex = vlan & 0x1F;   /* lower five bits */
   1022 
   1023 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
   1024 	if (vlan_on)
   1025 		/* Turn on this VLAN id */
   1026 		bits |= (1 << bitindex);
   1027 	else
   1028 		/* Turn off this VLAN id */
   1029 		bits &= ~(1 << bitindex);
   1030 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
   1031 
   1032 	return IXGBE_SUCCESS;
   1033 }
   1034 
   1035 /**
   1036  *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
   1037  *  @hw: pointer to hardware structure
   1038  *
    1039  *  Clears the VLAN filter table, and the VMDq index associated with the filter
   1040  **/
   1041 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
   1042 {
   1043 	u32 offset;
   1044 	u32 vlanbyte;
   1045 
   1046 	DEBUGFUNC("ixgbe_clear_vfta_82598");
   1047 
   1048 	for (offset = 0; offset < hw->mac.vft_size; offset++)
   1049 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
   1050 
   1051 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
   1052 		for (offset = 0; offset < hw->mac.vft_size; offset++)
   1053 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
   1054 					0);
   1055 
   1056 	return IXGBE_SUCCESS;
   1057 }
   1058 
   1059 /**
   1060  *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
   1061  *  @hw: pointer to hardware structure
   1062  *  @reg: analog register to read
   1063  *  @val: read value
   1064  *
   1065  *  Performs read operation to Atlas analog register specified.
   1066  **/
   1067 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
   1068 {
   1069 	u32  atlas_ctl;
   1070 
   1071 	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
   1072 
   1073 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
   1074 			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
   1075 	IXGBE_WRITE_FLUSH(hw);
   1076 	usec_delay(10);
   1077 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
   1078 	*val = (u8)atlas_ctl;
   1079 
   1080 	return IXGBE_SUCCESS;
   1081 }
   1082 
   1083 /**
   1084  *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
   1085  *  @hw: pointer to hardware structure
   1086  *  @reg: atlas register to write
   1087  *  @val: value to write
   1088  *
   1089  *  Performs write operation to Atlas analog register specified.
   1090  **/
   1091 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
   1092 {
   1093 	u32  atlas_ctl;
   1094 
   1095 	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
   1096 
   1097 	atlas_ctl = (reg << 8) | val;
   1098 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
   1099 	IXGBE_WRITE_FLUSH(hw);
   1100 	usec_delay(10);
   1101 
   1102 	return IXGBE_SUCCESS;
   1103 }
   1104 
   1105 /**
   1106  *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
   1107  *  @hw: pointer to hardware structure
   1108  *  @byte_offset: EEPROM byte offset to read
   1109  *  @eeprom_data: value read
   1110  *
    1111  *  Performs an 8 bit read of the SFP module's EEPROM over the I2C interface.
   1112  **/
   1113 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
   1114 				u8 *eeprom_data)
   1115 {
   1116 	s32 status = IXGBE_SUCCESS;
   1117 	u16 sfp_addr = 0;
   1118 	u16 sfp_data = 0;
   1119 	u16 sfp_stat = 0;
   1120 	u32 i;
   1121 
   1122 	DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
   1123 
   1124 	if (hw->phy.type == ixgbe_phy_nl) {
   1125 		/*
   1126 		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
   1127 		 * 0xC30D. These registers are used to talk to the SFP+
   1128 		 * module's EEPROM through the SDA/SCL (I2C) interface.
   1129 		 */
   1130 		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
   1131 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
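		/*
		 * Illustrative value (a sketch, assuming the usual 0xA0 EEPROM
		 * device address and a 0x100 read-command mask): byte_offset
		 * 0x03 would yield sfp_addr = 0xA103.
		 */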
   1132 		hw->phy.ops.write_reg(hw,
   1133 				      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
   1134 				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
   1135 				      sfp_addr);
   1136 
   1137 		/* Poll status */
   1138 		for (i = 0; i < 100; i++) {
   1139 			hw->phy.ops.read_reg(hw,
   1140 					     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
   1141 					     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
   1142 					     &sfp_stat);
   1143 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
   1144 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
   1145 				break;
   1146 			msec_delay(10);
   1147 		}
   1148 
   1149 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
   1150 			DEBUGOUT("EEPROM read did not pass.\n");
   1151 			status = IXGBE_ERR_SFP_NOT_PRESENT;
   1152 			goto out;
   1153 		}
   1154 
   1155 		/* Read data */
   1156 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
   1157 				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
   1158 
   1159 		*eeprom_data = (u8)(sfp_data >> 8);
   1160 	} else {
   1161 		status = IXGBE_ERR_PHY;
   1162 		goto out;
   1163 	}
   1164 
   1165 out:
   1166 	return status;
   1167 }
   1168 
   1169 /**
   1170  *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
   1171  *  @hw: pointer to hardware structure
   1172  *
   1173  *  Determines physical layer capabilities of the current configuration.
   1174  **/
   1175 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
   1176 {
   1177 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   1178 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   1179 	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
   1180 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
   1181 	u16 ext_ability = 0;
   1182 
   1183 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
   1184 
   1185 	hw->phy.ops.identify(hw);
   1186 
   1187 	/* Copper PHY must be checked before AUTOC LMS to determine correct
   1188 	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
   1189 	switch (hw->phy.type) {
   1190 	case ixgbe_phy_tn:
   1191 	case ixgbe_phy_cu_unknown:
   1192 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
    1193 				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
   1194 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
   1195 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
   1196 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
   1197 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
   1198 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
   1199 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
   1200 		goto out;
   1201 	default:
   1202 		break;
   1203 	}
   1204 
   1205 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
   1206 	case IXGBE_AUTOC_LMS_1G_AN:
   1207 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
   1208 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
   1209 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
   1210 		else
   1211 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
   1212 		break;
   1213 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
   1214 		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
   1215 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
   1216 		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
   1217 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   1218 		else /* XAUI */
   1219 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   1220 		break;
   1221 	case IXGBE_AUTOC_LMS_KX4_AN:
   1222 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
   1223 		if (autoc & IXGBE_AUTOC_KX_SUPP)
   1224 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
   1225 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
   1226 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
   1227 		break;
   1228 	default:
   1229 		break;
   1230 	}
   1231 
   1232 	if (hw->phy.type == ixgbe_phy_nl) {
   1233 		hw->phy.ops.identify_sfp(hw);
   1234 
   1235 		switch (hw->phy.sfp_type) {
   1236 		case ixgbe_sfp_type_da_cu:
   1237 			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
   1238 			break;
   1239 		case ixgbe_sfp_type_sr:
   1240 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
   1241 			break;
   1242 		case ixgbe_sfp_type_lr:
   1243 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
   1244 			break;
   1245 		default:
   1246 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
   1247 			break;
   1248 		}
   1249 	}
   1250 
   1251 	switch (hw->device_id) {
   1252 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
   1253 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
   1254 		break;
   1255 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
   1256 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
   1257 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
   1258 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
   1259 		break;
   1260 	case IXGBE_DEV_ID_82598EB_XF_LR:
   1261 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
   1262 		break;
   1263 	default:
   1264 		break;
   1265 	}
   1266 
   1267 out:
   1268 	return physical_layer;
   1269 }
   1270 
   1271 /**
   1272  *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
   1273  *  port devices.
   1274  *  @hw: pointer to the HW structure
   1275  *
   1276  *  Calls common function and corrects issue with some single port devices
   1277  *  that enable LAN1 but not LAN0.
   1278  **/
   1279 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
   1280 {
   1281 	struct ixgbe_bus_info *bus = &hw->bus;
   1282 	u16 pci_gen = 0;
   1283 	u16 pci_ctrl2 = 0;
   1284 
   1285 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
   1286 
   1287 	ixgbe_set_lan_id_multi_port_pcie(hw);
   1288 
   1289 	/* check if LAN0 is disabled */
   1290 	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
   1291 	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
   1292 
   1293 		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
   1294 
   1295 		/* if LAN0 is completely disabled force function to 0 */
   1296 		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
   1297 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
   1298 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
   1299 
   1300 			bus->func = 0;
   1301 		}
   1302 	}
   1303 }
   1304 
   1305 /**
   1306  *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
   1307  *  @hw: pointer to hardware structure
   1308  *
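 *  Re-enables the Tx/Rx relaxed ordering bits that ixgbe_start_hw_82598 clears.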
   1309  **/
   1310 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
   1311 {
   1312 	u32 regval;
   1313 	u32 i;
   1314 
   1315 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
   1316 
   1317 	/* Enable relaxed ordering */
   1318 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
   1319 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
   1320 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
   1321 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1322 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
   1323 	}
   1324 
   1325 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
   1326 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
   1327 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
   1328 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
   1329 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
   1330 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
   1331 	}
   1332 
   1333 }
   1334 
   1335 /**
   1336  * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
   1337  * @hw: pointer to hardware structure
   1338  * @num_pb: number of packet buffers to allocate
   1339  * @headroom: reserve n KB of headroom
   1340  * @strategy: packet buffer allocation strategy
   1341  **/
   1342 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
   1343 				  u32 headroom, int strategy)
   1344 {
   1345 	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
   1346 	u8 i = 0;
   1347 	UNREFERENCED_1PARAMETER(headroom);
   1348 
   1349 	if (!num_pb)
   1350 		return;
   1351 
   1352 	/* Setup Rx packet buffer sizes */
   1353 	switch (strategy) {
   1354 	case PBA_STRATEGY_WEIGHTED:
   1355 		/* Setup the first four at 80KB */
   1356 		rxpktsize = IXGBE_RXPBSIZE_80KB;
   1357 		for (; i < 4; i++)
   1358 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
   1359 		/* Setup the last four at 48KB...don't re-init i */
   1360 		rxpktsize = IXGBE_RXPBSIZE_48KB;
   1361 		/* Fall Through */
   1362 	case PBA_STRATEGY_EQUAL:
   1363 	default:
   1364 		/* Divide the remaining Rx packet buffer evenly among the TCs */
   1365 		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
   1366 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
   1367 		break;
   1368 	}
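
	/*
	 * Sanity note (assuming the usual 8 packet buffers on 82598): the
	 * weighted layout is 4 x 80 KB + 4 x 48 KB = 512 KB and the equal
	 * layout is 8 x 64 KB = 512 KB, matching the rx_pb_size set in
	 * ixgbe_init_ops_82598().
	 */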
   1369 
   1370 	/* Setup Tx packet buffer sizes */
   1371 	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
   1372 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
   1373 
   1374 	return;
   1375 }
   1376