/* ixgbe_common.c — NetBSD revision 1.35 (web-viewer navigation header removed) */
      1 /* $NetBSD: ixgbe_common.c,v 1.35 2021/12/10 11:20:13 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4   SPDX-License-Identifier: BSD-3-Clause
      5 
      6   Copyright (c) 2001-2017, Intel Corporation
      7   All rights reserved.
      8 
      9   Redistribution and use in source and binary forms, with or without
     10   modification, are permitted provided that the following conditions are met:
     11 
     12    1. Redistributions of source code must retain the above copyright notice,
     13       this list of conditions and the following disclaimer.
     14 
     15    2. Redistributions in binary form must reproduce the above copyright
     16       notice, this list of conditions and the following disclaimer in the
     17       documentation and/or other materials provided with the distribution.
     18 
     19    3. Neither the name of the Intel Corporation nor the names of its
     20       contributors may be used to endorse or promote products derived from
     21       this software without specific prior written permission.
     22 
     23   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     24   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     27   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     28   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     29   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     30   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     31   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     32   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     33   POSSIBILITY OF SUCH DAMAGE.
     34 
     35 ******************************************************************************/
     36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 331224 2018-03-19 20:55:05Z erj $*/
     37 
     38 #include <sys/cdefs.h>
     39 __KERNEL_RCSID(0, "$NetBSD: ixgbe_common.c,v 1.35 2021/12/10 11:20:13 msaitoh Exp $");
     40 
     41 #include "ixgbe_common.h"
     42 #include "ixgbe_phy.h"
     43 #include "ixgbe_dcb.h"
     44 #include "ixgbe_dcb_82599.h"
     45 #include "ixgbe_api.h"
     46 
     47 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
     48 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
     49 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
     50 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
     51 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
     52 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
     53 					u16 count);
     54 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
     55 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
     56 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
     57 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
     58 
     59 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
     60 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
     61 					 u16 *san_mac_offset);
     62 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
     63 					     u16 words, u16 *data);
     64 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
     65 					      u16 words, u16 *data);
     66 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
     67 						 u16 offset);
     68 
     69 /**
     70  *  ixgbe_init_ops_generic - Inits function ptrs
     71  *  @hw: pointer to the hardware structure
     72  *
     73  *  Initialize the function pointers.
     74  **/
     75 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
     76 {
     77 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
     78 	struct ixgbe_mac_info *mac = &hw->mac;
     79 	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
     80 
     81 	DEBUGFUNC("ixgbe_init_ops_generic");
     82 
     83 	/* EEPROM */
     84 	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
     85 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
     86 	if (eec & IXGBE_EEC_PRES) {
     87 		eeprom->ops.read = ixgbe_read_eerd_generic;
     88 		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
     89 	} else {
     90 		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
     91 		eeprom->ops.read_buffer =
     92 				 ixgbe_read_eeprom_buffer_bit_bang_generic;
     93 	}
     94 	eeprom->ops.write = ixgbe_write_eeprom_generic;
     95 	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
     96 	eeprom->ops.validate_checksum =
     97 				      ixgbe_validate_eeprom_checksum_generic;
     98 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
     99 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
    100 
    101 	/* MAC */
    102 	mac->ops.init_hw = ixgbe_init_hw_generic;
    103 	mac->ops.reset_hw = NULL;
    104 	mac->ops.start_hw = ixgbe_start_hw_generic;
    105 	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
    106 	mac->ops.get_media_type = NULL;
    107 	mac->ops.get_supported_physical_layer = NULL;
    108 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
    109 	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
    110 	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
    111 	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
    112 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
    113 	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
    114 	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
    115 	mac->ops.prot_autoc_read = prot_autoc_read_generic;
    116 	mac->ops.prot_autoc_write = prot_autoc_write_generic;
    117 
    118 	/* LEDs */
    119 	mac->ops.led_on = ixgbe_led_on_generic;
    120 	mac->ops.led_off = ixgbe_led_off_generic;
    121 	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
    122 	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
    123 	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
    124 
    125 	/* RAR, Multicast, VLAN */
    126 	mac->ops.set_rar = ixgbe_set_rar_generic;
    127 	mac->ops.clear_rar = ixgbe_clear_rar_generic;
    128 	mac->ops.insert_mac_addr = NULL;
    129 	mac->ops.set_vmdq = NULL;
    130 	mac->ops.clear_vmdq = NULL;
    131 	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
    132 	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
    133 	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
    134 	mac->ops.enable_mc = ixgbe_enable_mc_generic;
    135 	mac->ops.disable_mc = ixgbe_disable_mc_generic;
    136 	mac->ops.clear_vfta = NULL;
    137 	mac->ops.set_vfta = NULL;
    138 	mac->ops.set_vlvf = NULL;
    139 	mac->ops.init_uta_tables = NULL;
    140 	mac->ops.enable_rx = ixgbe_enable_rx_generic;
    141 	mac->ops.disable_rx = ixgbe_disable_rx_generic;
    142 	mac->ops.toggle_txdctl = ixgbe_toggle_txdctl_generic;
    143 
    144 	/* Flow Control */
    145 	mac->ops.fc_enable = ixgbe_fc_enable_generic;
    146 	mac->ops.setup_fc = ixgbe_setup_fc_generic;
    147 	mac->ops.fc_autoneg = ixgbe_fc_autoneg;
    148 
    149 	/* Link */
    150 	mac->ops.get_link_capabilities = NULL;
    151 	mac->ops.setup_link = NULL;
    152 	mac->ops.check_link = NULL;
    153 	mac->ops.dmac_config = NULL;
    154 	mac->ops.dmac_update_tcs = NULL;
    155 	mac->ops.dmac_config_tcs = NULL;
    156 
    157 	return IXGBE_SUCCESS;
    158 }
    159 
    160 /**
    161  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
    162  * of flow control
    163  * @hw: pointer to hardware structure
    164  *
    165  * This function returns TRUE if the device supports flow control
    166  * autonegotiation, and FALSE if it does not.
    167  *
    168  **/
    169 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
    170 {
    171 	bool supported = FALSE;
    172 	ixgbe_link_speed speed;
    173 	bool link_up;
    174 
    175 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
    176 
    177 	switch (hw->phy.media_type) {
    178 	case ixgbe_media_type_fiber_fixed:
    179 	case ixgbe_media_type_fiber_qsfp:
    180 	case ixgbe_media_type_fiber:
    181 		/* flow control autoneg black list */
    182 		switch (hw->device_id) {
    183 		case IXGBE_DEV_ID_X550EM_A_SFP:
    184 		case IXGBE_DEV_ID_X550EM_A_SFP_N:
    185 		case IXGBE_DEV_ID_X550EM_A_QSFP:
    186 		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
    187 			supported = FALSE;
    188 			break;
    189 		default:
    190 			hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
    191 			/* if link is down, assume supported */
    192 			if (link_up)
    193 				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
    194 				    TRUE : FALSE;
    195 			else
    196 				supported = TRUE;
    197 		}
    198 
    199 		break;
    200 	case ixgbe_media_type_backplane:
    201 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
    202 			supported = FALSE;
    203 		else
    204 			supported = TRUE;
    205 		break;
    206 	case ixgbe_media_type_copper:
    207 		/* only some copper devices support flow control autoneg */
    208 		switch (hw->device_id) {
    209 		case IXGBE_DEV_ID_82599_T3_LOM:
    210 		case IXGBE_DEV_ID_X540T:
    211 		case IXGBE_DEV_ID_X540T1:
    212 		case IXGBE_DEV_ID_X540_BYPASS:
    213 		case IXGBE_DEV_ID_X550T:
    214 		case IXGBE_DEV_ID_X550T1:
    215 		case IXGBE_DEV_ID_X550EM_X_10G_T:
    216 		case IXGBE_DEV_ID_X550EM_A_10G_T:
    217 		case IXGBE_DEV_ID_X550EM_A_1G_T:
    218 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
    219 			supported = TRUE;
    220 			break;
    221 		default:
    222 			supported = FALSE;
    223 		}
    224 	default:
    225 		break;
    226 	}
    227 
    228 	if (!supported)
    229 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
    230 			      "Device %x does not support flow control autoneg",
    231 			      hw->device_id);
    232 
    233 	return supported;
    234 }
    235 
    236 /**
    237  *  ixgbe_setup_fc_generic - Set up flow control
    238  *  @hw: pointer to hardware structure
    239  *
    240  *  Called at init time to set up flow control.
    241  **/
    242 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
    243 {
    244 	s32 ret_val = IXGBE_SUCCESS;
    245 	u32 reg = 0, reg_bp = 0;
    246 	u16 reg_cu = 0;
    247 	bool locked = FALSE;
    248 
    249 	DEBUGFUNC("ixgbe_setup_fc_generic");
    250 
    251 	/* Validate the requested mode */
    252 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
    253 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
    254 			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
    255 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
    256 		goto out;
    257 	}
    258 
    259 	/*
    260 	 * 10gig parts do not have a word in the EEPROM to determine the
    261 	 * default flow control setting, so we explicitly set it to full.
    262 	 */
    263 	if (hw->fc.requested_mode == ixgbe_fc_default)
    264 		hw->fc.requested_mode = ixgbe_fc_full;
    265 
    266 	/*
    267 	 * Set up the 1G and 10G flow control advertisement registers so the
    268 	 * HW will be able to do fc autoneg once the cable is plugged in.  If
    269 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
    270 	 */
    271 	switch (hw->phy.media_type) {
    272 	case ixgbe_media_type_backplane:
    273 		/* some MAC's need RMW protection on AUTOC */
    274 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
    275 		if (ret_val != IXGBE_SUCCESS)
    276 			goto out;
    277 
    278 		/* fall through - only backplane uses autoc */
    279 	case ixgbe_media_type_fiber_fixed:
    280 	case ixgbe_media_type_fiber_qsfp:
    281 	case ixgbe_media_type_fiber:
    282 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
    283 
    284 		break;
    285 	case ixgbe_media_type_copper:
    286 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
    287 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
    288 		break;
    289 	default:
    290 		break;
    291 	}
    292 
    293 	/*
    294 	 * The possible values of fc.requested_mode are:
    295 	 * 0: Flow control is completely disabled
    296 	 * 1: Rx flow control is enabled (we can receive pause frames,
    297 	 *    but not send pause frames).
    298 	 * 2: Tx flow control is enabled (we can send pause frames but
    299 	 *    we do not support receiving pause frames).
    300 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
    301 	 * other: Invalid.
    302 	 */
    303 	switch (hw->fc.requested_mode) {
    304 	case ixgbe_fc_none:
    305 		/* Flow control completely disabled by software override. */
    306 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
    307 		if (hw->phy.media_type == ixgbe_media_type_backplane)
    308 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
    309 				    IXGBE_AUTOC_ASM_PAUSE);
    310 		else if (hw->phy.media_type == ixgbe_media_type_copper)
    311 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
    312 		break;
    313 	case ixgbe_fc_tx_pause:
    314 		/*
    315 		 * Tx Flow control is enabled, and Rx Flow control is
    316 		 * disabled by software override.
    317 		 */
    318 		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
    319 		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
    320 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
    321 			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
    322 			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
    323 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
    324 			reg_cu |= IXGBE_TAF_ASM_PAUSE;
    325 			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
    326 		}
    327 		break;
    328 	case ixgbe_fc_rx_pause:
    329 		/*
    330 		 * Rx Flow control is enabled and Tx Flow control is
    331 		 * disabled by software override. Since there really
    332 		 * isn't a way to advertise that we are capable of RX
    333 		 * Pause ONLY, we will advertise that we support both
    334 		 * symmetric and asymmetric Rx PAUSE, as such we fall
    335 		 * through to the fc_full statement.  Later, we will
    336 		 * disable the adapter's ability to send PAUSE frames.
    337 		 */
    338 	case ixgbe_fc_full:
    339 		/* Flow control (both Rx and Tx) is enabled by SW override. */
    340 		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
    341 		if (hw->phy.media_type == ixgbe_media_type_backplane)
    342 			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
    343 				  IXGBE_AUTOC_ASM_PAUSE;
    344 		else if (hw->phy.media_type == ixgbe_media_type_copper)
    345 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
    346 		break;
    347 	default:
    348 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
    349 			     "Flow control param set incorrectly\n");
    350 		ret_val = IXGBE_ERR_CONFIG;
    351 		goto out;
    352 		break;
    353 	}
    354 
    355 	if (hw->mac.type < ixgbe_mac_X540) {
    356 		/*
    357 		 * Enable auto-negotiation between the MAC & PHY;
    358 		 * the MAC will advertise clause 37 flow control.
    359 		 */
    360 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
    361 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
    362 
    363 		/* Disable AN timeout */
    364 		if (hw->fc.strict_ieee)
    365 			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
    366 
    367 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
    368 		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
    369 	}
    370 
    371 	/*
    372 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
    373 	 * and copper. There is no need to set the PCS1GCTL register.
    374 	 *
    375 	 */
    376 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
    377 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
    378 		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
    379 		if (ret_val)
    380 			goto out;
    381 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
    382 		    (ixgbe_device_supports_autoneg_fc(hw))) {
    383 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
    384 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
    385 	}
    386 
    387 	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
    388 out:
    389 	return ret_val;
    390 }
    391 
    392 /**
    393  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
    394  *  @hw: pointer to hardware structure
    395  *
    396  *  Starts the hardware by filling the bus info structure and media type, clears
    397  *  all on chip counters, initializes receive address registers, multicast
    398  *  table, VLAN filter table, calls routine to set up link and flow control
    399  *  settings, and leaves transmit and receive units disabled and uninitialized
    400  **/
    401 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
    402 {
    403 	s32 ret_val;
    404 	u32 ctrl_ext;
    405 	u16 device_caps;
    406 
    407 	DEBUGFUNC("ixgbe_start_hw_generic");
    408 
    409 	/* Set the media type */
    410 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
    411 
    412 	/* PHY ops initialization must be done in reset_hw() */
    413 
    414 	/* Clear the VLAN filter table */
    415 	hw->mac.ops.clear_vfta(hw);
    416 
    417 	/* Clear statistics registers */
    418 	hw->mac.ops.clear_hw_cntrs(hw);
    419 
    420 	/* Set No Snoop Disable */
    421 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    422 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
    423 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    424 	IXGBE_WRITE_FLUSH(hw);
    425 
    426 	/* Setup flow control */
    427 	ret_val = ixgbe_setup_fc(hw);
    428 	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
    429 		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
    430 		return ret_val;
    431 	}
    432 
    433 	/* Cache bit indicating need for crosstalk fix */
    434 	switch (hw->mac.type) {
    435 	case ixgbe_mac_82599EB:
    436 	case ixgbe_mac_X550EM_x:
    437 	case ixgbe_mac_X550EM_a:
    438 		hw->mac.ops.get_device_caps(hw, &device_caps);
    439 		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
    440 			hw->need_crosstalk_fix = FALSE;
    441 		else
    442 			hw->need_crosstalk_fix = TRUE;
    443 		break;
    444 	default:
    445 		hw->need_crosstalk_fix = FALSE;
    446 		break;
    447 	}
    448 
    449 	/* Clear adapter stopped flag */
    450 	hw->adapter_stopped = FALSE;
    451 
    452 	return IXGBE_SUCCESS;
    453 }
    454 
    455 /**
    456  *  ixgbe_start_hw_gen2 - Init sequence for common device family
    457  *  @hw: pointer to hw structure
    458  *
    459  * Performs the init sequence common to the second generation
    460  * of 10 GbE devices.
    461  * Devices in the second generation:
    462  *     82599
    463  *     X540
    464  **/
    465 void ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
    466 {
    467 	u32 i;
    468 	u32 regval;
    469 
    470 	DEBUGFUNC("ixgbe_start_hw_gen2");
    471 
    472 	/* Clear the rate limiters */
    473 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
    474 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
    475 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
    476 	}
    477 	IXGBE_WRITE_FLUSH(hw);
    478 
    479 	/* Disable relaxed ordering */
    480 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
    481 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
    482 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    483 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
    484 	}
    485 
    486 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
    487 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
    488 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
    489 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
    490 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
    491 	}
    492 }
    493 
    494 /**
    495  *  ixgbe_init_hw_generic - Generic hardware initialization
    496  *  @hw: pointer to hardware structure
    497  *
    498  *  Initialize the hardware by resetting the hardware, filling the bus info
    499  *  structure and media type, clears all on chip counters, initializes receive
    500  *  address registers, multicast table, VLAN filter table, calls routine to set
    501  *  up link and flow control settings, and leaves transmit and receive units
    502  *  disabled and uninitialized
    503  **/
    504 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
    505 {
    506 	s32 status;
    507 
    508 	DEBUGFUNC("ixgbe_init_hw_generic");
    509 
    510 	/* Reset the hardware */
    511 	status = hw->mac.ops.reset_hw(hw);
    512 
    513 	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
    514 		/* Start the HW */
    515 		status = hw->mac.ops.start_hw(hw);
    516 	}
    517 
    518 	/* Initialize the LED link active for LED blink support */
    519 	if (hw->mac.ops.init_led_link_act)
    520 		hw->mac.ops.init_led_link_act(hw);
    521 
    522 	if (status != IXGBE_SUCCESS)
    523 		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
    524 
    525 	return status;
    526 }
    527 
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.  Each IXGBE_READ_REG below is
 *  performed purely for its clearing side effect; the values are discarded.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* MBSDC only exists on X550 and newer */
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_READ_REG(hw, IXGBE_MBSDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level flow control Tx counters */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* Rx pause counters moved registers on 82599 and later */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (per-TC) pause counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx size-bucket and good/broadcast/multicast packet counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	/* 64-bit octet counters: low half must be read before high */
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* RNBC only exists on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	/* Management-traffic counters */
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx size-bucket counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters; 82599+ splits byte counters into L/H halves */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550 keep some PHY-side counters; clear those via MDIO too.
	 * i is reused here as a throwaway u16 read buffer. */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
    643 
/**
 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: stores the part number string from the EEPROM
 *  @pba_num_size: part number string buffer length
 *
 *  Reads the part number string from the EEPROM.  Two NVM layouts are
 *  handled: the legacy format, where the two PBA words are hex digits
 *  decoded into an 11-byte "xxxxxx-xxx" string, and the pointer format
 *  (first word == IXGBE_PBANUM_PTR_GUARD), where the second word points
 *  to a length-prefixed string section in the EEPROM.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		/* nibble value 0 here; converted to '0' by the loop below */
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
			/* '-' (0x2D) falls outside both ranges: untouched */
		}

		return IXGBE_SUCCESS;
	}

	/* Pointer format: pba_ptr references a length-prefixed section */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size  < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word yields two string bytes (big-endian order) */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
    754 
    755 /**
    756  *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
    757  *  @hw: pointer to hardware structure
    758  *  @pba_num: stores the part number from the EEPROM
    759  *
    760  *  Reads the part number from the EEPROM.
    761  **/
    762 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
    763 {
    764 	s32 ret_val;
    765 	u16 data;
    766 
    767 	DEBUGFUNC("ixgbe_read_pba_num_generic");
    768 
    769 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
    770 	if (ret_val) {
    771 		DEBUGOUT("NVM Read Error\n");
    772 		return ret_val;
    773 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
    774 		DEBUGOUT("NVM Not supported\n");
    775 		return IXGBE_NOT_IMPLEMENTED;
    776 	}
    777 	*pba_num = (u32)(data << 16);
    778 
    779 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
    780 	if (ret_val) {
    781 		DEBUGOUT("NVM Read Error\n");
    782 		return ret_val;
    783 	}
    784 	*pba_num |= (u32)data;
    785 
    786 	return IXGBE_SUCCESS;
    787 }
    788 
/**
 *  ixgbe_read_pba_raw
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @max_pba_block_size: PBA block size limit
 *  @pba: pointer to output PBA structure
 *
 *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
 *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
 *
 *  Fills pba->word[0..1]; when the PBA is in pointer format
 *  (word[0] == IXGBE_PBANUM_PTR_GUARD) the referenced block is also
 *  copied into the caller-provided pba->pba_block, whose size is
 *  validated against max_pba_block_size.
 **/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
		       u32 eeprom_buf_size, u16 max_pba_block_size,
		       struct ixgbe_pba *pba)
{
	s32 ret_val;
	u16 pba_block_size;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Fetch the two PBA words from the device or the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		/* image must be large enough to contain both PBA words */
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Pointer format: word[1] is the offset of the PBA block */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
						   eeprom_buf_size,
						   &pba_block_size);
		if (ret_val)
			return ret_val;

		/* caller's buffer must hold the whole block */
		if (pba_block_size > max_pba_block_size)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
							     pba_block_size,
							     pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* block must lie entirely within the supplied image */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba_block_size)) {
				memcpy(pba->pba_block,
				       &eeprom_buf[pba->word[1]],
				       pba_block_size * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
    858 
/**
 *  ixgbe_write_pba_raw
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @pba: pointer to PBA structure
 *
 *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
 *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 *  pba->word[0..1] are written to the two PBA pointer words.  If word 0
 *  is the pointer-guard value, pba->pba_block is written as well; its
 *  first word (pba->pba_block[0]) holds the block length in words.
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	if (eeprom_buf == NULL) {
		/* No image supplied: write the two pointer words to HW. */
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		/* Image supplied: both pointer words must fit in the image. */
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			/* pba_block[0] is the block length in words. */
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* Bounds-check the block against the image size. */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
    916 
/**
 *  ixgbe_get_pba_block_size
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @pba_block_size: pointer to output variable (may be NULL, in which
 *	case only validation is performed)
 *
 *  Returns the size of the PBA block in words. Function operates on EEPROM
 *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
 *  EEPROM device.
 *
 *  The size is taken from the first word of the block pointed to by PBA
 *  pointer word 1; a legacy-format PBA (no guard word) yields size 0.
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* First word of the PBA block holds its length in words. */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		/* 0xFFFF reads as erased/unprogrammed NVM; 0 is nonsense. */
		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block. */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}
    977 
    978 /**
    979  *  ixgbe_get_mac_addr_generic - Generic get MAC address
    980  *  @hw: pointer to hardware structure
    981  *  @mac_addr: Adapter MAC address
    982  *
    983  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
    984  *  A reset of the adapter must be performed prior to calling this function
    985  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
    986  **/
    987 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
    988 {
    989 	u32 rar_high;
    990 	u32 rar_low;
    991 	u16 i;
    992 
    993 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
    994 
    995 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
    996 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
    997 
    998 	for (i = 0; i < 4; i++)
    999 		mac_addr[i] = (u8)(rar_low >> (i*8));
   1000 
   1001 	for (i = 0; i < 2; i++)
   1002 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
   1003 
   1004 	return IXGBE_SUCCESS;
   1005 }
   1006 
   1007 /**
   1008  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
   1009  *  @hw: pointer to hardware structure
   1010  *  @link_status: the link status returned by the PCI config space
   1011  *
   1012  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
   1013  **/
   1014 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
   1015 {
   1016 	struct ixgbe_mac_info *mac = &hw->mac;
   1017 
   1018 	if (hw->bus.type == ixgbe_bus_type_unknown)
   1019 		hw->bus.type = ixgbe_bus_type_pci_express;
   1020 
   1021 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
   1022 	case IXGBE_PCI_LINK_WIDTH_1:
   1023 		hw->bus.width = ixgbe_bus_width_pcie_x1;
   1024 		break;
   1025 	case IXGBE_PCI_LINK_WIDTH_2:
   1026 		hw->bus.width = ixgbe_bus_width_pcie_x2;
   1027 		break;
   1028 	case IXGBE_PCI_LINK_WIDTH_4:
   1029 		hw->bus.width = ixgbe_bus_width_pcie_x4;
   1030 		break;
   1031 	case IXGBE_PCI_LINK_WIDTH_8:
   1032 		hw->bus.width = ixgbe_bus_width_pcie_x8;
   1033 		break;
   1034 	default:
   1035 		hw->bus.width = ixgbe_bus_width_unknown;
   1036 		break;
   1037 	}
   1038 
   1039 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
   1040 	case IXGBE_PCI_LINK_SPEED_2500:
   1041 		hw->bus.speed = ixgbe_bus_speed_2500;
   1042 		break;
   1043 	case IXGBE_PCI_LINK_SPEED_5000:
   1044 		hw->bus.speed = ixgbe_bus_speed_5000;
   1045 		break;
   1046 	case IXGBE_PCI_LINK_SPEED_8000:
   1047 		hw->bus.speed = ixgbe_bus_speed_8000;
   1048 		break;
   1049 	default:
   1050 		hw->bus.speed = ixgbe_bus_speed_unknown;
   1051 		break;
   1052 	}
   1053 
   1054 	mac->ops.set_lan_id(hw);
   1055 }
   1056 
   1057 /**
   1058  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
   1059  *  @hw: pointer to hardware structure
   1060  *
   1061  *  Gets the PCI bus info (speed, width, type) then calls helper function to
   1062  *  store this data within the ixgbe_hw structure.
   1063  **/
   1064 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
   1065 {
   1066 	u16 link_status;
   1067 
   1068 	DEBUGFUNC("ixgbe_get_bus_info_generic");
   1069 
   1070 	/* Get the negotiated link width and speed from PCI config space */
   1071 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
   1072 
   1073 	ixgbe_set_pci_config_data_generic(hw, link_status);
   1074 
   1075 	return IXGBE_SUCCESS;
   1076 }
   1077 
   1078 /**
   1079  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
   1080  *  @hw: pointer to the HW structure
   1081  *
   1082  *  Determines the LAN function id by reading memory-mapped registers and swaps
   1083  *  the port value if requested, and set MAC instance for devices that share
   1084  *  CS4227.
   1085  **/
   1086 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
   1087 {
   1088 	struct ixgbe_bus_info *bus = &hw->bus;
   1089 	u32 reg;
   1090 	u16 ee_ctrl_4;
   1091 
   1092 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
   1093 
   1094 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
   1095 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
   1096 	bus->lan_id = (u8)bus->func;
   1097 
   1098 	/* check for a port swap */
   1099 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
   1100 	if (reg & IXGBE_FACTPS_LFS)
   1101 		bus->func ^= 0x1;
   1102 
   1103 	/* Get MAC instance from EEPROM for configuring CS4227 */
   1104 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
   1105 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
   1106 		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
   1107 				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
   1108 	}
   1109 }
   1110 
/**
 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 *
 *  Returns the result of ixgbe_disable_pcie_master(), i.e. an error when
 *  pending PCIe master requests cannot be cleared.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	/*
	 * XXX
	 * This function is called in the state of both interrupt disabled
	 * and interrupt enabled, e.g.
	 * + interrupt disabled case:
	 *   - ixgbe_stop_locked()
	 *     - ixgbe_disable_intr() // interrupt disabled here
	 *     - ixgbe_stop_adapter()
	 *       - hw->mac.ops.stop_adapter()
	 *         == this function
	 * + interrupt enabled case:
	 *   - ixgbe_local_timer1()
	 *     - ixgbe_init_locked()
	 *       - ixgbe_stop_adapter()
	 *         - hw->mac.ops.stop_adapter()
	 *           == this function
	 * Therefore, it causes nest status breaking to nest the status
	 * (that is, que->im_nest++) at all times. So, this function must
	 * use ixgbe_ensure_disabled_intr() instead of ixgbe_disable_intr().
	 */
	ixgbe_ensure_disabled_intr(hw->back);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
   1184 
   1185 /**
   1186  *  ixgbe_init_led_link_act_generic - Store the LED index link/activity.
   1187  *  @hw: pointer to hardware structure
   1188  *
   1189  *  Store the index for the link active LED. This will be used to support
   1190  *  blinking the LED.
   1191  **/
   1192 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
   1193 {
   1194 	struct ixgbe_mac_info *mac = &hw->mac;
   1195 	u32 led_reg, led_mode;
   1196 	u8 i;
   1197 
   1198 	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
   1199 
   1200 	/* Get LED link active from the LEDCTL register */
   1201 	for (i = 0; i < 4; i++) {
   1202 		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
   1203 
   1204 		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
   1205 		     IXGBE_LED_LINK_ACTIVE) {
   1206 			mac->led_link_act = i;
   1207 			return IXGBE_SUCCESS;
   1208 		}
   1209 	}
   1210 
   1211 	/*
   1212 	 * If LEDCTL register does not have the LED link active set, then use
   1213 	 * known MAC defaults.
   1214 	 */
   1215 	switch (hw->mac.type) {
   1216 	case ixgbe_mac_X550EM_a:
   1217 	case ixgbe_mac_X550EM_x:
   1218 		mac->led_link_act = 1;
   1219 		break;
   1220 	default:
   1221 		mac->led_link_act = 2;
   1222 	}
   1223 	return IXGBE_SUCCESS;
   1224 }
   1225 
   1226 /**
   1227  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
   1228  *  @hw: pointer to hardware structure
   1229  *  @index: led number to turn on
   1230  **/
   1231 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
   1232 {
   1233 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
   1234 
   1235 	DEBUGFUNC("ixgbe_led_on_generic");
   1236 
   1237 	if (index > 3)
   1238 		return IXGBE_ERR_PARAM;
   1239 
   1240 	/* To turn on the LED, set mode to ON. */
   1241 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
   1242 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
   1243 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
   1244 	IXGBE_WRITE_FLUSH(hw);
   1245 
   1246 	return IXGBE_SUCCESS;
   1247 }
   1248 
   1249 /**
   1250  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
   1251  *  @hw: pointer to hardware structure
   1252  *  @index: led number to turn off
   1253  **/
   1254 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
   1255 {
   1256 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
   1257 
   1258 	DEBUGFUNC("ixgbe_led_off_generic");
   1259 
   1260 	if (index > 3)
   1261 		return IXGBE_ERR_PARAM;
   1262 
   1263 	/* To turn off the LED, set mode to OFF. */
   1264 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
   1265 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
   1266 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
   1267 	IXGBE_WRITE_FLUSH(hw);
   1268 
   1269 	return IXGBE_SUCCESS;
   1270 }
   1271 
/**
 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 *  @hw: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
 *  ixgbe_hw struct in order to set up EEPROM access.  Runs only once:
 *  subsequent calls are no-ops because eeprom->type is no longer
 *  ixgbe_eeprom_uninitialized.  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			/* SIZE field encodes a power of two word count. */
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/*
		 * NOTE(review): address_bits is derived from EEC even when
		 * no EEPROM is present (type stays ixgbe_eeprom_none);
		 * presumably harmless since bit-bang access is then unused.
		 */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}
   1324 
   1325 /**
   1326  *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
   1327  *  @hw: pointer to hardware structure
   1328  *  @offset: offset within the EEPROM to write
   1329  *  @words: number of word(s)
   1330  *  @data: 16 bit word(s) to write to EEPROM
   1331  *
   1332  *  Reads 16 bit word(s) from EEPROM through bit-bang method
   1333  **/
   1334 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
   1335 					       u16 words, u16 *data)
   1336 {
   1337 	s32 status = IXGBE_SUCCESS;
   1338 	u16 i, count;
   1339 
   1340 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
   1341 
   1342 	hw->eeprom.ops.init_params(hw);
   1343 
   1344 	if (words == 0) {
   1345 		status = IXGBE_ERR_INVALID_ARGUMENT;
   1346 		goto out;
   1347 	}
   1348 
   1349 	if (offset + words > hw->eeprom.word_size) {
   1350 		status = IXGBE_ERR_EEPROM;
   1351 		goto out;
   1352 	}
   1353 
   1354 	/*
   1355 	 * The EEPROM page size cannot be queried from the chip. We do lazy
   1356 	 * initialization. It is worth to do that when we write large buffer.
   1357 	 */
   1358 	if ((hw->eeprom.word_page_size == 0) &&
   1359 	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
   1360 		ixgbe_detect_eeprom_page_size_generic(hw, offset);
   1361 
   1362 	/*
   1363 	 * We cannot hold synchronization semaphores for too long
   1364 	 * to avoid other entity starvation. However it is more efficient
   1365 	 * to read in bursts than synchronizing access for each word.
   1366 	 */
   1367 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
   1368 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
   1369 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
   1370 		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
   1371 							    count, &data[i]);
   1372 
   1373 		if (status != IXGBE_SUCCESS)
   1374 			break;
   1375 	}
   1376 
   1377 out:
   1378 	return status;
   1379 }
   1380 
/**
 *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of word(s)
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 *
 *  Acquires the EEPROM, then for each page-sized run issues WREN + WRITE
 *  and streams data words over SPI, pausing after each run for the
 *  device's internal write cycle.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/*  Send the WRITE ENABLE command (8 bit opcode )  */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 * (Once set, the A8 bit stays set for the rest of
			 * this call; offsets only increase, so that is
			 * consistent.)
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address: word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/*
			 * Send the data in burst via SPI.
			 * NOTE: this inner loop advances the outer index i;
			 * a run ends at the last word or at a page boundary.
			 */
			do {
				word = data[i];
				/* Swap to the EEPROM's byte order. */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* Allow the device's internal write cycle to finish. */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
   1463 
   1464 /**
   1465  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
   1466  *  @hw: pointer to hardware structure
   1467  *  @offset: offset within the EEPROM to be written to
   1468  *  @data: 16 bit word to be written to the EEPROM
   1469  *
   1470  *  If ixgbe_eeprom_update_checksum is not called after this function, the
   1471  *  EEPROM will most likely contain an invalid checksum.
   1472  **/
   1473 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
   1474 {
   1475 	s32 status;
   1476 
   1477 	DEBUGFUNC("ixgbe_write_eeprom_generic");
   1478 
   1479 	hw->eeprom.ops.init_params(hw);
   1480 
   1481 	if (offset >= hw->eeprom.word_size) {
   1482 		status = IXGBE_ERR_EEPROM;
   1483 		goto out;
   1484 	}
   1485 
   1486 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
   1487 
   1488 out:
   1489 	return status;
   1490 }
   1491 
   1492 /**
   1493  *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
   1494  *  @hw: pointer to hardware structure
   1495  *  @offset: offset within the EEPROM to be read
   1496  *  @data: read 16 bit words(s) from EEPROM
   1497  *  @words: number of word(s)
   1498  *
   1499  *  Reads 16 bit word(s) from EEPROM through bit-bang method
   1500  **/
   1501 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
   1502 					      u16 words, u16 *data)
   1503 {
   1504 	s32 status = IXGBE_SUCCESS;
   1505 	u16 i, count;
   1506 
   1507 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
   1508 
   1509 	hw->eeprom.ops.init_params(hw);
   1510 
   1511 	if (words == 0) {
   1512 		status = IXGBE_ERR_INVALID_ARGUMENT;
   1513 		goto out;
   1514 	}
   1515 
   1516 	if (offset + words > hw->eeprom.word_size) {
   1517 		status = IXGBE_ERR_EEPROM;
   1518 		goto out;
   1519 	}
   1520 
   1521 	/*
   1522 	 * We cannot hold synchronization semaphores for too long
   1523 	 * to avoid other entity starvation. However it is more efficient
   1524 	 * to read in bursts than synchronizing access for each word.
   1525 	 */
   1526 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
   1527 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
   1528 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
   1529 
   1530 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
   1531 							   count, &data[i]);
   1532 
   1533 		if (status != IXGBE_SUCCESS)
   1534 			break;
   1535 	}
   1536 
   1537 out:
   1538 	return status;
   1539 }
   1540 
/**
 *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @words: number of word(s)
 *  @data: read 16 bit word(s) from EEPROM
 *
 *  Reads 16 bit word(s) from EEPROM through bit-bang method.  Acquires
 *  the EEPROM, then for each word shifts out a READ opcode plus byte
 *  address and shifts in the 16 data bits.
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 * (Once set, the A8 bit stays set for the rest of
			 * this call; offsets only increase, so that is
			 * consistent.)
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address: word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data, swapping to host byte order. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
   1598 
   1599 /**
   1600  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
   1601  *  @hw: pointer to hardware structure
   1602  *  @offset: offset within the EEPROM to be read
   1603  *  @data: read 16 bit value from EEPROM
   1604  *
   1605  *  Reads 16 bit value from EEPROM through bit-bang method
   1606  **/
   1607 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
   1608 				       u16 *data)
   1609 {
   1610 	s32 status;
   1611 
   1612 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
   1613 
   1614 	hw->eeprom.ops.init_params(hw);
   1615 
   1616 	if (offset >= hw->eeprom.word_size) {
   1617 		status = IXGBE_ERR_EEPROM;
   1618 		goto out;
   1619 	}
   1620 
   1621 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
   1622 
   1623 out:
   1624 	return status;
   1625 }
   1626 
   1627 /**
   1628  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
   1629  *  @hw: pointer to hardware structure
   1630  *  @offset: offset of word in the EEPROM to read
   1631  *  @words: number of word(s)
   1632  *  @data: 16 bit word(s) from the EEPROM
   1633  *
   1634  *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
   1635  **/
   1636 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
   1637 				   u16 words, u16 *data)
   1638 {
   1639 	u32 eerd;
   1640 	s32 status = IXGBE_SUCCESS;
   1641 	u32 i;
   1642 
   1643 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
   1644 
   1645 	hw->eeprom.ops.init_params(hw);
   1646 
   1647 	if (words == 0) {
   1648 		status = IXGBE_ERR_INVALID_ARGUMENT;
   1649 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
   1650 		goto out;
   1651 	}
   1652 
   1653 	if (offset >= hw->eeprom.word_size) {
   1654 		status = IXGBE_ERR_EEPROM;
   1655 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
   1656 		goto out;
   1657 	}
   1658 
   1659 	for (i = 0; i < words; i++) {
   1660 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
   1661 		       IXGBE_EEPROM_RW_REG_START;
   1662 
   1663 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
   1664 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
   1665 
   1666 		if (status == IXGBE_SUCCESS) {
   1667 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
   1668 				   IXGBE_EEPROM_RW_REG_DATA);
   1669 		} else {
   1670 			DEBUGOUT("Eeprom read timed out\n");
   1671 			goto out;
   1672 		}
   1673 	}
   1674 out:
   1675 	return status;
   1676 }
   1677 
/**
 *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be used as a scratch pad
 *
 *  Discover EEPROM page size by writing marching data at given offset.
 *  This function is called only when we are writing a new large buffer
 *  at given offset so the data would be overwritten anyway.
 *
 *  Technique: write 0..MAX-1 as one burst; a burst longer than the real
 *  page wraps within the page, so the value read back at the start of
 *  the page reveals how far the write wrapped, and hence the page size.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* Marching pattern: data[i] == i. */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Temporarily assume the max page size so the burst is unsplit. */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}
   1721 
   1722 /**
   1723  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
   1724  *  @hw: pointer to hardware structure
   1725  *  @offset: offset of  word in the EEPROM to read
   1726  *  @data: word read from the EEPROM
   1727  *
   1728  *  Reads a 16 bit word from the EEPROM using the EERD register.
   1729  **/
   1730 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
   1731 {
   1732 	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
   1733 }
   1734 
   1735 /**
   1736  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
   1737  *  @hw: pointer to hardware structure
   1738  *  @offset: offset of  word in the EEPROM to write
   1739  *  @words: number of word(s)
   1740  *  @data: word(s) write to the EEPROM
   1741  *
   1742  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
   1743  **/
   1744 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
   1745 				    u16 words, u16 *data)
   1746 {
   1747 	u32 eewr;
   1748 	s32 status = IXGBE_SUCCESS;
   1749 	u16 i;
   1750 
   1751 	DEBUGFUNC("ixgbe_write_eewr_generic");
   1752 
   1753 	hw->eeprom.ops.init_params(hw);
   1754 
   1755 	if (words == 0) {
   1756 		status = IXGBE_ERR_INVALID_ARGUMENT;
   1757 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
   1758 		goto out;
   1759 	}
   1760 
   1761 	if (offset >= hw->eeprom.word_size) {
   1762 		status = IXGBE_ERR_EEPROM;
   1763 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
   1764 		goto out;
   1765 	}
   1766 
   1767 	for (i = 0; i < words; i++) {
   1768 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
   1769 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
   1770 			IXGBE_EEPROM_RW_REG_START;
   1771 
   1772 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
   1773 		if (status != IXGBE_SUCCESS) {
   1774 			DEBUGOUT("Eeprom write EEWR timed out\n");
   1775 			goto out;
   1776 		}
   1777 
   1778 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
   1779 
   1780 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
   1781 		if (status != IXGBE_SUCCESS) {
   1782 			DEBUGOUT("Eeprom write EEWR timed out\n");
   1783 			goto out;
   1784 		}
   1785 	}
   1786 
   1787 out:
   1788 	return status;
   1789 }
   1790 
   1791 /**
   1792  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
   1793  *  @hw: pointer to hardware structure
   1794  *  @offset: offset of  word in the EEPROM to write
   1795  *  @data: word write to the EEPROM
   1796  *
   1797  *  Write a 16 bit word to the EEPROM using the EEWR register.
   1798  **/
   1799 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
   1800 {
   1801 	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
   1802 }
   1803 
   1804 /**
   1805  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
   1806  *  @hw: pointer to hardware structure
   1807  *  @ee_reg: EEPROM flag for polling
   1808  *
   1809  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
   1810  *  read or write is done respectively.
   1811  **/
   1812 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
   1813 {
   1814 	u32 i;
   1815 	u32 reg;
   1816 	s32 status = IXGBE_ERR_EEPROM;
   1817 
   1818 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
   1819 
   1820 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
   1821 		if (ee_reg == IXGBE_NVM_POLL_READ)
   1822 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
   1823 		else
   1824 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
   1825 
   1826 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
   1827 			status = IXGBE_SUCCESS;
   1828 			break;
   1829 		}
   1830 		usec_delay(5);
   1831 	}
   1832 
   1833 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
   1834 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
   1835 			     "EEPROM read/write done polling timed out");
   1836 
   1837 	return status;
   1838 }
   1839 
   1840 /**
   1841  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
   1842  *  @hw: pointer to hardware structure
   1843  *
   1844  *  Prepares EEPROM for access using bit-bang method. This function should
   1845  *  be called before issuing a command to the EEPROM.
   1846  **/
   1847 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
   1848 {
   1849 	s32 status = IXGBE_SUCCESS;
   1850 	u32 eec;
   1851 	u32 i;
   1852 
   1853 	DEBUGFUNC("ixgbe_acquire_eeprom");
   1854 
   1855 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
   1856 	    != IXGBE_SUCCESS)
   1857 		status = IXGBE_ERR_SWFW_SYNC;
   1858 
   1859 	if (status == IXGBE_SUCCESS) {
   1860 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
   1861 
   1862 		/* Request EEPROM Access */
   1863 		eec |= IXGBE_EEC_REQ;
   1864 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
   1865 
   1866 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
   1867 			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
   1868 			if (eec & IXGBE_EEC_GNT)
   1869 				break;
   1870 			usec_delay(5);
   1871 		}
   1872 
   1873 		/* Release if grant not acquired */
   1874 		if (!(eec & IXGBE_EEC_GNT)) {
   1875 			eec &= ~IXGBE_EEC_REQ;
   1876 			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
   1877 			DEBUGOUT("Could not acquire EEPROM grant\n");
   1878 
   1879 			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
   1880 			status = IXGBE_ERR_EEPROM;
   1881 		}
   1882 
   1883 		/* Setup EEPROM for Read/Write */
   1884 		if (status == IXGBE_SUCCESS) {
   1885 			/* Clear CS and SK */
   1886 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
   1887 			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
   1888 			IXGBE_WRITE_FLUSH(hw);
   1889 			usec_delay(1);
   1890 		}
   1891 	}
   1892 	return status;
   1893 }
   1894 
   1895 /**
   1896  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
   1897  *  @hw: pointer to hardware structure
   1898  *
   1899  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
   1900  **/
   1901 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
   1902 {
   1903 	s32 status = IXGBE_ERR_EEPROM;
   1904 	u32 timeout = 2000;
   1905 	u32 i;
   1906 	u32 swsm;
   1907 
   1908 	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
   1909 
   1910 
   1911 	/* Get SMBI software semaphore between device drivers first */
   1912 	for (i = 0; i < timeout; i++) {
   1913 		/*
   1914 		 * If the SMBI bit is 0 when we read it, then the bit will be
   1915 		 * set and we have the semaphore
   1916 		 */
   1917 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
   1918 		if (!(swsm & IXGBE_SWSM_SMBI)) {
   1919 			status = IXGBE_SUCCESS;
   1920 			break;
   1921 		}
   1922 		usec_delay(50);
   1923 	}
   1924 
   1925 	if (i == timeout) {
   1926 		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
   1927 			 "not granted.\n");
   1928 		/*
   1929 		 * this release is particularly important because our attempts
   1930 		 * above to get the semaphore may have succeeded, and if there
   1931 		 * was a timeout, we should unconditionally clear the semaphore
   1932 		 * bits to free the driver to make progress
   1933 		 */
   1934 		ixgbe_release_eeprom_semaphore(hw);
   1935 
   1936 		usec_delay(50);
   1937 		/*
   1938 		 * one last try
   1939 		 * If the SMBI bit is 0 when we read it, then the bit will be
   1940 		 * set and we have the semaphore
   1941 		 */
   1942 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
   1943 		if (!(swsm & IXGBE_SWSM_SMBI))
   1944 			status = IXGBE_SUCCESS;
   1945 	}
   1946 
   1947 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
   1948 	if (status == IXGBE_SUCCESS) {
   1949 		for (i = 0; i < timeout; i++) {
   1950 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
   1951 
   1952 			/* Set the SW EEPROM semaphore bit to request access */
   1953 			swsm |= IXGBE_SWSM_SWESMBI;
   1954 			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
   1955 
   1956 			/*
   1957 			 * If we set the bit successfully then we got the
   1958 			 * semaphore.
   1959 			 */
   1960 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
   1961 			if (swsm & IXGBE_SWSM_SWESMBI)
   1962 				break;
   1963 
   1964 			usec_delay(50);
   1965 		}
   1966 
   1967 		/*
   1968 		 * Release semaphores and return error if SW EEPROM semaphore
   1969 		 * was not granted because we don't have access to the EEPROM
   1970 		 */
   1971 		if (i >= timeout) {
   1972 			ERROR_REPORT1(IXGBE_ERROR_POLLING,
   1973 			    "SWESMBI Software EEPROM semaphore not granted.\n");
   1974 			ixgbe_release_eeprom_semaphore(hw);
   1975 			status = IXGBE_ERR_EEPROM;
   1976 		}
   1977 	} else {
   1978 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
   1979 			     "Software semaphore SMBI between device drivers "
   1980 			     "not granted.\n");
   1981 	}
   1982 
   1983 	return status;
   1984 }
   1985 
   1986 /**
   1987  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
   1988  *  @hw: pointer to hardware structure
   1989  *
   1990  *  This function clears hardware semaphore bits.
   1991  **/
   1992 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
   1993 {
   1994 	u32 swsm;
   1995 
   1996 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
   1997 
   1998 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
   1999 
   2000 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
   2001 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
   2002 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
   2003 	IXGBE_WRITE_FLUSH(hw);
   2004 }
   2005 
   2006 /**
   2007  *  ixgbe_ready_eeprom - Polls for EEPROM ready
   2008  *  @hw: pointer to hardware structure
   2009  **/
   2010 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
   2011 {
   2012 	s32 status = IXGBE_SUCCESS;
   2013 	u16 i;
   2014 	u8 spi_stat_reg;
   2015 
   2016 	DEBUGFUNC("ixgbe_ready_eeprom");
   2017 
   2018 	/*
   2019 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
   2020 	 * EEPROM will signal that the command has been completed by clearing
   2021 	 * bit 0 of the internal status register.  If it's not cleared within
   2022 	 * 5 milliseconds, then error out.
   2023 	 */
   2024 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
   2025 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
   2026 					    IXGBE_EEPROM_OPCODE_BITS);
   2027 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
   2028 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
   2029 			break;
   2030 
   2031 		usec_delay(5);
   2032 		ixgbe_standby_eeprom(hw);
   2033 	}
   2034 
   2035 	/*
   2036 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
   2037 	 * devices (and only 0-5mSec on 5V devices)
   2038 	 */
   2039 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
   2040 		DEBUGOUT("SPI EEPROM Status error\n");
   2041 		status = IXGBE_ERR_EEPROM;
   2042 	}
   2043 
   2044 	return status;
   2045 }
   2046 
   2047 /**
   2048  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
   2049  *  @hw: pointer to hardware structure
   2050  **/
   2051 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
   2052 {
   2053 	u32 eec;
   2054 
   2055 	DEBUGFUNC("ixgbe_standby_eeprom");
   2056 
   2057 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
   2058 
   2059 	/* Toggle CS to flush commands */
   2060 	eec |= IXGBE_EEC_CS;
   2061 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
   2062 	IXGBE_WRITE_FLUSH(hw);
   2063 	usec_delay(1);
   2064 	eec &= ~IXGBE_EEC_CS;
   2065 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
   2066 	IXGBE_WRITE_FLUSH(hw);
   2067 	usec_delay(1);
   2068 }
   2069 
   2070 /**
   2071  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
   2072  *  @hw: pointer to hardware structure
   2073  *  @data: data to send to the EEPROM
   2074  *  @count: number of bits to shift out
   2075  **/
   2076 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
   2077 					u16 count)
   2078 {
   2079 	u32 eec;
   2080 	u32 mask;
   2081 	u32 i;
   2082 
   2083 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
   2084 
   2085 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
   2086 
   2087 	/*
   2088 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
   2089 	 * one bit at a time.  Determine the starting bit based on count
   2090 	 */
   2091 	mask = 0x01 << (count - 1);
   2092 
   2093 	for (i = 0; i < count; i++) {
   2094 		/*
   2095 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
   2096 		 * "1", and then raising and then lowering the clock (the SK
   2097 		 * bit controls the clock input to the EEPROM).  A "0" is
   2098 		 * shifted out to the EEPROM by setting "DI" to "0" and then
   2099 		 * raising and then lowering the clock.
   2100 		 */
   2101 		if (data & mask)
   2102 			eec |= IXGBE_EEC_DI;
   2103 		else
   2104 			eec &= ~IXGBE_EEC_DI;
   2105 
   2106 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
   2107 		IXGBE_WRITE_FLUSH(hw);
   2108 
   2109 		usec_delay(1);
   2110 
   2111 		ixgbe_raise_eeprom_clk(hw, &eec);
   2112 		ixgbe_lower_eeprom_clk(hw, &eec);
   2113 
   2114 		/*
   2115 		 * Shift mask to signify next bit of data to shift in to the
   2116 		 * EEPROM
   2117 		 */
   2118 		mask = mask >> 1;
   2119 	}
   2120 
   2121 	/* We leave the "DI" bit set to "0" when we leave this routine. */
   2122 	eec &= ~IXGBE_EEC_DI;
   2123 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
   2124 	IXGBE_WRITE_FLUSH(hw);
   2125 }
   2126 
   2127 /**
   2128  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
   2129  *  @hw: pointer to hardware structure
   2130  *  @count: number of bits to shift
   2131  **/
   2132 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
   2133 {
   2134 	u32 eec;
   2135 	u32 i;
   2136 	u16 data = 0;
   2137 
   2138 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
   2139 
   2140 	/*
   2141 	 * In order to read a register from the EEPROM, we need to shift
   2142 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
   2143 	 * the clock input to the EEPROM (setting the SK bit), and then reading
   2144 	 * the value of the "DO" bit.  During this "shifting in" process the
   2145 	 * "DI" bit should always be clear.
   2146 	 */
   2147 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
   2148 
   2149 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
   2150 
   2151 	for (i = 0; i < count; i++) {
   2152 		data = data << 1;
   2153 		ixgbe_raise_eeprom_clk(hw, &eec);
   2154 
   2155 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
   2156 
   2157 		eec &= ~(IXGBE_EEC_DI);
   2158 		if (eec & IXGBE_EEC_DO)
   2159 			data |= 1;
   2160 
   2161 		ixgbe_lower_eeprom_clk(hw, &eec);
   2162 	}
   2163 
   2164 	return data;
   2165 }
   2166 
   2167 /**
   2168  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
   2169  *  @hw: pointer to hardware structure
   2170  *  @eec: EEC register's current value
   2171  **/
   2172 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
   2173 {
   2174 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
   2175 
   2176 	/*
   2177 	 * Raise the clock input to the EEPROM
   2178 	 * (setting the SK bit), then delay
   2179 	 */
   2180 	*eec = *eec | IXGBE_EEC_SK;
   2181 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
   2182 	IXGBE_WRITE_FLUSH(hw);
   2183 	usec_delay(1);
   2184 }
   2185 
   2186 /**
   2187  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
   2188  *  @hw: pointer to hardware structure
   2189  *  @eec: EEC's current value
   2190  **/
   2191 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
   2192 {
   2193 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
   2194 
   2195 	/*
   2196 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
   2197 	 * delay
   2198 	 */
   2199 	*eec = *eec & ~IXGBE_EEC_SK;
   2200 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
   2201 	IXGBE_WRITE_FLUSH(hw);
   2202 	usec_delay(1);
   2203 }
   2204 
   2205 /**
   2206  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
   2207  *  @hw: pointer to hardware structure
   2208  **/
   2209 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
   2210 {
   2211 	u32 eec;
   2212 
   2213 	DEBUGFUNC("ixgbe_release_eeprom");
   2214 
   2215 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
   2216 
   2217 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
   2218 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
   2219 
   2220 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
   2221 	IXGBE_WRITE_FLUSH(hw);
   2222 
   2223 	usec_delay(1);
   2224 
   2225 	/* Stop requesting EEPROM access */
   2226 	eec &= ~IXGBE_EEC_REQ;
   2227 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
   2228 
   2229 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
   2230 
   2231 	/* Delay before attempt to obtain semaphore again to allow FW access */
   2232 	msec_delay(hw->eeprom.semaphore_delay);
   2233 }
   2234 
   2235 /**
   2236  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
   2237  *  @hw: pointer to hardware structure
   2238  *
   2239  *  Returns a negative error code on error, or the 16-bit checksum
   2240  **/
   2241 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
   2242 {
   2243 	u16 i;
   2244 	u16 j;
   2245 	u16 checksum = 0;
   2246 	u16 length = 0;
   2247 	u16 pointer = 0;
   2248 	u16 word = 0;
   2249 
   2250 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
   2251 
   2252 	/* Include 0x0-0x3F in the checksum */
   2253 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
   2254 		if (hw->eeprom.ops.read(hw, i, &word)) {
   2255 			DEBUGOUT("EEPROM read failed\n");
   2256 			return IXGBE_ERR_EEPROM;
   2257 		}
   2258 		checksum += word;
   2259 	}
   2260 
   2261 	/* Include all data from pointers except for the fw pointer */
   2262 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
   2263 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
   2264 			DEBUGOUT("EEPROM read failed\n");
   2265 			return IXGBE_ERR_EEPROM;
   2266 		}
   2267 
   2268 		/* If the pointer seems invalid */
   2269 		if (pointer == 0xFFFF || pointer == 0)
   2270 			continue;
   2271 
   2272 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
   2273 			DEBUGOUT("EEPROM read failed\n");
   2274 			return IXGBE_ERR_EEPROM;
   2275 		}
   2276 
   2277 		if (length == 0xFFFF || length == 0)
   2278 			continue;
   2279 
   2280 		for (j = pointer + 1; j <= pointer + length; j++) {
   2281 			if (hw->eeprom.ops.read(hw, j, &word)) {
   2282 				DEBUGOUT("EEPROM read failed\n");
   2283 				return IXGBE_ERR_EEPROM;
   2284 			}
   2285 			checksum += word;
   2286 		}
   2287 	}
   2288 
   2289 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
   2290 
   2291 	return (s32)checksum;
   2292 }
   2293 
   2294 /**
   2295  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
   2296  *  @hw: pointer to hardware structure
   2297  *  @checksum_val: calculated checksum
   2298  *
   2299  *  Performs checksum calculation and validates the EEPROM checksum.  If the
   2300  *  caller does not need checksum_val, the value can be NULL.
   2301  **/
   2302 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
   2303 					   u16 *checksum_val)
   2304 {
   2305 	s32 status;
   2306 	u16 checksum;
   2307 	u16 read_checksum = 0;
   2308 
   2309 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
   2310 
   2311 	/* Read the first word from the EEPROM. If this times out or fails, do
   2312 	 * not continue or we could be in for a very long wait while every
   2313 	 * EEPROM read fails
   2314 	 */
   2315 	status = hw->eeprom.ops.read(hw, 0, &checksum);
   2316 	if (status) {
   2317 		DEBUGOUT("EEPROM read failed\n");
   2318 		return status;
   2319 	}
   2320 
   2321 	status = hw->eeprom.ops.calc_checksum(hw);
   2322 	if (status < 0)
   2323 		return status;
   2324 
   2325 	checksum = (u16)(status & 0xffff);
   2326 
   2327 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
   2328 	if (status) {
   2329 		DEBUGOUT("EEPROM read failed\n");
   2330 		return status;
   2331 	}
   2332 
   2333 	/* Verify read checksum from EEPROM is the same as
   2334 	 * calculated checksum
   2335 	 */
   2336 	if (read_checksum != checksum)
   2337 		status = IXGBE_ERR_EEPROM_CHECKSUM;
   2338 
   2339 	/* If the user cares, return the calculated checksum */
   2340 	if (checksum_val)
   2341 		*checksum_val = checksum;
   2342 
   2343 	return status;
   2344 }
   2345 
   2346 /**
   2347  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
   2348  *  @hw: pointer to hardware structure
   2349  **/
   2350 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
   2351 {
   2352 	s32 status;
   2353 	u16 checksum;
   2354 
   2355 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
   2356 
   2357 	/* Read the first word from the EEPROM. If this times out or fails, do
   2358 	 * not continue or we could be in for a very long wait while every
   2359 	 * EEPROM read fails
   2360 	 */
   2361 	status = hw->eeprom.ops.read(hw, 0, &checksum);
   2362 	if (status) {
   2363 		DEBUGOUT("EEPROM read failed\n");
   2364 		return status;
   2365 	}
   2366 
   2367 	status = hw->eeprom.ops.calc_checksum(hw);
   2368 	if (status < 0)
   2369 		return status;
   2370 
   2371 	checksum = (u16)(status & 0xffff);
   2372 
   2373 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
   2374 
   2375 	return status;
   2376 }
   2377 
   2378 /**
   2379  *  ixgbe_validate_mac_addr - Validate MAC address
   2380  *  @mac_addr: pointer to MAC address.
   2381  *
   2382  *  Tests a MAC address to ensure it is a valid Individual Address.
   2383  **/
   2384 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
   2385 {
   2386 	s32 status = IXGBE_SUCCESS;
   2387 
   2388 	DEBUGFUNC("ixgbe_validate_mac_addr");
   2389 
   2390 	/* Make sure it is not a multicast address */
   2391 	if (IXGBE_IS_MULTICAST(mac_addr)) {
   2392 		status = IXGBE_ERR_INVALID_MAC_ADDR;
   2393 	/* Not a broadcast address */
   2394 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
   2395 		status = IXGBE_ERR_INVALID_MAC_ADDR;
   2396 	/* Reject the zero address */
   2397 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
   2398 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
   2399 		status = IXGBE_ERR_INVALID_MAC_ADDR;
   2400 	}
   2401 	return status;
   2402 }
   2403 
   2404 /**
   2405  *  ixgbe_set_rar_generic - Set Rx address register
   2406  *  @hw: pointer to hardware structure
   2407  *  @index: Receive address register to write
   2408  *  @addr: Address to put into receive address register
   2409  *  @vmdq: VMDq "set" or "pool" index
   2410  *  @enable_addr: set flag that address is active
   2411  *
   2412  *  Puts an ethernet address into a receive address register.
   2413  **/
   2414 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
   2415 			  u32 enable_addr)
   2416 {
   2417 	u32 rar_low, rar_high;
   2418 	u32 rar_entries = hw->mac.num_rar_entries;
   2419 
   2420 	DEBUGFUNC("ixgbe_set_rar_generic");
   2421 
   2422 	/* Make sure we are using a valid rar index range */
   2423 	if (index >= rar_entries) {
   2424 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
   2425 			     "RAR index %d is out of range.\n", index);
   2426 		return IXGBE_ERR_INVALID_ARGUMENT;
   2427 	}
   2428 
   2429 	/* setup VMDq pool selection before this RAR gets enabled */
   2430 	hw->mac.ops.set_vmdq(hw, index, vmdq);
   2431 
   2432 	/*
   2433 	 * HW expects these in little endian so we reverse the byte
   2434 	 * order from network order (big endian) to little endian
   2435 	 */
   2436 	rar_low = ((u32)addr[0] |
   2437 		   ((u32)addr[1] << 8) |
   2438 		   ((u32)addr[2] << 16) |
   2439 		   ((u32)addr[3] << 24));
   2440 	/*
   2441 	 * Some parts put the VMDq setting in the extra RAH bits,
   2442 	 * so save everything except the lower 16 bits that hold part
   2443 	 * of the address and the address valid bit.
   2444 	 */
   2445 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
   2446 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
   2447 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
   2448 
   2449 	if (enable_addr != 0)
   2450 		rar_high |= IXGBE_RAH_AV;
   2451 
   2452 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
   2453 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
   2454 
   2455 	return IXGBE_SUCCESS;
   2456 }
   2457 
   2458 /**
   2459  *  ixgbe_clear_rar_generic - Remove Rx address register
   2460  *  @hw: pointer to hardware structure
   2461  *  @index: Receive address register to write
   2462  *
   2463  *  Clears an ethernet address from a receive address register.
   2464  **/
   2465 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
   2466 {
   2467 	u32 rar_high;
   2468 	u32 rar_entries = hw->mac.num_rar_entries;
   2469 
   2470 	DEBUGFUNC("ixgbe_clear_rar_generic");
   2471 
   2472 	/* Make sure we are using a valid rar index range */
   2473 	if (index >= rar_entries) {
   2474 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
   2475 			     "RAR index %d is out of range.\n", index);
   2476 		return IXGBE_ERR_INVALID_ARGUMENT;
   2477 	}
   2478 
   2479 	/*
   2480 	 * Some parts put the VMDq setting in the extra RAH bits,
   2481 	 * so save everything except the lower 16 bits that hold part
   2482 	 * of the address and the address valid bit.
   2483 	 */
   2484 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
   2485 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
   2486 
   2487 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
   2488 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
   2489 
   2490 	/* clear VMDq pool/queue selection for this RAR */
   2491 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
   2492 
   2493 	return IXGBE_SUCCESS;
   2494 }
   2495 
   2496 /**
   2497  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
   2498  *  @hw: pointer to hardware structure
   2499  *
   2500  *  Places the MAC address in receive address register 0 and clears the rest
   2501  *  of the receive address registers. Clears the multicast table. Assumes
   2502  *  the receiver is in reset when the routine is called.
   2503  **/
   2504 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
   2505 {
   2506 	u32 i;
   2507 	u32 rar_entries = hw->mac.num_rar_entries;
   2508 
   2509 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
   2510 
   2511 	/*
   2512 	 * If the current mac address is valid, assume it is a software override
   2513 	 * to the permanent address.
   2514 	 * Otherwise, use the permanent address from the eeprom.
   2515 	 */
   2516 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
   2517 	    IXGBE_ERR_INVALID_MAC_ADDR) {
   2518 		/* Get the MAC address from the RAR0 for later reference */
   2519 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
   2520 
   2521 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
   2522 			  hw->mac.addr[0], hw->mac.addr[1],
   2523 			  hw->mac.addr[2]);
   2524 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
   2525 			  hw->mac.addr[4], hw->mac.addr[5]);
   2526 	} else {
   2527 		/* Setup the receive address. */
   2528 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
   2529 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
   2530 			  hw->mac.addr[0], hw->mac.addr[1],
   2531 			  hw->mac.addr[2]);
   2532 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
   2533 			  hw->mac.addr[4], hw->mac.addr[5]);
   2534 
   2535 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   2536 	}
   2537 
   2538 	/* clear VMDq pool/queue selection for RAR 0 */
   2539 	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
   2540 
   2541 	hw->addr_ctrl.overflow_promisc = 0;
   2542 
   2543 	hw->addr_ctrl.rar_used_count = 1;
   2544 
   2545 	/* Zero out the other receive addresses. */
   2546 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
   2547 	for (i = 1; i < rar_entries; i++) {
   2548 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
   2549 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
   2550 	}
   2551 
   2552 	/* Clear the MTA */
   2553 	hw->addr_ctrl.mta_in_use = 0;
   2554 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
   2555 
   2556 	DEBUGOUT(" Clearing MTA\n");
   2557 	for (i = 0; i < hw->mac.mcft_size; i++)
   2558 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
   2559 
   2560 	ixgbe_init_uta_tables(hw);
   2561 
   2562 	return IXGBE_SUCCESS;
   2563 }
   2564 
   2565 /**
   2566  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
   2567  *  @hw: pointer to hardware structure
   2568  *  @addr: new address
   2569  *  @vmdq: VMDq "set" or "pool" index
   2570  *
   2571  *  Adds it to unused receive address register or goes into promiscuous mode.
   2572  **/
   2573 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
   2574 {
   2575 	u32 rar_entries = hw->mac.num_rar_entries;
   2576 	u32 rar;
   2577 
   2578 	DEBUGFUNC("ixgbe_add_uc_addr");
   2579 
   2580 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
   2581 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
   2582 
   2583 	/*
   2584 	 * Place this address in the RAR if there is room,
   2585 	 * else put the controller into promiscuous mode
   2586 	 */
   2587 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
   2588 		rar = hw->addr_ctrl.rar_used_count;
   2589 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
   2590 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
   2591 		hw->addr_ctrl.rar_used_count++;
   2592 	} else {
   2593 		hw->addr_ctrl.overflow_promisc++;
   2594 	}
   2595 
   2596 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
   2597 }
   2598 
   2599 /**
   2600  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
   2601  *  @hw: pointer to hardware structure
   2602  *  @addr_list: the list of new addresses
   2603  *  @addr_count: number of addresses
   2604  *  @next: iterator function to walk the address list
   2605  *
   2606  *  The given list replaces any existing list.  Clears the secondary addrs from
   2607  *  receive address registers.  Uses unused receive address registers for the
   2608  *  first secondary addresses, and falls back to promiscuous mode as needed.
   2609  *
   2610  *  Drivers using secondary unicast addresses must set user_set_promisc when
   2611  *  manually putting the device into promiscuous mode.
   2612  **/
   2613 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
   2614 				      u32 addr_count, ixgbe_mc_addr_itr next)
   2615 {
   2616 	u8 *addr;
   2617 	u32 i;
   2618 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
   2619 	u32 uc_addr_in_use;
   2620 	u32 fctrl;
   2621 	u32 vmdq;
   2622 
   2623 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
   2624 
   2625 	/*
   2626 	 * Clear accounting of old secondary address list,
   2627 	 * don't count RAR[0]
   2628 	 */
   2629 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
   2630 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
   2631 	hw->addr_ctrl.overflow_promisc = 0;
   2632 
   2633 	/* Zero out the other receive addresses */
   2634 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
   2635 	for (i = 0; i < uc_addr_in_use; i++) {
   2636 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
   2637 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
   2638 	}
   2639 
   2640 	/* Add the new addresses */
   2641 	for (i = 0; i < addr_count; i++) {
   2642 		DEBUGOUT(" Adding the secondary addresses:\n");
   2643 		addr = next(hw, &addr_list, &vmdq);
   2644 		ixgbe_add_uc_addr(hw, addr, vmdq);
   2645 	}
   2646 
   2647 	if (hw->addr_ctrl.overflow_promisc) {
   2648 		/* enable promisc if not already in overflow or set by user */
   2649 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
   2650 			DEBUGOUT(" Entering address overflow promisc mode\n");
   2651 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   2652 			fctrl |= IXGBE_FCTRL_UPE;
   2653 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   2654 		}
   2655 	} else {
   2656 		/* only disable if set by overflow, not by user */
   2657 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
   2658 			DEBUGOUT(" Leaving address overflow promisc mode\n");
   2659 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   2660 			fctrl &= ~IXGBE_FCTRL_UPE;
   2661 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   2662 		}
   2663 	}
   2664 
   2665 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
   2666 	return IXGBE_SUCCESS;
   2667 }
   2668 
   2669 /**
   2670  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
   2671  *  @hw: pointer to hardware structure
   2672  *  @mc_addr: the multicast address
   2673  *
   2674  *  Extracts the 12 bits, from a multicast address, to determine which
   2675  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
   2676  *  incoming rx multicast addresses, to determine the bit-vector to check in
   2677  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
   2678  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
   2679  *  to mc_filter_type.
   2680  **/
   2681 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
   2682 {
   2683 	u32 vector = 0;
   2684 
   2685 	DEBUGFUNC("ixgbe_mta_vector");
   2686 
   2687 	switch (hw->mac.mc_filter_type) {
   2688 	case 0:   /* use bits [47:36] of the address */
   2689 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
   2690 		break;
   2691 	case 1:   /* use bits [46:35] of the address */
   2692 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
   2693 		break;
   2694 	case 2:   /* use bits [45:34] of the address */
   2695 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
   2696 		break;
   2697 	case 3:   /* use bits [43:32] of the address */
   2698 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
   2699 		break;
   2700 	default:  /* Invalid mc_filter_type */
   2701 		DEBUGOUT("MC filter type param set incorrectly\n");
   2702 		ASSERT(0);
   2703 		break;
   2704 	}
   2705 
   2706 	/* vector can only be 12-bits or boundary will be exceeded */
   2707 	vector &= 0xFFF;
   2708 	return vector;
   2709 }
   2710 
   2711 /**
   2712  *  ixgbe_set_mta - Set bit-vector in multicast table
   2713  *  @hw: pointer to hardware structure
   2714  *  @mc_addr: Multicast address
   2715  *
   2716  *  Sets the bit-vector in the multicast table.
   2717  **/
   2718 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
   2719 {
   2720 	u32 vector;
   2721 	u32 vector_bit;
   2722 	u32 vector_reg;
   2723 
   2724 	DEBUGFUNC("ixgbe_set_mta");
   2725 
   2726 	hw->addr_ctrl.mta_in_use++;
   2727 
   2728 	vector = ixgbe_mta_vector(hw, mc_addr);
   2729 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
   2730 
   2731 	/*
   2732 	 * The MTA is a register array of 128 32-bit registers. It is treated
   2733 	 * like an array of 4096 bits.  We want to set bit
   2734 	 * BitArray[vector_value]. So we figure out what register the bit is
   2735 	 * in, read it, OR in the new bit, then write back the new value.  The
   2736 	 * register is determined by the upper 7 bits of the vector value and
   2737 	 * the bit within that register are determined by the lower 5 bits of
   2738 	 * the value.
   2739 	 */
   2740 	vector_reg = (vector >> 5) & 0x7F;
   2741 	vector_bit = vector & 0x1F;
   2742 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
   2743 }
   2744 
   2745 /**
   2746  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
   2747  *  @hw: pointer to hardware structure
   2748  *  @mc_addr_list: the list of new multicast addresses
   2749  *  @mc_addr_count: number of addresses
   2750  *  @next: iterator function to walk the multicast address list
   2751  *  @clear: flag, when set clears the table beforehand
   2752  *
   2753  *  When the clear flag is set, the given list replaces any existing list.
   2754  *  Hashes the given addresses into the multicast table.
   2755  **/
   2756 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
   2757 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
   2758 				      bool clear)
   2759 {
   2760 	u32 i;
   2761 	u32 vmdq;
   2762 
   2763 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
   2764 
   2765 	/*
   2766 	 * Set the new number of MC addresses that we are being requested to
   2767 	 * use.
   2768 	 */
   2769 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
   2770 	hw->addr_ctrl.mta_in_use = 0;
   2771 
   2772 	/* Clear mta_shadow */
   2773 	if (clear) {
   2774 		DEBUGOUT(" Clearing MTA\n");
   2775 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
   2776 	}
   2777 
   2778 	/* Update mta_shadow */
   2779 	for (i = 0; i < mc_addr_count; i++) {
   2780 		DEBUGOUT(" Adding the multicast addresses:\n");
   2781 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
   2782 	}
   2783 
   2784 	/* Enable mta */
   2785 	for (i = 0; i < hw->mac.mcft_size; i++)
   2786 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
   2787 				      hw->mac.mta_shadow[i]);
   2788 
   2789 	if (hw->addr_ctrl.mta_in_use > 0)
   2790 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
   2791 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
   2792 
   2793 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
   2794 	return IXGBE_SUCCESS;
   2795 }
   2796 
   2797 /**
   2798  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
   2799  *  @hw: pointer to hardware structure
   2800  *
   2801  *  Enables multicast address in RAR and the use of the multicast hash table.
   2802  **/
   2803 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
   2804 {
   2805 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
   2806 
   2807 	DEBUGFUNC("ixgbe_enable_mc_generic");
   2808 
   2809 	if (a->mta_in_use > 0)
   2810 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
   2811 				hw->mac.mc_filter_type);
   2812 
   2813 	return IXGBE_SUCCESS;
   2814 }
   2815 
   2816 /**
   2817  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
   2818  *  @hw: pointer to hardware structure
   2819  *
   2820  *  Disables multicast address in RAR and the use of the multicast hash table.
   2821  **/
   2822 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
   2823 {
   2824 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
   2825 
   2826 	DEBUGFUNC("ixgbe_disable_mc_generic");
   2827 
   2828 	if (a->mta_in_use > 0)
   2829 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
   2830 
   2831 	return IXGBE_SUCCESS;
   2832 }
   2833 
   2834 /**
   2835  *  ixgbe_fc_enable_generic - Enable flow control
   2836  *  @hw: pointer to hardware structure
   2837  *
   2838  *  Enable flow control according to the current settings.
   2839  **/
   2840 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
   2841 {
   2842 	s32 ret_val = IXGBE_SUCCESS;
   2843 	u32 mflcn_reg, fccfg_reg;
   2844 	u32 reg;
   2845 	u32 fcrtl, fcrth;
   2846 	int i;
   2847 
   2848 	DEBUGFUNC("ixgbe_fc_enable_generic");
   2849 
   2850 	/* Validate the water mark configuration */
   2851 	if (!hw->fc.pause_time) {
   2852 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
   2853 		goto out;
   2854 	}
   2855 
   2856 	/* Low water mark of zero causes XOFF floods */
   2857 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
   2858 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
   2859 		    hw->fc.high_water[i]) {
   2860 			if (!hw->fc.low_water[i] ||
   2861 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
   2862 				DEBUGOUT("Invalid water mark configuration\n");
   2863 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
   2864 				goto out;
   2865 			}
   2866 		}
   2867 	}
   2868 
   2869 	/* Negotiate the fc mode to use */
   2870 	hw->mac.ops.fc_autoneg(hw);
   2871 
   2872 	/* Disable any previous flow control settings */
   2873 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
   2874 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
   2875 
   2876 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
   2877 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
   2878 
   2879 	/*
   2880 	 * The possible values of fc.current_mode are:
   2881 	 * 0: Flow control is completely disabled
   2882 	 * 1: Rx flow control is enabled (we can receive pause frames,
   2883 	 *    but not send pause frames).
   2884 	 * 2: Tx flow control is enabled (we can send pause frames but
   2885 	 *    we do not support receiving pause frames).
   2886 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
   2887 	 * other: Invalid.
   2888 	 */
   2889 	switch (hw->fc.current_mode) {
   2890 	case ixgbe_fc_none:
   2891 		/*
   2892 		 * Flow control is disabled by software override or autoneg.
   2893 		 * The code below will actually disable it in the HW.
   2894 		 */
   2895 		break;
   2896 	case ixgbe_fc_rx_pause:
   2897 		/*
   2898 		 * Rx Flow control is enabled and Tx Flow control is
   2899 		 * disabled by software override. Since there really
   2900 		 * isn't a way to advertise that we are capable of RX
   2901 		 * Pause ONLY, we will advertise that we support both
   2902 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
   2903 		 * disable the adapter's ability to send PAUSE frames.
   2904 		 */
   2905 		mflcn_reg |= IXGBE_MFLCN_RFCE;
   2906 		break;
   2907 	case ixgbe_fc_tx_pause:
   2908 		/*
   2909 		 * Tx Flow control is enabled, and Rx Flow control is
   2910 		 * disabled by software override.
   2911 		 */
   2912 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
   2913 		break;
   2914 	case ixgbe_fc_full:
   2915 		/* Flow control (both Rx and Tx) is enabled by SW override. */
   2916 		mflcn_reg |= IXGBE_MFLCN_RFCE;
   2917 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
   2918 		break;
   2919 	default:
   2920 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
   2921 			     "Flow control param set incorrectly\n");
   2922 		ret_val = IXGBE_ERR_CONFIG;
   2923 		goto out;
   2924 		break;
   2925 	}
   2926 
   2927 	/* Set 802.3x based flow control settings. */
   2928 	mflcn_reg |= IXGBE_MFLCN_DPF;
   2929 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
   2930 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
   2931 
   2932 
   2933 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
   2934 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
   2935 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
   2936 		    hw->fc.high_water[i]) {
   2937 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
   2938 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
   2939 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
   2940 		} else {
   2941 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
   2942 			/*
   2943 			 * In order to prevent Tx hangs when the internal Tx
   2944 			 * switch is enabled we must set the high water mark
   2945 			 * to the Rx packet buffer size - 24KB.  This allows
   2946 			 * the Tx switch to function even under heavy Rx
   2947 			 * workloads.
   2948 			 */
   2949 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
   2950 		}
   2951 
   2952 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
   2953 	}
   2954 
   2955 	/* Configure pause time (2 TCs per register) */
   2956 	reg = (u32)hw->fc.pause_time * 0x00010001;
   2957 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
   2958 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
   2959 
   2960 	/* Configure flow control refresh threshold value */
   2961 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
   2962 
   2963 out:
   2964 	return ret_val;
   2965 }
   2966 
   2967 /**
   2968  *  ixgbe_negotiate_fc - Negotiate flow control
   2969  *  @hw: pointer to hardware structure
   2970  *  @adv_reg: flow control advertised settings
   2971  *  @lp_reg: link partner's flow control settings
   2972  *  @adv_sym: symmetric pause bit in advertisement
   2973  *  @adv_asm: asymmetric pause bit in advertisement
   2974  *  @lp_sym: symmetric pause bit in link partner advertisement
   2975  *  @lp_asm: asymmetric pause bit in link partner advertisement
   2976  *
   2977  *  Find the intersection between advertised settings and link partner's
   2978  *  advertised settings
   2979  **/
   2980 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
   2981 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
   2982 {
   2983 	if ((!(adv_reg)) ||  (!(lp_reg))) {
   2984 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
   2985 			     "Local or link partner's advertised flow control "
   2986 			     "settings are NULL. Local: %x, link partner: %x\n",
   2987 			     adv_reg, lp_reg);
   2988 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
   2989 	}
   2990 
   2991 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
   2992 		/*
   2993 		 * Now we need to check if the user selected Rx ONLY
   2994 		 * of pause frames.  In this case, we had to advertise
   2995 		 * FULL flow control because we could not advertise RX
   2996 		 * ONLY. Hence, we must now check to see if we need to
   2997 		 * turn OFF the TRANSMISSION of PAUSE frames.
   2998 		 */
   2999 		if (hw->fc.requested_mode == ixgbe_fc_full) {
   3000 			hw->fc.current_mode = ixgbe_fc_full;
   3001 			DEBUGOUT("Flow Control = FULL.\n");
   3002 		} else {
   3003 			hw->fc.current_mode = ixgbe_fc_rx_pause;
   3004 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
   3005 		}
   3006 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
   3007 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
   3008 		hw->fc.current_mode = ixgbe_fc_tx_pause;
   3009 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
   3010 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
   3011 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
   3012 		hw->fc.current_mode = ixgbe_fc_rx_pause;
   3013 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
   3014 	} else {
   3015 		hw->fc.current_mode = ixgbe_fc_none;
   3016 		DEBUGOUT("Flow Control = NONE.\n");
   3017 	}
   3018 	return IXGBE_SUCCESS;
   3019 }
   3020 
   3021 /**
   3022  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
   3023  *  @hw: pointer to hardware structure
   3024  *
   3025  *  Enable flow control according on 1 gig fiber.
   3026  **/
   3027 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
   3028 {
   3029 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
   3030 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
   3031 
   3032 	/*
   3033 	 * On multispeed fiber at 1g, bail out if
   3034 	 * - link is up but AN did not complete, or if
   3035 	 * - link is up and AN completed but timed out
   3036 	 */
   3037 
   3038 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
   3039 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
   3040 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
   3041 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
   3042 		goto out;
   3043 	}
   3044 
   3045 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
   3046 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
   3047 
   3048 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
   3049 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
   3050 				      IXGBE_PCS1GANA_ASM_PAUSE,
   3051 				      IXGBE_PCS1GANA_SYM_PAUSE,
   3052 				      IXGBE_PCS1GANA_ASM_PAUSE);
   3053 
   3054 out:
   3055 	return ret_val;
   3056 }
   3057 
   3058 /**
   3059  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
   3060  *  @hw: pointer to hardware structure
   3061  *
   3062  *  Enable flow control according to IEEE clause 37.
   3063  **/
   3064 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
   3065 {
   3066 	u32 links2, anlp1_reg, autoc_reg, links;
   3067 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
   3068 
   3069 	/*
   3070 	 * On backplane, bail out if
   3071 	 * - backplane autoneg was not completed, or if
   3072 	 * - we are 82599 and link partner is not AN enabled
   3073 	 */
   3074 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
   3075 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
   3076 		DEBUGOUT("Auto-Negotiation did not complete\n");
   3077 		goto out;
   3078 	}
   3079 
   3080 	if (hw->mac.type == ixgbe_mac_82599EB) {
   3081 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
   3082 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
   3083 			DEBUGOUT("Link partner is not AN enabled\n");
   3084 			goto out;
   3085 		}
   3086 	}
   3087 	/*
   3088 	 * Read the 10g AN autoc and LP ability registers and resolve
   3089 	 * local flow control settings accordingly
   3090 	 */
   3091 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   3092 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
   3093 
   3094 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
   3095 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
   3096 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
   3097 
   3098 out:
   3099 	return ret_val;
   3100 }
   3101 
   3102 /**
   3103  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
   3104  *  @hw: pointer to hardware structure
   3105  *
   3106  *  Enable flow control according to IEEE clause 37.
   3107  **/
   3108 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
   3109 {
   3110 	u16 technology_ability_reg = 0;
   3111 	u16 lp_technology_ability_reg = 0;
   3112 
   3113 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
   3114 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
   3115 			     &technology_ability_reg);
   3116 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
   3117 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
   3118 			     &lp_technology_ability_reg);
   3119 
   3120 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
   3121 				  (u32)lp_technology_ability_reg,
   3122 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
   3123 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
   3124 }
   3125 
   3126 /**
   3127  *  ixgbe_fc_autoneg - Configure flow control
   3128  *  @hw: pointer to hardware structure
   3129  *
   3130  *  Compares our advertised flow control capabilities to those advertised by
   3131  *  our link partner, and determines the proper flow control mode to use.
   3132  **/
   3133 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
   3134 {
   3135 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
   3136 	ixgbe_link_speed speed;
   3137 	bool link_up;
   3138 
   3139 	DEBUGFUNC("ixgbe_fc_autoneg");
   3140 
   3141 	/*
   3142 	 * AN should have completed when the cable was plugged in.
   3143 	 * Look for reasons to bail out.  Bail out if:
   3144 	 * - FC autoneg is disabled, or if
   3145 	 * - link is not up.
   3146 	 */
   3147 	if (hw->fc.disable_fc_autoneg) {
   3148 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
   3149 			     "Flow control autoneg is disabled");
   3150 		goto out;
   3151 	}
   3152 
   3153 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
   3154 	if (!link_up) {
   3155 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
   3156 		goto out;
   3157 	}
   3158 
   3159 	switch (hw->phy.media_type) {
   3160 	/* Autoneg flow control on fiber adapters */
   3161 	case ixgbe_media_type_fiber_fixed:
   3162 	case ixgbe_media_type_fiber_qsfp:
   3163 	case ixgbe_media_type_fiber:
   3164 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
   3165 			ret_val = ixgbe_fc_autoneg_fiber(hw);
   3166 		break;
   3167 
   3168 	/* Autoneg flow control on backplane adapters */
   3169 	case ixgbe_media_type_backplane:
   3170 		ret_val = ixgbe_fc_autoneg_backplane(hw);
   3171 		break;
   3172 
   3173 	/* Autoneg flow control on copper adapters */
   3174 	case ixgbe_media_type_copper:
   3175 		if (ixgbe_device_supports_autoneg_fc(hw))
   3176 			ret_val = ixgbe_fc_autoneg_copper(hw);
   3177 		break;
   3178 
   3179 	default:
   3180 		break;
   3181 	}
   3182 
   3183 out:
   3184 	if (ret_val == IXGBE_SUCCESS) {
   3185 		hw->fc.fc_was_autonegged = TRUE;
   3186 	} else {
   3187 		hw->fc.fc_was_autonegged = FALSE;
   3188 		hw->fc.current_mode = hw->fc.requested_mode;
   3189 	}
   3190 }
   3191 
   3192 /*
   3193  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
   3194  * @hw: pointer to hardware structure
   3195  *
   3196  * System-wide timeout range is encoded in PCIe Device Control2 register.
   3197  *
   3198  * Add 10% to specified maximum and return the number of times to poll for
   3199  * completion timeout, in units of 100 microsec.  Never return less than
   3200  * 800 = 80 millisec.
   3201  */
   3202 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
   3203 {
   3204 	s16 devctl2;
   3205 	u32 pollcnt;
   3206 
   3207 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
   3208 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
   3209 
   3210 	switch (devctl2) {
   3211 	case IXGBE_PCIDEVCTRL2_65_130ms:
   3212 		pollcnt = 1300;		/* 130 millisec */
   3213 		break;
   3214 	case IXGBE_PCIDEVCTRL2_260_520ms:
   3215 		pollcnt = 5200;		/* 520 millisec */
   3216 		break;
   3217 	case IXGBE_PCIDEVCTRL2_1_2s:
   3218 		pollcnt = 20000;	/* 2 sec */
   3219 		break;
   3220 	case IXGBE_PCIDEVCTRL2_4_8s:
   3221 		pollcnt = 80000;	/* 8 sec */
   3222 		break;
   3223 	case IXGBE_PCIDEVCTRL2_17_34s:
   3224 		pollcnt = 34000;	/* 34 sec */
   3225 		break;
   3226 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
   3227 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
   3228 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
   3229 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
   3230 	default:
   3231 		pollcnt = 800;		/* 80 millisec minimum */
   3232 		break;
   3233 	}
   3234 
   3235 	/* add 10% to spec maximum */
   3236 	return (pollcnt * 11) / 10;
   3237 }
   3238 
   3239 /**
   3240  *  ixgbe_disable_pcie_master - Disable PCI-express master access
   3241  *  @hw: pointer to hardware structure
   3242  *
   3243  *  Disables PCI-Express master access and verifies there are no pending
   3244  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
   3245  *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
   3246  *  is returned signifying master requests disabled.
   3247  **/
   3248 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
   3249 {
   3250 	s32 status = IXGBE_SUCCESS;
   3251 	u32 i, poll;
   3252 	u16 value;
   3253 
   3254 	DEBUGFUNC("ixgbe_disable_pcie_master");
   3255 
   3256 	/* Always set this bit to ensure any future transactions are blocked */
   3257 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
   3258 
   3259 	/* Exit if master requests are blocked */
   3260 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
   3261 	    IXGBE_REMOVED(hw->hw_addr))
   3262 		goto out;
   3263 
   3264 	/* Poll for master request bit to clear */
   3265 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
   3266 		usec_delay(100);
   3267 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
   3268 			goto out;
   3269 	}
   3270 
   3271 	/*
   3272 	 * Two consecutive resets are required via CTRL.RST per datasheet
   3273 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
   3274 	 * of this need.  The first reset prevents new master requests from
   3275 	 * being issued by our device.  We then must wait 1usec or more for any
   3276 	 * remaining completions from the PCIe bus to trickle in, and then reset
   3277 	 * again to clear out any effects they may have had on our device.
   3278 	 */
   3279 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
   3280 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
   3281 
   3282 	if (hw->mac.type >= ixgbe_mac_X550)
   3283 		goto out;
   3284 
   3285 	/*
   3286 	 * Before proceeding, make sure that the PCIe block does not have
   3287 	 * transactions pending.
   3288 	 */
   3289 	poll = ixgbe_pcie_timeout_poll(hw);
   3290 	for (i = 0; i < poll; i++) {
   3291 		usec_delay(100);
   3292 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
   3293 		if (IXGBE_REMOVED(hw->hw_addr))
   3294 			goto out;
   3295 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
   3296 			goto out;
   3297 	}
   3298 
   3299 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
   3300 		     "PCIe transaction pending bit also did not clear.\n");
   3301 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
   3302 
   3303 out:
   3304 	return status;
   3305 }
   3306 
   3307 /**
   3308  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
   3309  *  @hw: pointer to hardware structure
   3310  *  @mask: Mask to specify which semaphore to acquire
   3311  *
   3312  *  Acquires the SWFW semaphore through the GSSR register for the specified
   3313  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
   3314  **/
   3315 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
   3316 {
   3317 	u32 gssr = 0;
   3318 	u32 swmask = mask;
   3319 	u32 fwmask = mask << 5;
   3320 	u32 timeout = 200;
   3321 	u32 i;
   3322 
   3323 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
   3324 
   3325 	for (i = 0; i < timeout; i++) {
   3326 		/*
   3327 		 * SW NVM semaphore bit is used for access to all
   3328 		 * SW_FW_SYNC bits (not just NVM)
   3329 		 */
   3330 		if (ixgbe_get_eeprom_semaphore(hw))
   3331 			return IXGBE_ERR_SWFW_SYNC;
   3332 
   3333 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
   3334 		if (!(gssr & (fwmask | swmask))) {
   3335 			gssr |= swmask;
   3336 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
   3337 			ixgbe_release_eeprom_semaphore(hw);
   3338 			return IXGBE_SUCCESS;
   3339 		} else {
   3340 			/* Resource is currently in use by FW or SW */
   3341 			ixgbe_release_eeprom_semaphore(hw);
   3342 			msec_delay(5);
   3343 		}
   3344 	}
   3345 
   3346 	/* If time expired clear the bits holding the lock and retry */
   3347 	if (gssr & (fwmask | swmask))
   3348 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
   3349 
   3350 	msec_delay(5);
   3351 	return IXGBE_ERR_SWFW_SYNC;
   3352 }
   3353 
   3354 /**
   3355  *  ixgbe_release_swfw_sync - Release SWFW semaphore
   3356  *  @hw: pointer to hardware structure
   3357  *  @mask: Mask to specify which semaphore to release
   3358  *
   3359  *  Releases the SWFW semaphore through the GSSR register for the specified
   3360  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
   3361  **/
   3362 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
   3363 {
   3364 	u32 gssr;
   3365 	u32 swmask = mask;
   3366 
   3367 	DEBUGFUNC("ixgbe_release_swfw_sync");
   3368 
   3369 	ixgbe_get_eeprom_semaphore(hw);
   3370 
   3371 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
   3372 	gssr &= ~swmask;
   3373 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
   3374 
   3375 	ixgbe_release_eeprom_semaphore(hw);
   3376 }
   3377 
   3378 /**
   3379  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
   3380  *  @hw: pointer to hardware structure
   3381  *
   3382  *  Stops the receive data path and waits for the HW to internally empty
   3383  *  the Rx security block
   3384  **/
   3385 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
   3386 {
   3387 #define IXGBE_MAX_SECRX_POLL 4000
   3388 
   3389 	int i;
   3390 	int secrxreg;
   3391 
   3392 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
   3393 
   3394 
   3395 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
   3396 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
   3397 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
   3398 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
   3399 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
   3400 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
   3401 			break;
   3402 		else
   3403 			/* Use interrupt-safe sleep just in case */
   3404 			usec_delay(10);
   3405 	}
   3406 
   3407 	/* For informational purposes only */
   3408 	if (i >= IXGBE_MAX_SECRX_POLL)
   3409 		DEBUGOUT("Rx unit being enabled before security "
   3410 			 "path fully disabled.  Continuing with init.\n");
   3411 
   3412 	return IXGBE_SUCCESS;
   3413 }
   3414 
   3415 /**
   3416  *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
   3417  *  @hw: pointer to hardware structure
   3418  *  @locked: bool to indicate whether the SW/FW lock was taken
   3419  *  @reg_val: Value we read from AUTOC
   3420  *
   3421  *  The default case requires no protection so just to the register read.
   3422  */
   3423 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
   3424 {
   3425 	*locked = FALSE;
   3426 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   3427 	return IXGBE_SUCCESS;
   3428 }
   3429 
   3430 /**
   3431  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
   3432  * @hw: pointer to hardware structure
   3433  * @reg_val: value to write to AUTOC
   3434  * @locked: bool to indicate whether the SW/FW lock was already taken by
   3435  *           previous read.
   3436  *
   3437  * The default case requires no protection so just to the register write.
   3438  */
   3439 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
   3440 {
   3441 	UNREFERENCED_1PARAMETER(locked);
   3442 
   3443 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
   3444 	return IXGBE_SUCCESS;
   3445 }
   3446 
   3447 /**
   3448  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
   3449  *  @hw: pointer to hardware structure
   3450  *
   3451  *  Enables the receive data path.
   3452  **/
   3453 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
   3454 {
   3455 	u32 secrxreg;
   3456 
   3457 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
   3458 
   3459 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
   3460 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
   3461 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
   3462 	IXGBE_WRITE_FLUSH(hw);
   3463 
   3464 	return IXGBE_SUCCESS;
   3465 }
   3466 
   3467 /**
   3468  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
   3469  *  @hw: pointer to hardware structure
   3470  *  @regval: register value to write to RXCTRL
   3471  *
   3472  *  Enables the Rx DMA unit
   3473  **/
   3474 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
   3475 {
   3476 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
   3477 
   3478 	if (regval & IXGBE_RXCTRL_RXEN)
   3479 		ixgbe_enable_rx(hw);
   3480 	else
   3481 		ixgbe_disable_rx(hw);
   3482 
   3483 	return IXGBE_SUCCESS;
   3484 }
   3485 
   3486 /**
   3487  *  ixgbe_blink_led_start_generic - Blink LED based on index.
   3488  *  @hw: pointer to hardware structure
   3489  *  @index: led number to blink
   3490  **/
   3491 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
   3492 {
   3493 	ixgbe_link_speed speed = 0;
   3494 	bool link_up = 0;
   3495 	u32 autoc_reg = 0;
   3496 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
   3497 	s32 ret_val = IXGBE_SUCCESS;
   3498 	bool locked = FALSE;
   3499 
   3500 	DEBUGFUNC("ixgbe_blink_led_start_generic");
   3501 
   3502 	if (index > 3)
   3503 		return IXGBE_ERR_PARAM;
   3504 
   3505 	/*
   3506 	 * Link must be up to auto-blink the LEDs;
   3507 	 * Force it if link is down.
   3508 	 */
   3509 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
   3510 
   3511 	if (!link_up) {
   3512 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
   3513 		if (ret_val != IXGBE_SUCCESS)
   3514 			goto out;
   3515 
   3516 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
   3517 		autoc_reg |= IXGBE_AUTOC_FLU;
   3518 
   3519 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
   3520 		if (ret_val != IXGBE_SUCCESS)
   3521 			goto out;
   3522 
   3523 		IXGBE_WRITE_FLUSH(hw);
   3524 		msec_delay(10);
   3525 	}
   3526 
   3527 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
   3528 	led_reg |= IXGBE_LED_BLINK(index);
   3529 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
   3530 	IXGBE_WRITE_FLUSH(hw);
   3531 
   3532 out:
   3533 	return ret_val;
   3534 }
   3535 
   3536 /**
   3537  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
   3538  *  @hw: pointer to hardware structure
   3539  *  @index: led number to stop blinking
   3540  **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_blink_led_stop_generic");

	/* LEDCTL only has four LED control fields (0-3). */
	if (index > 3)
		return IXGBE_ERR_PARAM;

	/* Use the protected AUTOC accessors so we don't race other
	 * agents for the register. */
	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* Stop forcing link up (undo what blink_led_start may have done)
	 * and restart auto-negotiation. */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;

	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* Clear the blink bit and restore the link/activity LED mode. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
   3573 
   3574 /**
   3575  *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
   3576  *  @hw: pointer to hardware structure
   3577  *  @san_mac_offset: SAN MAC address offset
   3578  *
   3579  *  This function will read the EEPROM location for the SAN MAC address
   3580  *  pointer, and returns the value at that location.  This is used in both
   3581  *  get and set mac_addr routines.
   3582  **/
   3583 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
   3584 					 u16 *san_mac_offset)
   3585 {
   3586 	s32 ret_val;
   3587 
   3588 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
   3589 
   3590 	/*
   3591 	 * First read the EEPROM pointer to see if the MAC addresses are
   3592 	 * available.
   3593 	 */
   3594 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
   3595 				      san_mac_offset);
   3596 	if (ret_val) {
   3597 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   3598 			      "eeprom at offset %d failed",
   3599 			      IXGBE_SAN_MAC_ADDR_PTR);
   3600 	}
   3601 
   3602 	return ret_val;
   3603 }
   3604 
   3605 /**
   3606  *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
   3607  *  @hw: pointer to hardware structure
   3608  *  @san_mac_addr: SAN MAC address
   3609  *
   3610  *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
   3611  *  per-port, so set_lan_id() must be called before reading the addresses.
   3612  *  set_lan_id() is called by identify_sfp(), but this cannot be relied
   3613  *  upon for non-SFP connections, so we must call it here.
   3614  **/
   3615 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
   3616 {
   3617 	u16 san_mac_data, san_mac_offset;
   3618 	u8 i;
   3619 	s32 ret_val;
   3620 
   3621 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
   3622 
   3623 	/*
   3624 	 * First read the EEPROM pointer to see if the MAC addresses are
   3625 	 * available.  If they're not, no point in calling set_lan_id() here.
   3626 	 */
   3627 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
   3628 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
   3629 		goto san_mac_addr_out;
   3630 
   3631 	/* make sure we know which port we need to program */
   3632 	hw->mac.ops.set_lan_id(hw);
   3633 	/* apply the port offset to the address offset */
   3634 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
   3635 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
   3636 	for (i = 0; i < 3; i++) {
   3637 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
   3638 					      &san_mac_data);
   3639 		if (ret_val) {
   3640 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
   3641 				      "eeprom read at offset %d failed",
   3642 				      san_mac_offset);
   3643 			goto san_mac_addr_out;
   3644 		}
   3645 		san_mac_addr[i * 2] = (u8)(san_mac_data);
   3646 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
   3647 		san_mac_offset++;
   3648 	}
   3649 	return IXGBE_SUCCESS;
   3650 
   3651 san_mac_addr_out:
   3652 	/*
   3653 	 * No addresses available in this EEPROM.  It's not an
   3654 	 * error though, so just wipe the local address and return.
   3655 	 */
   3656 	for (i = 0; i < 6; i++)
   3657 		san_mac_addr[i] = 0xFF;
   3658 	return IXGBE_SUCCESS;
   3659 }
   3660 
   3661 /**
   3662  *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
   3663  *  @hw: pointer to hardware structure
   3664  *  @san_mac_addr: SAN MAC address
   3665  *
   3666  *  Write a SAN MAC address to the EEPROM.
   3667  **/
   3668 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
   3669 {
   3670 	s32 ret_val;
   3671 	u16 san_mac_data, san_mac_offset;
   3672 	u8 i;
   3673 
   3674 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
   3675 
   3676 	/* Look for SAN mac address pointer.  If not defined, return */
   3677 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
   3678 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
   3679 		return IXGBE_ERR_NO_SAN_ADDR_PTR;
   3680 
   3681 	/* Make sure we know which port we need to write */
   3682 	hw->mac.ops.set_lan_id(hw);
   3683 	/* Apply the port offset to the address offset */
   3684 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
   3685 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
   3686 
   3687 	for (i = 0; i < 3; i++) {
   3688 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
   3689 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
   3690 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
   3691 		san_mac_offset++;
   3692 	}
   3693 
   3694 	return IXGBE_SUCCESS;
   3695 }
   3696 
   3697 /**
   3698  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
   3699  *  @hw: pointer to hardware structure
   3700  *
   3701  *  Read PCIe configuration space, and get the MSI-X vector count from
   3702  *  the capabilities table.
   3703  **/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
	u16 msix_count = 1;
	u16 max_msix_count;
	u16 pcie_offset;

	/* Pick the PCIe capability offset and HW limit per MAC family;
	 * unknown MACs report a single vector. */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
		break;
	default:
		return msix_count;
	}

	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
	/* A surprise-removed device yields no usable vector count. */
	if (IXGBE_REMOVED(hw->hw_addr))
		msix_count = 0;
	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

	/* MSI-X count is zero-based in HW */
	msix_count++;

	/* Never report more vectors than the hardware supports. */
	if (msix_count > max_msix_count)
		msix_count = max_msix_count;

	return msix_count;
}
   3741 
   3742 /**
   3743  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
   3744  *  @hw: pointer to hardware structure
   3745  *  @addr: Address to put into receive address register
   3746  *  @vmdq: VMDq pool to assign
   3747  *
   3748  *  Puts an ethernet address into a receive address register, or
   3749  *  finds the rar that it is already in; adds to the pool list
   3750  **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low  = addr[0] | (addr[1] << 8)
			    | (addr[2] << 16)
			    | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* Remember the first disabled (AV bit clear) entry seen. */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* High halves match; compare the low 32 bits too. */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;    /* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		/* Table is full and no free slot was found. */
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	/* On success the return value is the RAR index used. */
	return rar;
}
   3810 
   3811 /**
   3812  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
   3813  *  @hw: pointer to hardware struct
   3814  *  @rar: receive address register index to disassociate
   3815  *  @vmdq: VMDq pool index to remove from the rar
   3816  **/
   3817 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
   3818 {
   3819 	u32 mpsar_lo, mpsar_hi;
   3820 	u32 rar_entries = hw->mac.num_rar_entries;
   3821 
   3822 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
   3823 
   3824 	/* Make sure we are using a valid rar index range */
   3825 	if (rar >= rar_entries) {
   3826 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
   3827 			     "RAR index %d is out of range.\n", rar);
   3828 		return IXGBE_ERR_INVALID_ARGUMENT;
   3829 	}
   3830 
   3831 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
   3832 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
   3833 
   3834 	if (IXGBE_REMOVED(hw->hw_addr))
   3835 		goto done;
   3836 
   3837 	if (!mpsar_lo && !mpsar_hi)
   3838 		goto done;
   3839 
   3840 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
   3841 		if (mpsar_lo) {
   3842 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
   3843 			mpsar_lo = 0;
   3844 		}
   3845 		if (mpsar_hi) {
   3846 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
   3847 			mpsar_hi = 0;
   3848 		}
   3849 	} else if (vmdq < 32) {
   3850 		mpsar_lo &= ~(1 << vmdq);
   3851 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
   3852 	} else {
   3853 		mpsar_hi &= ~(1 << (vmdq - 32));
   3854 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
   3855 	}
   3856 
   3857 	/* was that the last pool using this rar? */
   3858 	if (mpsar_lo == 0 && mpsar_hi == 0 &&
   3859 	    rar != 0 && rar != hw->mac.san_mac_rar_index)
   3860 		hw->mac.ops.clear_rar(hw, rar);
   3861 done:
   3862 	return IXGBE_SUCCESS;
   3863 }
   3864 
   3865 /**
   3866  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
   3867  *  @hw: pointer to hardware struct
   3868  *  @rar: receive address register index to associate with a VMDq index
   3869  *  @vmdq: VMDq pool index
   3870  **/
   3871 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
   3872 {
   3873 	u32 mpsar;
   3874 	u32 rar_entries = hw->mac.num_rar_entries;
   3875 
   3876 	DEBUGFUNC("ixgbe_set_vmdq_generic");
   3877 
   3878 	/* Make sure we are using a valid rar index range */
   3879 	if (rar >= rar_entries) {
   3880 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
   3881 			     "RAR index %d is out of range.\n", rar);
   3882 		return IXGBE_ERR_INVALID_ARGUMENT;
   3883 	}
   3884 
   3885 	if (vmdq < 32) {
   3886 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
   3887 		mpsar |= 1 << vmdq;
   3888 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
   3889 	} else {
   3890 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
   3891 		mpsar |= 1 << (vmdq - 32);
   3892 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
   3893 	}
   3894 	return IXGBE_SUCCESS;
   3895 }
   3896 
   3897 /**
   3898  *  This function should only be involved in the IOV mode.
   3899  *  In IOV mode, Default pool is next pool after the number of
   3900  *  VFs advertized and not 0.
   3901  *  MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
   3902  *
   3903  *  ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
   3904  *  @hw: pointer to hardware struct
   3905  *  @vmdq: VMDq pool index
   3906  **/
   3907 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
   3908 {
   3909 	u32 rar = hw->mac.san_mac_rar_index;
   3910 
   3911 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
   3912 
   3913 	if (vmdq < 32) {
   3914 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
   3915 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
   3916 	} else {
   3917 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
   3918 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
   3919 	}
   3920 
   3921 	return IXGBE_SUCCESS;
   3922 }
   3923 
   3924 /**
   3925  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
   3926  *  @hw: pointer to hardware structure
   3927  **/
   3928 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
   3929 {
   3930 	int i;
   3931 
   3932 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
   3933 	DEBUGOUT(" Clearing UTA\n");
   3934 
   3935 	for (i = 0; i < 128; i++)
   3936 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
   3937 
   3938 	return IXGBE_SUCCESS;
   3939 }
   3940 
   3941 /**
   3942  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
   3943  *  @hw: pointer to hardware structure
   3944  *  @vlan: VLAN id to write to VLAN filter
   3945  *  @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if
   3946  *		  vlanid not found
   3947  *
   3948  *
   3949  *  return the VLVF index where this VLAN id should be placed
   3950  *
   3951  **/
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 *
	 * Note IXGBE_ERR_NO_SPACE is negative (non-zero), so it also acts
	 * as the "don't record an empty slot" marker in the loop below.
	 */
	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;

	/* add VLAN enable bit for comparison */
	vlan |= IXGBE_VLVF_VIEN;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1;
	 * entry 0 is never scanned (VLAN 0 is handled by the short cut above)
	 */
	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN.  Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");

	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
}
   3991 
   3992 /**
   3993  *  ixgbe_set_vfta_generic - Set VLAN filter table
   3994  *  @hw: pointer to hardware structure
   3995  *  @vlan: VLAN id to write to VLAN filter
   3996  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
   3997  *  @vlan_on: boolean flag to turn on/off VLAN
   3998  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
   3999  *
   4000  *  Turn on/off specified VLAN in the VLAN filter table.
   4001  **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool vlvf_bypass)
{
	u32 regidx, vfta_delta, vfta;
	s32 ret_val;

	DEBUGFUNC("ixgbe_set_vfta_generic");

	/* VLAN ids are 12 bits, pool indices are 6 bits. */
	if (vlan > 4095 || vind > 63)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = (u32)1 << (vlan % 32);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));

	/*
	 * vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update the vfta using an XOR
	 * (vfta_delta becomes 0 when the bit already has the requested
	 * state, making the write below a no-op)
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	 * (it may also zero vfta_delta to suppress the VFTA write below)
	 */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
					 vfta, vlvf_bypass);
	if (ret_val != IXGBE_SUCCESS) {
		/* With vlvf_bypass a VLVF failure is non-fatal; the VFTA
		 * is still updated. */
		if (vlvf_bypass)
			goto vfta_update;
		return ret_val;
	}

vfta_update:
	/* Update VFTA now that we are ready for traffic */
	if (vfta_delta)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);

	return IXGBE_SUCCESS;
}
   4055 
   4056 /**
   4057  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
   4058  *  @hw: pointer to hardware structure
   4059  *  @vlan: VLAN id to write to VLAN filter
   4060  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
   4061  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
   4062  *  @vfta_delta: pointer to the difference between the current value of VFTA
   4063  *		 and the desired value
   4064  *  @vfta: the desired value of the VFTA
   4065  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
   4066  *
   4067  *  Turn on/off specified bit in VLVF table.
   4068  **/
   4069 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
   4070 			   bool vlan_on, u32 *vfta_delta, u32 vfta,
   4071 			   bool vlvf_bypass)
   4072 {
   4073 	u32 bits;
   4074 	s32 vlvf_index;
   4075 
   4076 	DEBUGFUNC("ixgbe_set_vlvf_generic");
   4077 
   4078 	if (vlan > 4095 || vind > 63)
   4079 		return IXGBE_ERR_PARAM;
   4080 
   4081 	/* If VT Mode is set
   4082 	 *   Either vlan_on
   4083 	 *     make sure the vlan is in VLVF
   4084 	 *     set the vind bit in the matching VLVFB
   4085 	 *   Or !vlan_on
   4086 	 *     clear the pool bit and possibly the vind
   4087 	 */
   4088 	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
   4089 		return IXGBE_SUCCESS;
   4090 
   4091 	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
   4092 	if (vlvf_index < 0)
   4093 		return vlvf_index;
   4094 
   4095 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
   4096 
   4097 	/* set the pool bit */
   4098 	bits |= 1 << (vind % 32);
   4099 	if (vlan_on)
   4100 		goto vlvf_update;
   4101 
   4102 	/* clear the pool bit */
   4103 	bits ^= 1 << (vind % 32);
   4104 
   4105 	if (!bits &&
   4106 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
   4107 		/* Clear VFTA first, then disable VLVF.  Otherwise
   4108 		 * we run the risk of stray packets leaking into
   4109 		 * the PF via the default pool
   4110 		 */
   4111 		if (*vfta_delta)
   4112 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
   4113 
   4114 		/* disable VLVF and clear remaining bit from pool */
   4115 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
   4116 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
   4117 
   4118 		return IXGBE_SUCCESS;
   4119 	}
   4120 
   4121 	/* If there are still bits set in the VLVFB registers
   4122 	 * for the VLAN ID indicated we need to see if the
   4123 	 * caller is requesting that we clear the VFTA entry bit.
   4124 	 * If the caller has requested that we clear the VFTA
   4125 	 * entry bit but there are still pools/VFs using this VLAN
   4126 	 * ID entry then ignore the request.  We're not worried
   4127 	 * about the case where we're turning the VFTA VLAN ID
   4128 	 * entry bit on, only when requested to turn it off as
   4129 	 * there may be multiple pools and/or VFs using the
   4130 	 * VLAN ID entry.  In that case we cannot clear the
   4131 	 * VFTA bit until all pools/VFs using that VLAN ID have also
   4132 	 * been cleared.  This will be indicated by "bits" being
   4133 	 * zero.
   4134 	 */
   4135 	*vfta_delta = 0;
   4136 
   4137 vlvf_update:
   4138 	/* record pool change and enable VLAN ID if not already enabled */
   4139 	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
   4140 	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
   4141 
   4142 	return IXGBE_SUCCESS;
   4143 }
   4144 
   4145 /**
   4146  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
   4147  *  @hw: pointer to hardware structure
   4148  *
   4149  *  Clears the VLAN filer table, and the VMDq index associated with the filter
   4150  **/
   4151 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
   4152 {
   4153 	u32 offset;
   4154 
   4155 	DEBUGFUNC("ixgbe_clear_vfta_generic");
   4156 
   4157 	for (offset = 0; offset < hw->mac.vft_size; offset++)
   4158 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
   4159 
   4160 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
   4161 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
   4162 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
   4163 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
   4164 	}
   4165 
   4166 	return IXGBE_SUCCESS;
   4167 }
   4168 
   4169 /**
   4170  *  ixgbe_toggle_txdctl_generic - Toggle VF's queues
   4171  *  @hw: pointer to hardware structure
   4172  *  @vf_number: VF index
   4173  *
   4174  *  Enable and disable each queue in VF.
   4175  */
   4176 s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vf_number)
   4177 {
   4178 	u8  queue_count, i;
   4179 	u32 offset, reg;
   4180 
   4181 	if (vf_number > 63)
   4182 		return IXGBE_ERR_PARAM;
   4183 
   4184 	/*
   4185 	 * Determine number of queues by checking
   4186 	 * number of virtual functions
   4187 	 */
   4188 	reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
   4189 	switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
   4190 	case IXGBE_GCR_EXT_VT_MODE_64:
   4191 		queue_count = 2;
   4192 		break;
   4193 	case IXGBE_GCR_EXT_VT_MODE_32:
   4194 		queue_count = 4;
   4195 		break;
   4196 	case IXGBE_GCR_EXT_VT_MODE_16:
   4197 		queue_count = 8;
   4198 		break;
   4199 	default:
   4200 		return IXGBE_ERR_CONFIG;
   4201 	}
   4202 
   4203 	/* Toggle queues */
   4204 	for (i = 0; i < queue_count; ++i) {
   4205 		/* Calculate offset of current queue */
   4206 		offset = queue_count * vf_number + i;
   4207 
   4208 		/* Enable queue */
   4209 		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
   4210 		reg |= IXGBE_TXDCTL_ENABLE;
   4211 		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
   4212 		IXGBE_WRITE_FLUSH(hw);
   4213 
   4214 		/* Disable queue */
   4215 		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
   4216 		reg &= ~IXGBE_TXDCTL_ENABLE;
   4217 		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
   4218 		IXGBE_WRITE_FLUSH(hw);
   4219 	}
   4220 
   4221 	return IXGBE_SUCCESS;
   4222 }
   4223 
   4224 /**
   4225  *  ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
   4226  *  @hw: pointer to hardware structure
   4227  *
   4228  *  Contains the logic to identify if we need to verify link for the
   4229  *  crosstalk fix
   4230  **/
   4231 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
   4232 {
   4233 
   4234 	/* Does FW say we need the fix */
   4235 	if (!hw->need_crosstalk_fix)
   4236 		return FALSE;
   4237 
   4238 	/* Only consider SFP+ PHYs i.e. media type fiber */
   4239 	switch (hw->mac.ops.get_media_type(hw)) {
   4240 	case ixgbe_media_type_fiber:
   4241 	case ixgbe_media_type_fiber_qsfp:
   4242 		break;
   4243 	default:
   4244 		return FALSE;
   4245 	}
   4246 
   4247 	return TRUE;
   4248 }
   4249 
   4250 /**
   4251  *  ixgbe_check_mac_link_generic - Determine link and speed status
   4252  *  @hw: pointer to hardware structure
   4253  *  @speed: pointer to link speed
   4254  *  @link_up: TRUE when link is up
   4255  *  @link_up_wait_to_complete: bool used to wait for link up or not
   4256  *
   4257  *  Reads the links register to determine if link is up and the current speed
   4258  **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		if ((hw->mac.type != ixgbe_mac_82598EB) &&
		    !ixgbe_sfp_cage_full(hw)) {
			*link_up = FALSE;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	/* Second read gives the current state after the first read
	 * cleared the old one. */
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* Poll for link up: 100 ms per attempt, up to
		 * hw->mac.max_link_up_time attempts. */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* Decode the link speed field of LINKS. */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		/* On X550 and later the NON_STD bit turns "10G" into 2.5G */
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		/* On X550 and later the NON_STD bit turns "100M" into 5G */
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* Only these X550EM_A 1G devices report 10 Mb/s. */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
   4337 
   4338 /**
   4339  *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
   4340  *  the EEPROM
   4341  *  @hw: pointer to hardware structure
   4342  *  @wwnn_prefix: the alternative WWNN prefix
   4343  *  @wwpn_prefix: the alternative WWPN prefix
   4344  *
   4345  *  This function will read the EEPROM from the alternative SAN MAC address
   4346  *  block to check the support for the alternative WWNN/WWPN prefix support.
   4347  **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	/* a WWNN read failure is only reported; the WWPN is still tried */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	/* read failures are reported but not treated as fatal; the
	 * prefixes simply remain 0xFFFF */
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}
   4395 
   4396 /**
   4397  *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
   4398  *  @hw: pointer to hardware structure
   4399  *  @bs: the fcoe boot status
   4400  *
   4401  *  This function will read the FCOE boot status from the iSCSI FCOE block
   4402  **/
   4403 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
   4404 {
   4405 	u16 offset, caps, flags;
   4406 	s32 status;
   4407 
   4408 	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
   4409 
   4410 	/* clear output first */
   4411 	*bs = ixgbe_fcoe_bootstatus_unavailable;
   4412 
   4413 	/* check if FCOE IBA block is present */
   4414 	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
   4415 	status = hw->eeprom.ops.read(hw, offset, &caps);
   4416 	if (status != IXGBE_SUCCESS)
   4417 		goto out;
   4418 
   4419 	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
   4420 		goto out;
   4421 
   4422 	/* check if iSCSI FCOE block is populated */
   4423 	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
   4424 	if (status != IXGBE_SUCCESS)
   4425 		goto out;
   4426 
   4427 	if ((offset == 0) || (offset == 0xFFFF))
   4428 		goto out;
   4429 
   4430 	/* read fcoe flags in iSCSI FCOE block */
   4431 	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
   4432 	status = hw->eeprom.ops.read(hw, offset, &flags);
   4433 	if (status != IXGBE_SUCCESS)
   4434 		goto out;
   4435 
   4436 	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
   4437 		*bs = ixgbe_fcoe_bootstatus_enabled;
   4438 	else
   4439 		*bs = ixgbe_fcoe_bootstatus_disabled;
   4440 
   4441 out:
   4442 	return status;
   4443 }
   4444 
   4445 /**
   4446  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
   4447  *  @hw: pointer to hardware structure
   4448  *  @enable: enable or disable switch for MAC anti-spoofing
   4449  *  @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
   4450  *
   4451  **/
   4452 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
   4453 {
   4454 	int vf_target_reg = vf >> 3;
   4455 	int vf_target_shift = vf % 8;
   4456 	u32 pfvfspoof;
   4457 
   4458 	if (hw->mac.type == ixgbe_mac_82598EB)
   4459 		return;
   4460 
   4461 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
   4462 	if (enable)
   4463 		pfvfspoof |= (1 << vf_target_shift);
   4464 	else
   4465 		pfvfspoof &= ~(1 << vf_target_shift);
   4466 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
   4467 }
   4468 
   4469 /**
   4470  *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
   4471  *  @hw: pointer to hardware structure
   4472  *  @enable: enable or disable switch for VLAN anti-spoofing
   4473  *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
   4474  *
   4475  **/
   4476 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
   4477 {
   4478 	int vf_target_reg = vf >> 3;
   4479 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
   4480 	u32 pfvfspoof;
   4481 
   4482 	if (hw->mac.type == ixgbe_mac_82598EB)
   4483 		return;
   4484 
   4485 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
   4486 	if (enable)
   4487 		pfvfspoof |= (1 << vf_target_shift);
   4488 	else
   4489 		pfvfspoof &= ~(1 << vf_target_shift);
   4490 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
   4491 }
   4492 
   4493 /**
   4494  *  ixgbe_get_device_caps_generic - Get additional device capabilities
   4495  *  @hw: pointer to hardware structure
   4496  *  @device_caps: the EEPROM word with the extra device capabilities
   4497  *
   4498  *  This function will read the EEPROM location for the device capabilities,
   4499  *  and return the word through device_caps.
   4500  **/
   4501 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
   4502 {
   4503 	DEBUGFUNC("ixgbe_get_device_caps_generic");
   4504 
   4505 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
   4506 
   4507 	return IXGBE_SUCCESS;
   4508 }
   4509 
   4510 /**
   4511  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
   4512  *  @hw: pointer to hardware structure
   4513  *
   4514  **/
   4515 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
   4516 {
   4517 	u32 regval;
   4518 	u32 i;
   4519 
   4520 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
   4521 
   4522 	/* Enable relaxed ordering */
   4523 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
   4524 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
   4525 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   4526 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
   4527 	}
   4528 
   4529 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
   4530 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
   4531 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
   4532 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
   4533 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
   4534 	}
   4535 
   4536 }
   4537 
   4538 /**
   4539  *  ixgbe_calculate_checksum - Calculate checksum for buffer
   4540  *  @buffer: pointer to EEPROM
   4541  *  @length: size of EEPROM to calculate a checksum for
   4542  *  Calculates the checksum for some buffer on a specified length.  The
   4543  *  checksum calculated is returned.
   4544  **/
   4545 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
   4546 {
   4547 	u32 i;
   4548 	u8 sum = 0;
   4549 
   4550 	DEBUGFUNC("ixgbe_calculate_checksum");
   4551 
   4552 	if (!buffer)
   4553 		return 0;
   4554 
   4555 	for (i = 0; i < length; i++)
   4556 		sum += buffer[i];
   4557 
   4558 	return (u8) (0 - sum);
   4559 }
   4560 
   4561 /**
   4562  *  ixgbe_hic_unlocked - Issue command to manageability block unlocked
   4563  *  @hw: pointer to the HW structure
   4564  *  @buffer: command to write and where the return status will be placed
   4565  *  @length: length of buffer, must be multiple of 4 bytes
   4566  *  @timeout: time in ms to wait for command completion
   4567  *
   4568  *  Communicates with the manageability block. On success return IXGBE_SUCCESS
   4569  *  else returns semaphore error when encountering an error acquiring
   4570  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
   4571  *
   4572  *  This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
   4573  *  by the caller.
   4574  **/
   4575 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
   4576 		       u32 timeout)
   4577 {
   4578 	u32 hicr, i, fwsts;
   4579 	u16 dword_len;
   4580 
   4581 	DEBUGFUNC("ixgbe_hic_unlocked");
   4582 
   4583 	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
   4584 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
   4585 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
   4586 	}
   4587 
   4588 	/* Set bit 9 of FWSTS clearing FW reset indication */
   4589 	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
   4590 	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
   4591 
   4592 	/* Check that the host interface is enabled. */
   4593 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
   4594 	if (!(hicr & IXGBE_HICR_EN)) {
   4595 		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
   4596 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
   4597 	}
   4598 
   4599 	/* Calculate length in DWORDs. We must be DWORD aligned */
   4600 	if (length % sizeof(u32)) {
   4601 		DEBUGOUT("Buffer length failure, not aligned to dword");
   4602 		return IXGBE_ERR_INVALID_ARGUMENT;
   4603 	}
   4604 
   4605 	dword_len = length >> 2;
   4606 
   4607 	/* The device driver writes the relevant command block
   4608 	 * into the ram area.
   4609 	 */
   4610 	for (i = 0; i < dword_len; i++)
   4611 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
   4612 				      i, IXGBE_CPU_TO_LE32(buffer[i]));
   4613 
   4614 	/* Setting this bit tells the ARC that a new command is pending. */
   4615 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
   4616 
   4617 	for (i = 0; i < timeout; i++) {
   4618 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
   4619 		if (!(hicr & IXGBE_HICR_C))
   4620 			break;
   4621 		msec_delay(1);
   4622 	}
   4623 
   4624 	/* For each command except "Apply Update" perform
   4625 	 * status checks in the HICR registry.
   4626 	 */
   4627 	if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) ==
   4628 	    IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD)
   4629 		return IXGBE_SUCCESS;
   4630 
   4631 	/* Check command completion */
   4632 	if ((timeout && i == timeout) ||
   4633 	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
   4634 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
   4635 			      "Command has failed with no status valid.\n");
   4636 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
   4637 	}
   4638 
   4639 	return IXGBE_SUCCESS;
   4640 }
   4641 
   4642 /**
   4643  *  ixgbe_host_interface_command - Issue command to manageability block
   4644  *  @hw: pointer to the HW structure
   4645  *  @buffer: contains the command to write and where the return status will
   4646  *   be placed
   4647  *  @length: length of buffer, must be multiple of 4 bytes
   4648  *  @timeout: time in ms to wait for command completion
   4649  *  @return_data: read and return data from the buffer (TRUE) or not (FALSE)
   4650  *   Needed because FW structures are big endian and decoding of
   4651  *   these fields can be 8 bit or 16 bit based on command. Decoding
   4652  *   is not easily understood without making a table of commands.
   4653  *   So we will leave this up to the caller to read back the data
   4654  *   in these cases.
   4655  *
   4656  *  Communicates with the manageability block. On success return IXGBE_SUCCESS
   4657  *  else returns semaphore error when encountering an error acquiring
   4658  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
   4659  **/
   4660 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
   4661 				 u32 length, u32 timeout, bool return_data)
   4662 {
   4663 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
   4664 	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
   4665 	u16 buf_len;
   4666 	s32 status;
   4667 	u32 bi;
   4668 	u32 dword_len;
   4669 
   4670 	DEBUGFUNC("ixgbe_host_interface_command");
   4671 
   4672 	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
   4673 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
   4674 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
   4675 	}
   4676 
   4677 	/* Take management host interface semaphore */
   4678 	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
   4679 	if (status)
   4680 		return status;
   4681 
   4682 	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
   4683 	if (status)
   4684 		goto rel_out;
   4685 
   4686 	if (!return_data)
   4687 		goto rel_out;
   4688 
   4689 	/* Calculate length in DWORDs */
   4690 	dword_len = hdr_size >> 2;
   4691 
   4692 	/* first pull in the header so we know the buffer length */
   4693 	for (bi = 0; bi < dword_len; bi++) {
   4694 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
   4695 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
   4696 	}
   4697 
   4698 	/*
   4699 	 * If there is any thing in data position pull it in
   4700 	 * Read Flash command requires reading buffer length from
   4701 	 * two byes instead of one byte
   4702 	 */
   4703 	if (resp->cmd == 0x30 || resp->cmd == 0x31) {
   4704 		for (; bi < dword_len + 2; bi++) {
   4705 			buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
   4706 							  bi);
   4707 			IXGBE_LE32_TO_CPUS(&buffer[bi]);
   4708 		}
   4709 		buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
   4710 				  & 0xF00) | resp->buf_len;
   4711 		hdr_size += (2 << 2);
   4712 	} else {
   4713 		buf_len = resp->buf_len;
   4714 	}
   4715 	if (!buf_len)
   4716 		goto rel_out;
   4717 
   4718 	if (length < buf_len + hdr_size) {
   4719 		DEBUGOUT("Buffer not large enough for reply message.\n");
   4720 		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
   4721 		goto rel_out;
   4722 	}
   4723 
   4724 	/* Calculate length in DWORDs, add 3 for odd lengths */
   4725 	dword_len = (buf_len + 3) >> 2;
   4726 
   4727 	/* Pull in the rest of the buffer (bi is where we left off) */
   4728 	for (; bi <= dword_len; bi++) {
   4729 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
   4730 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
   4731 	}
   4732 
   4733 rel_out:
   4734 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
   4735 
   4736 	return status;
   4737 }
   4738 
   4739 /**
   4740  *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
   4741  *  @hw: pointer to the HW structure
   4742  *  @maj: driver version major number
   4743  *  @minr: driver version minor number
   4744  *  @build: driver version build number
   4745  *  @sub: driver version sub build number
   4746  *  @len: unused
   4747  *  @driver_ver: unused
   4748  *
   4749  *  Sends driver version number to firmware through the manageability
   4750  *  block.  On success return IXGBE_SUCCESS
   4751  *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
   4752  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
   4753  **/
   4754 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 minr,
   4755 				 u8 build, u8 sub, u16 len,
   4756 				 const char *driver_ver)
   4757 {
   4758 	struct ixgbe_hic_drv_info fw_cmd;
   4759 	int i;
   4760 	s32 ret_val = IXGBE_SUCCESS;
   4761 
   4762 	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
   4763 	UNREFERENCED_2PARAMETER(len, driver_ver);
   4764 
   4765 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
   4766 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
   4767 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
   4768 	fw_cmd.port_num = (u8)hw->bus.func;
   4769 	fw_cmd.ver_maj = maj;
   4770 	fw_cmd.ver_min = minr;
   4771 	fw_cmd.ver_build = build;
   4772 	fw_cmd.ver_sub = sub;
   4773 	fw_cmd.hdr.checksum = 0;
   4774 	fw_cmd.pad = 0;
   4775 	fw_cmd.pad2 = 0;
   4776 	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
   4777 				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
   4778 
   4779 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
   4780 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
   4781 						       sizeof(fw_cmd),
   4782 						       IXGBE_HI_COMMAND_TIMEOUT,
   4783 						       TRUE);
   4784 		if (ret_val != IXGBE_SUCCESS)
   4785 			continue;
   4786 
   4787 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
   4788 		    FW_CEM_RESP_STATUS_SUCCESS)
   4789 			ret_val = IXGBE_SUCCESS;
   4790 		else
   4791 			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
   4792 
   4793 		break;
   4794 	}
   4795 
   4796 	return ret_val;
   4797 }
   4798 
   4799 /**
   4800  * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
   4801  * @hw: pointer to hardware structure
   4802  * @num_pb: number of packet buffers to allocate
   4803  * @headroom: reserve n KB of headroom
   4804  * @strategy: packet buffer allocation strategy
   4805  **/
   4806 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
   4807 			     int strategy)
   4808 {
   4809 	u32 pbsize = hw->mac.rx_pb_size;
   4810 	int i = 0;
   4811 	u32 rxpktsize, txpktsize, txpbthresh;
   4812 
   4813 	/* Reserve headroom */
   4814 	pbsize -= headroom;
   4815 
   4816 	if (!num_pb)
   4817 		num_pb = 1;
   4818 
   4819 	/* Divide remaining packet buffer space amongst the number of packet
   4820 	 * buffers requested using supplied strategy.
   4821 	 */
   4822 	switch (strategy) {
   4823 	case PBA_STRATEGY_WEIGHTED:
   4824 		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
   4825 		 * buffer with 5/8 of the packet buffer space.
   4826 		 */
   4827 		rxpktsize = (pbsize * 5) / (num_pb * 4);
   4828 		pbsize -= rxpktsize * (num_pb / 2);
   4829 		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
   4830 		for (; i < (num_pb / 2); i++)
   4831 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
   4832 		/* fall through - configure remaining packet buffers */
   4833 	case PBA_STRATEGY_EQUAL:
   4834 		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
   4835 		for (; i < num_pb; i++)
   4836 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
   4837 		break;
   4838 	default:
   4839 		break;
   4840 	}
   4841 
   4842 	/* Only support an equally distributed Tx packet buffer strategy. */
   4843 	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
   4844 	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
   4845 	for (i = 0; i < num_pb; i++) {
   4846 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
   4847 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
   4848 	}
   4849 
   4850 	/* Clear unused TCs, if any, to zero buffer size*/
   4851 	for (; i < IXGBE_MAX_PB; i++) {
   4852 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
   4853 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
   4854 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
   4855 	}
   4856 }
   4857 
   4858 /**
   4859  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
   4860  * @hw: pointer to the hardware structure
   4861  *
   4862  * The 82599 and x540 MACs can experience issues if TX work is still pending
   4863  * when a reset occurs.  This function prevents this by flushing the PCIe
   4864  * buffers on the system.
   4865  **/
   4866 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
   4867 {
   4868 	u32 gcr_ext, hlreg0, i, poll;
   4869 	u16 value;
   4870 
   4871 	/*
   4872 	 * If double reset is not requested then all transactions should
   4873 	 * already be clear and as such there is no work to do
   4874 	 */
   4875 	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
   4876 		return;
   4877 
   4878 	/*
   4879 	 * Set loopback enable to prevent any transmits from being sent
   4880 	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
   4881 	 * has already been cleared.
   4882 	 */
   4883 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   4884 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
   4885 
   4886 	/* Wait for a last completion before clearing buffers */
   4887 	IXGBE_WRITE_FLUSH(hw);
   4888 	msec_delay(3);
   4889 
   4890 	/*
   4891 	 * Before proceeding, make sure that the PCIe block does not have
   4892 	 * transactions pending.
   4893 	 */
   4894 	poll = ixgbe_pcie_timeout_poll(hw);
   4895 	for (i = 0; i < poll; i++) {
   4896 		usec_delay(100);
   4897 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
   4898 		if (IXGBE_REMOVED(hw->hw_addr))
   4899 			goto out;
   4900 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
   4901 			goto out;
   4902 	}
   4903 
   4904 out:
   4905 	/* initiate cleaning flow for buffers in the PCIe transaction layer */
   4906 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
   4907 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
   4908 			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
   4909 
   4910 	/* Flush all writes and allow 20usec for all transactions to clear */
   4911 	IXGBE_WRITE_FLUSH(hw);
   4912 	usec_delay(20);
   4913 
   4914 	/* restore previous register values */
   4915 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
   4916 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
   4917 }
   4918 
   4919 /**
   4920  *  ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
   4921  *
   4922  *  @hw: pointer to hardware structure
   4923  *  @cmd: Command we send to the FW
   4924  *  @status: The reply from the FW
   4925  *
   4926  *  Bit-bangs the cmd to the by_pass FW status points to what is returned.
   4927  **/
   4928 #define IXGBE_BYPASS_BB_WAIT 1
   4929 s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
   4930 {
   4931 	int i;
   4932 	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
   4933 	u32 esdp;
   4934 
   4935 	if (!status)
   4936 		return IXGBE_ERR_PARAM;
   4937 
   4938 	*status = 0;
   4939 
   4940 	/* SDP vary by MAC type */
   4941 	switch (hw->mac.type) {
   4942 	case ixgbe_mac_82599EB:
   4943 		sck = IXGBE_ESDP_SDP7;
   4944 		sdi = IXGBE_ESDP_SDP0;
   4945 		sdo = IXGBE_ESDP_SDP6;
   4946 		dir_sck = IXGBE_ESDP_SDP7_DIR;
   4947 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
   4948 		dir_sdo = IXGBE_ESDP_SDP6_DIR;
   4949 		break;
   4950 	case ixgbe_mac_X540:
   4951 		sck = IXGBE_ESDP_SDP2;
   4952 		sdi = IXGBE_ESDP_SDP0;
   4953 		sdo = IXGBE_ESDP_SDP1;
   4954 		dir_sck = IXGBE_ESDP_SDP2_DIR;
   4955 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
   4956 		dir_sdo = IXGBE_ESDP_SDP1_DIR;
   4957 		break;
   4958 	default:
   4959 		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
   4960 	}
   4961 
   4962 	/* Set SDP pins direction */
   4963 	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   4964 	esdp |= dir_sck;	/* SCK as output */
   4965 	esdp |= dir_sdi;	/* SDI as output */
   4966 	esdp &= ~dir_sdo;	/* SDO as input */
   4967 	esdp |= sck;
   4968 	esdp |= sdi;
   4969 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   4970 	IXGBE_WRITE_FLUSH(hw);
   4971 	msec_delay(IXGBE_BYPASS_BB_WAIT);
   4972 
   4973 	/* Generate start condition */
   4974 	esdp &= ~sdi;
   4975 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   4976 	IXGBE_WRITE_FLUSH(hw);
   4977 	msec_delay(IXGBE_BYPASS_BB_WAIT);
   4978 
   4979 	esdp &= ~sck;
   4980 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   4981 	IXGBE_WRITE_FLUSH(hw);
   4982 	msec_delay(IXGBE_BYPASS_BB_WAIT);
   4983 
   4984 	/* Clock out the new control word and clock in the status */
   4985 	for (i = 0; i < 32; i++) {
   4986 		if ((cmd >> (31 - i)) & 0x01) {
   4987 			esdp |= sdi;
   4988 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   4989 		} else {
   4990 			esdp &= ~sdi;
   4991 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   4992 		}
   4993 		IXGBE_WRITE_FLUSH(hw);
   4994 		msec_delay(IXGBE_BYPASS_BB_WAIT);
   4995 
   4996 		esdp |= sck;
   4997 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   4998 		IXGBE_WRITE_FLUSH(hw);
   4999 		msec_delay(IXGBE_BYPASS_BB_WAIT);
   5000 
   5001 		esdp &= ~sck;
   5002 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   5003 		IXGBE_WRITE_FLUSH(hw);
   5004 		msec_delay(IXGBE_BYPASS_BB_WAIT);
   5005 
   5006 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
   5007 		if (esdp & sdo)
   5008 			*status = (*status << 1) | 0x01;
   5009 		else
   5010 			*status = (*status << 1) | 0x00;
   5011 		msec_delay(IXGBE_BYPASS_BB_WAIT);
   5012 	}
   5013 
   5014 	/* stop condition */
   5015 	esdp |= sck;
   5016 	esdp &= ~sdi;
   5017 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   5018 	IXGBE_WRITE_FLUSH(hw);
   5019 	msec_delay(IXGBE_BYPASS_BB_WAIT);
   5020 
   5021 	esdp |= sdi;
   5022 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
   5023 	IXGBE_WRITE_FLUSH(hw);
   5024 
   5025 	/* set the page bits to match the cmd that the status it belongs to */
   5026 	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
   5027 
   5028 	return IXGBE_SUCCESS;
   5029 }
   5030 
   5031 /**
   5032  * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
   5033  *
   5034  * If we send a write we can't be sure it took until we can read back
   5035  * that same register.  It can be a problem as some of the fields may
   5036  * for valid reasons change in-between the time wrote the register and
   5037  * we read it again to verify.  So this function check everything we
   5038  * can check and then assumes it worked.
   5039  *
   5040  * @u32 in_reg - The register cmd for the bit-bang read.
   5041  * @u32 out_reg - The register returned from a bit-bang read.
   5042  **/
   5043 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
   5044 {
   5045 	u32 mask;
   5046 
   5047 	/* Page must match for all control pages */
   5048 	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
   5049 		return FALSE;
   5050 
   5051 	switch (in_reg & BYPASS_PAGE_M) {
   5052 	case BYPASS_PAGE_CTL0:
   5053 		/* All the following can't change since the last write
   5054 		 *  - All the event actions
   5055 		 *  - The timeout value
   5056 		 */
   5057 		mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
   5058 		       BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
   5059 		       BYPASS_WDTIMEOUT_M |
   5060 		       BYPASS_WDT_VALUE_M;
   5061 		if ((out_reg & mask) != (in_reg & mask))
   5062 			return FALSE;
   5063 
   5064 		/* 0x0 is never a valid value for bypass status */
   5065 		if (!(out_reg & BYPASS_STATUS_OFF_M))
   5066 			return FALSE;
   5067 		break;
   5068 	case BYPASS_PAGE_CTL1:
   5069 		/* All the following can't change since the last write
   5070 		 *  - time valid bit
   5071 		 *  - time we last sent
   5072 		 */
   5073 		mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
   5074 		if ((out_reg & mask) != (in_reg & mask))
   5075 			return FALSE;
   5076 		break;
   5077 	case BYPASS_PAGE_CTL2:
   5078 		/* All we can check in this page is control number
   5079 		 * which is already done above.
   5080 		 */
   5081 		break;
   5082 	}
   5083 
   5084 	/* We are as sure as we can be return TRUE */
   5085 	return TRUE;
   5086 }
   5087 
   5088 /**
   5089  *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
   5090  *
   5091  *  @hw: pointer to hardware structure
   5092  *  @cmd: The control word we are setting.
   5093  *  @event: The event we are setting in the FW.  This also happens to
   5094  *	    be the mask for the event we are setting (handy)
   5095  *  @action: The action we set the event to in the FW. This is in a
   5096  *	     bit field that happens to be what we want to put in
   5097  *	     the event spot (also handy)
   5098  **/
   5099 s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
   5100 			     u32 action)
   5101 {
   5102 	u32 by_ctl = 0;
   5103 	u32 cmd, verify;
   5104 	u32 count = 0;
   5105 
   5106 	/* Get current values */
   5107 	cmd = ctrl;	/* just reading only need control number */
   5108 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
   5109 		return IXGBE_ERR_INVALID_ARGUMENT;
   5110 
   5111 	/* Set to new action */
   5112 	cmd = (by_ctl & ~event) | BYPASS_WE | action;
   5113 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
   5114 		return IXGBE_ERR_INVALID_ARGUMENT;
   5115 
   5116 	/* Page 0 force a FW eeprom write which is slow so verify */
   5117 	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
   5118 		verify = BYPASS_PAGE_CTL0;
   5119 		do {
   5120 			if (count++ > 5)
   5121 				return IXGBE_BYPASS_FW_WRITE_FAILURE;
   5122 
   5123 			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
   5124 				return IXGBE_ERR_INVALID_ARGUMENT;
   5125 		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
   5126 	} else {
   5127 		/* We have give the FW time for the write to stick */
   5128 		msec_delay(100);
   5129 	}
   5130 
   5131 	return IXGBE_SUCCESS;
   5132 }
   5133 
   5134 /**
   5135  *  ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address.
   5136  *
   5137  *  @hw: pointer to hardware structure
   5138  *  @addr: The bypass eeprom address to read.
   5139  *  @value: The 8b of data at the address above.
   5140  **/
   5141 s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
   5142 {
   5143 	u32 cmd;
   5144 	u32 status;
   5145 
   5146 
   5147 	/* send the request */
   5148 	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
   5149 	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
   5150 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
   5151 		return IXGBE_ERR_INVALID_ARGUMENT;
   5152 
   5153 	/* We have give the FW time for the write to stick */
   5154 	msec_delay(100);
   5155 
   5156 	/* now read the results */
   5157 	cmd &= ~BYPASS_WE;
   5158 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
   5159 		return IXGBE_ERR_INVALID_ARGUMENT;
   5160 
   5161 	*value = status & BYPASS_CTL2_DATA_M;
   5162 
   5163 	return IXGBE_SUCCESS;
   5164 }
   5165 
   5166 /**
   5167  *  ixgbe_get_orom_version - Return option ROM from EEPROM
   5168  *
   5169  *  @hw: pointer to hardware structure
   5170  *  @nvm_ver: pointer to output structure
   5171  *
   5172  *  if valid option ROM version, nvm_ver->or_valid set to TRUE
   5173  *  else nvm_ver->or_valid is FALSE.
   5174  **/
   5175 void ixgbe_get_orom_version(struct ixgbe_hw *hw,
   5176 			    struct ixgbe_nvm_version *nvm_ver)
   5177 {
   5178 	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
   5179 
   5180 	nvm_ver->or_valid = FALSE;
   5181 	/* Option Rom may or may not be present.  Start with pointer */
   5182 	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
   5183 
   5184 	/* make sure offset is valid */
   5185 	if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
   5186 		return;
   5187 
   5188 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
   5189 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
   5190 
   5191 	/* option rom exists and is valid */
   5192 	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
   5193 	    eeprom_cfg_blkl == NVM_VER_INVALID ||
   5194 	    eeprom_cfg_blkh == NVM_VER_INVALID)
   5195 		return;
   5196 
   5197 	nvm_ver->or_valid = TRUE;
   5198 	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
   5199 	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
   5200 			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
   5201 	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
   5202 }
   5203 
   5204 /**
   5205  *  ixgbe_get_oem_prod_version - Return OEM Product version
   5206  *
   5207  *  @hw: pointer to hardware structure
   5208  *  @nvm_ver: pointer to output structure
   5209  *
   5210  *  if valid OEM product version, nvm_ver->oem_valid set to TRUE
   5211  *  else nvm_ver->oem_valid is FALSE.
   5212  **/
   5213 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
   5214 				struct ixgbe_nvm_version *nvm_ver)
   5215 {
   5216 	u16 rel_num, prod_ver, mod_len, cap, offset;
   5217 
   5218 	nvm_ver->oem_valid = FALSE;
   5219 	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
   5220 
   5221 	/* Return if offset to OEM Product Version block is invalid */
   5222 	if (offset == 0x0 || offset == NVM_INVALID_PTR)
   5223 		return;
   5224 
   5225 	/* Read product version block */
   5226 	hw->eeprom.ops.read(hw, offset, &mod_len);
   5227 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
   5228 
   5229 	/* Return if OEM product version block is invalid */
   5230 	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
   5231 	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
   5232 		return;
   5233 
   5234 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
   5235 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
   5236 
   5237 	/* Return if version is invalid */
   5238 	if ((rel_num | prod_ver) == 0x0 ||
   5239 	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
   5240 		return;
   5241 
   5242 	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
   5243 	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
   5244 	nvm_ver->oem_release = rel_num;
   5245 	nvm_ver->oem_valid = TRUE;
   5246 }
   5247 
   5248 /**
   5249  *  ixgbe_get_etk_id - Return Etrack ID from EEPROM
   5250  *
   5251  *  @hw: pointer to hardware structure
   5252  *  @nvm_ver: pointer to output structure
   5253  *
   5254  *  word read errors will return 0xFFFF
   5255  **/
   5256 void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
   5257 {
   5258 	u16 etk_id_l, etk_id_h;
   5259 
   5260 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
   5261 		etk_id_l = NVM_VER_INVALID;
   5262 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
   5263 		etk_id_h = NVM_VER_INVALID;
   5264 
   5265 	/* The word order for the version format is determined by high order
   5266 	 * word bit 15.
   5267 	 */
   5268 	if ((etk_id_h & NVM_ETK_VALID) == 0) {
   5269 		nvm_ver->etk_id = etk_id_h;
   5270 		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
   5271 	} else {
   5272 		nvm_ver->etk_id = etk_id_l;
   5273 		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
   5274 	}
   5275 }
   5276 
   5277 
   5278 /**
   5279  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
   5280  * @hw: pointer to hardware structure
   5281  * @map: pointer to u8 arr for returning map
   5282  *
   5283  * Read the rtrup2tc HW register and resolve its content into map
   5284  **/
   5285 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
   5286 {
   5287 	u32 reg, i;
   5288 
   5289 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
   5290 	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
   5291 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
   5292 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
   5293 	return;
   5294 }
   5295 
   5296 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
   5297 {
   5298 	u32 pfdtxgswc;
   5299 	u32 rxctrl;
   5300 
   5301 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   5302 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
   5303 		if (hw->mac.type != ixgbe_mac_82598EB) {
   5304 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
   5305 			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
   5306 				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
   5307 				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
   5308 				hw->mac.set_lben = TRUE;
   5309 			} else {
   5310 				hw->mac.set_lben = FALSE;
   5311 			}
   5312 		}
   5313 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
   5314 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
   5315 	}
   5316 }
   5317 
   5318 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
   5319 {
   5320 	u32 pfdtxgswc;
   5321 	u32 rxctrl;
   5322 
   5323 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   5324 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
   5325 
   5326 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5327 		if (hw->mac.set_lben) {
   5328 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
   5329 			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
   5330 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
   5331 			hw->mac.set_lben = FALSE;
   5332 		}
   5333 	}
   5334 }
   5335 
   5336 /**
   5337  * ixgbe_mng_present - returns TRUE when management capability is present
   5338  * @hw: pointer to hardware structure
   5339  */
   5340 bool ixgbe_mng_present(struct ixgbe_hw *hw)
   5341 {
   5342 	u32 fwsm;
   5343 
   5344 	if (hw->mac.type < ixgbe_mac_82599EB)
   5345 		return FALSE;
   5346 
   5347 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
   5348 	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
   5349 }
   5350 
   5351 /**
   5352  * ixgbe_mng_enabled - Is the manageability engine enabled?
   5353  * @hw: pointer to hardware structure
   5354  *
   5355  * Returns TRUE if the manageability engine is enabled.
   5356  **/
   5357 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
   5358 {
   5359 	u32 fwsm, manc, factps;
   5360 
   5361 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
   5362 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
   5363 		return FALSE;
   5364 
   5365 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
   5366 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
   5367 		return FALSE;
   5368 
   5369 	if (hw->mac.type <= ixgbe_mac_X540) {
   5370 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
   5371 		if (factps & IXGBE_FACTPS_MNGCG)
   5372 			return FALSE;
   5373 	}
   5374 
   5375 	return TRUE;
   5376 }
   5377 
/**
 *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the MAC and/or PHY register and restarts link.
 *
 *  Tries the requested speeds from highest (10G) to lowest (1G),
 *  reconfiguring the module and restarting the link at each step.  If no
 *  link is established and more than one speed was tried, recurses once
 *  with only the highest speed attempted.  Always updates
 *  hw->phy.autoneg_advertised from the (masked) requested speed.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* Only record 1G as the fallback when 10G was not tried. */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* No supported speed was requested (or none survived the mask). */
	if (speed == 0) {
		/* Disable the Tx laser for media none */
		ixgbe_disable_tx_laser(hw);

		goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						      highest_link_speed,
						      autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
   5531 
   5532 /**
   5533  *  ixgbe_set_soft_rate_select_speed - Set module link speed
   5534  *  @hw: pointer to hardware structure
   5535  *  @speed: link speed to set
   5536  *
   5537  *  Set module link speed via the soft rate select.
   5538  */
   5539 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
   5540 					ixgbe_link_speed speed)
   5541 {
   5542 	s32 status;
   5543 	u8 rs, eeprom_data;
   5544 
   5545 	switch (speed) {
   5546 	case IXGBE_LINK_SPEED_10GB_FULL:
   5547 		/* one bit mask same as setting on */
   5548 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
   5549 		break;
   5550 	case IXGBE_LINK_SPEED_1GB_FULL:
   5551 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
   5552 		break;
   5553 	default:
   5554 		DEBUGOUT("Invalid fixed module speed\n");
   5555 		return;
   5556 	}
   5557 
   5558 	/* Set RS0 */
   5559 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
   5560 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
   5561 					   &eeprom_data);
   5562 	if (status) {
   5563 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
   5564 		goto out;
   5565 	}
   5566 
   5567 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
   5568 
   5569 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
   5570 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
   5571 					    eeprom_data);
   5572 	if (status) {
   5573 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
   5574 		goto out;
   5575 	}
   5576 
   5577 	/* Set RS1 */
   5578 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
   5579 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
   5580 					   &eeprom_data);
   5581 	if (status) {
   5582 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
   5583 		goto out;
   5584 	}
   5585 
   5586 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
   5587 
   5588 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
   5589 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
   5590 					    eeprom_data);
   5591 	if (status) {
   5592 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
   5593 		goto out;
   5594 	}
   5595 out:
   5596 	return;
   5597 }
   5598