/* $NetBSD: ixgbe_82599.c,v 1.29 2021/12/24 05:11:04 msaitoh Exp $ */

/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixgbe_82599.c,v 1.29 2021/12/24 05:11:04 msaitoh Exp $");

#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
#define IXGBE_82599_RAR_ENTRIES   128
#define IXGBE_82599_MC_TBL_SIZE   128
#define IXGBE_82599_VFT_TBL_SIZE  128
#define IXGBE_82599_RX_PB_SIZE	  512

static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);
static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				     u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				      u8 dev_addr, u8 data);

void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/*
	 * Enable the laser control functions for SFP+ fiber
	 * when MNG is not enabled.
	 */
	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	    !ixgbe_mng_enabled(hw)) {
		mac->ops.disable_tx_laser =
		    ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
		    ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
	} else {
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
		mac->ops.set_rate_select_speed =
		    ixgbe_set_hard_rate_select_speed;
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
			mac->ops.set_rate_select_speed =
			    ixgbe_set_soft_rate_select_speed;
	} else {
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		    !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
		}
	}
}
112
113 /**
114 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
115 * @hw: pointer to hardware structure
116 *
117 * Initialize any function pointers that were not able to be
118 * set during init_shared_code because the PHY/SFP type was
119 * not known. Perform the SFP init if necessary.
120 *
121 **/
122 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
123 {
124 struct ixgbe_mac_info *mac = &hw->mac;
125 struct ixgbe_phy_info *phy = &hw->phy;
126 s32 ret_val = IXGBE_SUCCESS;
127 u32 esdp;
128
129 DEBUGFUNC("ixgbe_init_phy_ops_82599");
130
131 if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
132 /* Store flag indicating I2C bus access control unit. */
133 hw->phy.qsfp_shared_i2c_bus = TRUE;
134
135 /* Initialize access to QSFP+ I2C bus */
136 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
137 esdp |= IXGBE_ESDP_SDP0_DIR;
138 esdp &= ~IXGBE_ESDP_SDP1_DIR;
139 esdp &= ~IXGBE_ESDP_SDP0;
140 esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
141 esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
142 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
143 IXGBE_WRITE_FLUSH(hw);
144
145 phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
146 phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
147 }
148 /* Identify the PHY or SFP module */
149 ret_val = phy->ops.identify(hw);
150 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
151 goto init_phy_ops_out;
152
153 /* Setup function pointers based on detected SFP module and speeds */
154 ixgbe_init_mac_link_ops_82599(hw);
155 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
156 hw->phy.ops.reset = NULL;
157
158 /* If copper media, overwrite with copper function pointers */
159 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
160 mac->ops.setup_link = ixgbe_setup_copper_link_82599;
161 mac->ops.get_link_capabilities =
162 ixgbe_get_copper_link_capabilities_generic;
163 }
164
165 /* Set necessary function pointers based on PHY type */
166 switch (hw->phy.type) {
167 case ixgbe_phy_tn:
168 phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
169 phy->ops.check_link = ixgbe_check_phy_link_tnx;
170 phy->ops.get_firmware_version =
171 ixgbe_get_phy_firmware_version_tnx;
172 break;
173 default:
174 break;
175 }
176 init_phy_ops_out:
177 return ret_val;
178 }

s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining the semaphore again to allow FW access;
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
		    hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
		    FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}
	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining the semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}

/**
 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: Returns whether we took the SW/FW lock for this read.
 * @reg_val: Value we read from AUTOC
 *
 * For this part (82599) we need to wrap read-modify-writes with a possible
 * FW/SW lock. It is assumed this lock will be freed with the next
 * prot_autoc_write_82599().
 */
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	s32 ret_val;

	*locked = FALSE;
	/* If LESM is on then we need to hold the SW/FW semaphore. */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		*locked = TRUE;
	}

	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    a previous prot_autoc_read_82599.
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
 * AUTOC. Likewise after a write we need to do a pipeline reset.
 */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 * - We didn't take it already (in the read part of a
	 *   read-modify-write)
	 * - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}
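
/*
 * Usage sketch (illustrative only, not part of the upstream driver): the
 * two helpers above are intended to be used as a pair around an AUTOC
 * read-modify-write, with the "locked" flag carrying SW/FW semaphore
 * ownership from the read to the write.  A hypothetical caller:
 *
 *	bool locked;
 *	u32 autoc;
 *
 *	if (hw->mac.ops.prot_autoc_read(hw, &locked, &autoc) ==
 *	    IXGBE_SUCCESS) {
 *		autoc |= IXGBE_AUTOC_AN_RESTART;
 *		(void)hw->mac.ops.prot_autoc_write(hw, autoc, locked);
 *	}
 *
 * prot_autoc_write_82599() releases the semaphore whenever it is held, so
 * the lock never outlives the read-modify-write sequence.
 */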

/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware.
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_init_ops_82599");

	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
	    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	for (i = 0; i < 64; i++)
		hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");

	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If the AUTOC value has not
	 * been stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*autoneg = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;

		/* QSFP must not enable full auto-negotiation;
		 * limited autoneg is enabled at 1G.
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
			*autoneg = FALSE;
		else
			*autoneg = TRUE;
	}

out:
	return status;
}

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599EN_SFP:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		media_type = ixgbe_media_type_fiber_qsfp;
		break;
	case IXGBE_DEV_ID_82599_BYPASS:
		media_type = ixgbe_media_type_fiber_fixed;
		hw->phy.multispeed_fiber = TRUE;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
 * @hw: pointer to hardware structure
 *
 * Disables link during D3 power down sequence.
 *
 **/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	u16 ee_ctrl_2 = 0;

	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}

/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link. Performs autonegotiation if needed.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");

	/* reset_pipeline requires us to hold this lock as it writes to
	 * AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

out:
	return status;
}

/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable Tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}

/**
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable Tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}

/**
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
 *
 * When the driver changes the link speeds that it can support,
 * it sets autotry_restart to TRUE to indicate that we need to
 * initiate a new autotry session with the link partner.  To do
 * so, we set the speed, then disable and re-enable the Tx laser,
 * to alert the link partner that it also needs to restart autotry
 * on its end.  This is consistent with true clause 37 autoneg,
 * which also involves a loss of signal.
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 * ixgbe_set_hard_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via RS0/RS1 rate select pins.
 */
void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	/* Value of the AUTOC register at this current point in time */
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 orig_autoc = 0;		/* Cached value of the AUTOC register */
	u32 autoc = current_autoc;	/* Temporary variable for comparison */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	speed &= link_capabilities;

	if (speed == 0) {
		ixgbe_disable_tx_laser(hw);	/* For fiber */
		ixgbe_set_phy_power(hw, false);	/* For copper */
	} else {
		/* In case the previous media setting was none (link down) */
		ixgbe_enable_tx_laser(hw);	/* For fiber */
		ixgbe_set_phy_power(hw, true);	/* For copper */
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					    IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
					    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noise during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masking
 * and clearing all interrupts, performing a PHY reset, and performing a
 * link (MAC) reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	phy_status = hw->phy.ops.init(hw);

	if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto mac_reset_top;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		phy_status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto mac_reset_top;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_retry:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear meaning reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_retry;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support, we need to
		 * leave LMS in the state it was in before the MAC reset.
		 * Likewise if we support WoL we don't want to change
		 * the LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
			    (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
			    curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							      hw->mac.orig_autoc,
							      FALSE);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: current value of FDIRCMD register
 */
static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return IXGBE_SUCCESS;
		usec_delay(10);
	}

	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
}
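
/*
 * Note (illustrative, not from the upstream sources): the loop above
 * busy-waits at most IXGBE_FDIRCMD_CMD_POLL iterations of 10us each for
 * FDIRCMD.CMD to clear; with the stock poll count of 10 that is roughly
 * 100us before IXGBE_ERR_FDIR_CMD_INCOMPLETE is returned.
 */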

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * The 82599 adapter's Flow Director init flow cannot be restarted.
	 * Work around this 82599 silicon errata by performing the following
	 * steps before re-writing the FDIRCTRL control register with the
	 * same value:
	 * - write 1 to bit 8 of the FDIRCMD register, then
	 * - write 0 to bit 8 of the FDIRCMD register.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *	10G: PBALLOC = 11b, timing is 60us
	 *	 1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 * Multiply these timings by 4 if under full Rx load.
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	DEBUGFUNC("ixgbe_init_fdir_signature_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *	Move the flexible bytes to use the ethertype - shift 6 words
	 *	Set the maximum length per hash bucket to 0xA filters
	 *	Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 * @cloud_mode: TRUE - cloud mode, FALSE - other mode
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
				  bool cloud_mode)
{
	UNREFERENCED_1PARAMETER(cloud_mode);
	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *	Turn perfect match filtering on
	 *	Report hash in RSS field of Rx wb descriptor
	 *	Initialize the drop queue to queue 127
	 *	Move the flexible bytes to use the ethertype - shift 6 words
	 *	Set the maximum length per hash bucket to 0xA filters
	 *	Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    IXGBE_FDIRCTRL_REPORT_STATUS |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	if (cloud_mode)
		fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
			     IXGBE_FDIRCTRL_FILTERMODE_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
 * @hw: pointer to hardware structure
 * @dropqueue: Rx queue index used for the dropped packets
 **/
void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
{
	u32 fdirctrl;

	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
	/* Clear init done bit and drop queue field */
	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);

	/* Set drop queue */
	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
	if ((hw->mac.type == ixgbe_mac_X550) ||
	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
	    (hw->mac.type == ixgbe_mac_X550EM_a))
		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
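
/*
 * Illustrative note (not from the upstream sources): each iteration above
 * is one step of a key-dependent XOR fold.  Key bit n selects whether
 * (lo_hash_dword >> n) is folded into a hash, and key bit n + 16 does the
 * same for hi_hash_dword, so for a single dword d and key k the bucket
 * hash reduces to
 *
 *	hash(d) = XOR over n in [0, 15] of (k & (1 << n) ? d >> n : 0)
 *
 * For example, with a hypothetical key of 0x5 (bits 0 and 2 set), the
 * contribution of d would be (d >> 0) ^ (d >> 2), later truncated by
 * IXGBE_ATR_HASH_MASK in ixgbe_atr_compute_sig_hash_82599() below.
 */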

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function is almost identical to the perfect-filter hash routine
 * below, but contains several optimizations such as unwinding all of the
 * loops, letting the compiler work out all of the conditional ifs since
 * the keys are static defines, and computing two keys at once since the
 * hashed dword stream will be the same for both keys.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * Apply flow ID/VM pool/VLAN ID bits to the lo hash dword.  We had
	 * to delay this because the VLAN must not be processed in bit 0 of
	 * the stream, so we do not add it until after bit 0 has been
	 * processed.
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 **/
void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					   union ixgbe_atr_hash_dword input,
					   union ixgbe_atr_hash_dword common,
					   u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly:
	 * the lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
	 * FDIRCMD.IPV6, and the fifth is FDIRCMD.TUNNEL_FILTER.
	 */
	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
	flow_type = input.formatted.flow_type &
		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
	switch (flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	if (tunnel)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= (u64)ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return;
}

#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)

/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes.  First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream.  This way it will be available for
 * future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	u32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data */
	for (i = 0; i < 14; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = IXGBE_NTOHL(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * Apply flow ID/VM pool/VLAN ID bits to the lo hash dword.  We had
	 * to delay this because the VLAN must not be processed in bit 0 of
	 * the stream, so we do not add it until after bit 0 has been
	 * processed.
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}
1678
1679 /**
1680 * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
1681 * @input_mask: mask to be bit swapped
1682 *
1683 * The source and destination port masks for flow director are bit swapped
1684 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1685 * generate a correctly swapped value we need to bit swap the mask and that
1686 * is what is accomplished by this function.
1687 **/
1688 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1689 {
1690 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1691 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1692 mask |= (u32)IXGBE_NTOHS(input_mask->formatted.src_port);
1693 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1694 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1695 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1696 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1697 }
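/*
 * Worked example of the swap above (values hypothetical).  With a
 * host-order dst_port mask of 0x00FF and a src_port mask of 0x0000:
 *
 *	after the shifts:	mask = 0x00FF0000
 *	after the four swaps:	mask = 0xFF000000
 *
 * The four swap steps reverse the bits within each 16-bit half, so bits
 * 0-7 of the port mask land in bits 15-8 of the DPORTM field - exactly
 * the "bit 15 affects bit 0" layout described above.
 */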
1698
1699 /*
1700 * These two macros are meant to address the fact that we have registers
1701 * that are either all or in part big-endian.  As a result, on big-endian
1702 * systems we will end up byte swapping the value to little-endian before
1703 * it is byte swapped again and written to the hardware in the original
1704 * big-endian format.
1705 */
1706 #define IXGBE_STORE_AS_BE32(_value) \
1707 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1708 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1709
1710 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1711 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1712
1713 #define IXGBE_STORE_AS_BE16(_value) \
1714 IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
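/*
 * Illustrative behaviour of the macros above (a sketch, not normative):
 *
 *	IXGBE_NTOHL(v)		 byte swap on little-endian hosts, no-op on
 *				 big-endian hosts
 *	IXGBE_STORE_AS_BE32(x)	 unconditional byte swap
 *
 * On little-endian hosts the two swaps in IXGBE_WRITE_REG_BE32() cancel
 * and the value is written unchanged; on big-endian hosts the value is
 * swapped once to little-endian and swapped back by the register-write
 * path, preserving the original big-endian layout as described above.
 */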
1715
1716 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1717 union ixgbe_atr_input *input_mask, bool cloud_mode)
1718 {
1719 /* mask IPv6 since it is currently not supported */
1720 u32 fdirm = IXGBE_FDIRM_DIPv6;
1721 u32 fdirtcpm;
1722 u32 fdirip6m;
1723 UNREFERENCED_1PARAMETER(cloud_mode);
1724	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
1725
1726 /*
1727 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1728 * are zero, then assume a full mask for that field. Also assume that
1729 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1730 * cannot be masked out in this implementation.
1731 *
1732 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1733 * point in time.
1734 */
1735
1736	/* verify the bucket hash in the mask is cleared; it is set by hash generation */
1737 if (input_mask->formatted.bkt_hash)
1738 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1739
1740 /* Program FDIRM and verify partial masks */
1741 switch (input_mask->formatted.vm_pool & 0x7F) {
1742 case 0x0:
1743		fdirm |= IXGBE_FDIRM_POOL;	/* fall through */
1744 case 0x7F:
1745 break;
1746 default:
1747 DEBUGOUT(" Error on vm pool mask\n");
1748 return IXGBE_ERR_CONFIG;
1749 }
1750
1751 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1752 case 0x0:
1753 fdirm |= IXGBE_FDIRM_L4P;
1754 if (input_mask->formatted.dst_port ||
1755 input_mask->formatted.src_port) {
1756 DEBUGOUT(" Error on src/dst port mask\n");
1757 return IXGBE_ERR_CONFIG;
1758		}	/* fall through */
1759 case IXGBE_ATR_L4TYPE_MASK:
1760 break;
1761 default:
1762 DEBUGOUT(" Error on flow type mask\n");
1763 return IXGBE_ERR_CONFIG;
1764 }
1765
1766 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1767 case 0x0000:
1768 /* mask VLAN ID */
1769 fdirm |= IXGBE_FDIRM_VLANID;
1770 /* fall through */
1771 case 0x0FFF:
1772 /* mask VLAN priority */
1773 fdirm |= IXGBE_FDIRM_VLANP;
1774 break;
1775 case 0xE000:
1776 /* mask VLAN ID only */
1777 fdirm |= IXGBE_FDIRM_VLANID;
1778 /* fall through */
1779 case 0xEFFF:
1780 /* no VLAN fields masked */
1781 break;
1782 default:
1783 DEBUGOUT(" Error on VLAN mask\n");
1784 return IXGBE_ERR_CONFIG;
1785 }
1786
1787 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1788 case 0x0000:
1789 /* Mask Flex Bytes */
1790 fdirm |= IXGBE_FDIRM_FLEX;
1791 /* fall through */
1792 case 0xFFFF:
1793 break;
1794 default:
1795 DEBUGOUT(" Error on flexible byte mask\n");
1796 return IXGBE_ERR_CONFIG;
1797 }
1798
1799 if (cloud_mode) {
1800 fdirm |= IXGBE_FDIRM_L3P;
1801 fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
1802 fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
1803
1804 switch (input_mask->formatted.inner_mac[0] & 0xFF) {
1805 case 0x00:
1806 /* Mask inner MAC, fall through */
1807 fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
1808 case 0xFF:
1809 break;
1810 default:
1811 DEBUGOUT(" Error on inner_mac byte mask\n");
1812 return IXGBE_ERR_CONFIG;
1813 }
1814
1815 switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
1816 case 0x0:
1817 /* Mask vxlan id */
1818 fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
1819 break;
1820 case 0x00FFFFFF:
1821 fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
1822 break;
1823 case 0xFFFFFFFF:
1824 break;
1825 default:
1826 DEBUGOUT(" Error on TNI/VNI byte mask\n");
1827 return IXGBE_ERR_CONFIG;
1828 }
1829
1830 switch (input_mask->formatted.tunnel_type & 0xFFFF) {
1831 case 0x0:
1832			/* Mask tunnel type, fall through */
1833 fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
1834 case 0xFFFF:
1835 break;
1836 default:
1837 DEBUGOUT(" Error on tunnel type byte mask\n");
1838 return IXGBE_ERR_CONFIG;
1839 }
1840 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
1841
1842 /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
1843 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
1844		 * L3/L4 packets to tunnel.
1845 */
1846 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
1847 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
1848 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
1849 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
1850 switch (hw->mac.type) {
1851 case ixgbe_mac_X550:
1852 case ixgbe_mac_X550EM_x:
1853 case ixgbe_mac_X550EM_a:
1854 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
1855 break;
1856 default:
1857 break;
1858 }
1859 }
1860
1861 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1862 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1863
1864 if (!cloud_mode) {
1865 /* store the TCP/UDP port masks, bit reversed from port
1866 * layout */
1867 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1868
1869 /* write both the same so that UDP and TCP use the same mask */
1870 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1871 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1872 /* also use it for SCTP */
1873 switch (hw->mac.type) {
1874 case ixgbe_mac_X550:
1875 case ixgbe_mac_X550EM_x:
1876 case ixgbe_mac_X550EM_a:
1877 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
1878 break;
1879 default:
1880 break;
1881 }
1882
1883		/* store source and destination IP masks (big-endian) */
1884 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1885 ~input_mask->formatted.src_ip[0]);
1886 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1887 ~input_mask->formatted.dst_ip[0]);
1888 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xFFFFFFFF);
1889 }
1890 return IXGBE_SUCCESS;
1891 }
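/*
 * A minimal, hypothetical usage sketch.  Per the rules at the top of the
 * function, an all-ones field means "match this field" and a zero field
 * means "mask it out"; fields are network byte order:
 *
 *	union ixgbe_atr_input mask;
 *	memset(&mask, 0, sizeof(mask));
 *	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_MASK;  (match L4 type)
 *	mask.formatted.src_port = htons(0xFFFF);	   (match src port)
 *	mask.formatted.dst_port = htons(0xFFFF);	   (match dst port)
 *	mask.formatted.src_ip[0] = htonl(0xFFFFFFFF);	   (match src IPv4)
 *	if (ixgbe_fdir_set_input_mask_82599(hw, &mask, FALSE) !=
 *	    IXGBE_SUCCESS)
 *		(... handle IXGBE_ERR_CONFIG ...)
 */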
1892
1893 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1894 union ixgbe_atr_input *input,
1895 u16 soft_id, u8 queue, bool cloud_mode)
1896 {
1897 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1898 u32 addr_low, addr_high;
1899 u32 cloud_type = 0;
1900 s32 err;
1901 UNREFERENCED_1PARAMETER(cloud_mode);
1902
1903 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1904 if (!cloud_mode) {
1905 /* currently IPv6 is not supported, must be programmed with 0 */
1906 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1907 input->formatted.src_ip[0]);
1908 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1909 input->formatted.src_ip[1]);
1910 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1911 input->formatted.src_ip[2]);
1912
1913 /* record the source address (big-endian) */
1914 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
1915 input->formatted.src_ip[0]);
1916
1917 /* record the first 32 bits of the destination address
1918 * (big-endian) */
1919 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
1920 input->formatted.dst_ip[0]);
1921
1922		/* record source and destination port (little-endian) */
1923 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1924 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1925 fdirport |= (u32)IXGBE_NTOHS(input->formatted.src_port);
1926 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1927 }
1928
1929	/* record VLAN (little-endian) and flex_bytes (big-endian) */
1930 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1931 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1932 fdirvlan |= (u32)IXGBE_NTOHS(input->formatted.vlan_id);
1933 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1934
1935 if (cloud_mode) {
1936 if (input->formatted.tunnel_type != 0)
1937 cloud_type = 0x80000000;
1938
1939 addr_low = ((u32)input->formatted.inner_mac[0] |
1940 ((u32)input->formatted.inner_mac[1] << 8) |
1941 ((u32)input->formatted.inner_mac[2] << 16) |
1942 ((u32)input->formatted.inner_mac[3] << 24));
1943 addr_high = ((u32)input->formatted.inner_mac[4] |
1944 ((u32)input->formatted.inner_mac[5] << 8));
1945 cloud_type |= addr_high;
1946 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
1947 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
1948 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
1949 }
1950
1951 /* configure FDIRHASH register */
1952 fdirhash = input->formatted.bkt_hash;
1953 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1954 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1955
1956 /*
1957 * flush all previous writes to make certain registers are
1958 * programmed prior to issuing the command
1959 */
1960 IXGBE_WRITE_FLUSH(hw);
1961
1962 /* configure FDIRCMD register */
1963 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1964 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1965 if (queue == IXGBE_FDIR_DROP_QUEUE)
1966 fdircmd |= IXGBE_FDIRCMD_DROP;
1967 if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
1968 fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1969 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1970 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1971 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1972
1973 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1974 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1975 if (err) {
1976 DEBUGOUT("Flow Director command did not complete!\n");
1977 return err;
1978 }
1979
1980 return IXGBE_SUCCESS;
1981 }
1982
1983 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1984 union ixgbe_atr_input *input,
1985 u16 soft_id)
1986 {
1987 u32 fdirhash;
1988 u32 fdircmd;
1989 s32 err;
1990
1991 /* configure FDIRHASH register */
1992 fdirhash = input->formatted.bkt_hash;
1993 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1994 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1995
1996 /* flush hash to HW */
1997 IXGBE_WRITE_FLUSH(hw);
1998
1999 /* Query if filter is present */
2000 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
2001
2002 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
2003 if (err) {
2004 DEBUGOUT("Flow Director command did not complete!\n");
2005 return err;
2006 }
2007
2008 /* if filter exists in hardware then remove it */
2009 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
2010 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2011 IXGBE_WRITE_FLUSH(hw);
2012 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
2013 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
2014 }
2015
2016 return IXGBE_SUCCESS;
2017 }
2018
2019 /**
2020 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
2021 * @hw: pointer to hardware structure
2022 * @input: input bitstream
2023 * @input_mask: mask for the input bitstream
2024 * @soft_id: software index for the filters
2025 * @queue: queue index to direct traffic to
2026 * @cloud_mode: unused
2027 *
2028 * Note that the caller of this function must hold a lock, since the
2029 * hardware writes must be protected from one another.
2030 **/
2031 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2032 union ixgbe_atr_input *input,
2033 union ixgbe_atr_input *input_mask,
2034 u16 soft_id, u8 queue, bool cloud_mode)
2035 {
2036 s32 err = IXGBE_ERR_CONFIG;
2037 UNREFERENCED_1PARAMETER(cloud_mode);
2038
2039 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
2040
2041 /*
2042 * Check flow_type formatting, and bail out before we touch the hardware
2043 * if there's a configuration issue
2044 */
2045 switch (input->formatted.flow_type) {
2046 case IXGBE_ATR_FLOW_TYPE_IPV4:
2047 case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
2048 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
2049 if (input->formatted.dst_port || input->formatted.src_port) {
2050 DEBUGOUT(" Error on src/dst port\n");
2051 return IXGBE_ERR_CONFIG;
2052 }
2053 break;
2054 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2055 case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
2056 if (input->formatted.dst_port || input->formatted.src_port) {
2057 DEBUGOUT(" Error on src/dst port\n");
2058 return IXGBE_ERR_CONFIG;
2059 }
2060 /* fall through */
2061 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2062 case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
2063 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2064 case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
2065 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2066 IXGBE_ATR_L4TYPE_MASK;
2067 break;
2068 default:
2069 DEBUGOUT(" Error on flow type input\n");
2070 return err;
2071 }
2072
2073 /* program input mask into the HW */
2074 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
2075 if (err)
2076 return err;
2077
2078 /* apply mask and compute/store hash */
2079 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2080
2081 /* program filters to filter memory */
2082 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2083 soft_id, queue, cloud_mode);
2084 }
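/*
 * A hypothetical caller sketch for the locking requirement noted above.
 * The mutex name is an assumption for illustration; the driver's actual
 * serialization primitive may differ:
 *
 *	mutex_enter(&sc->fdir_mtx);	   (serialize FDIR register writes)
 *	err = ixgbe_fdir_add_perfect_filter_82599(hw, &filter, &mask,
 *	    soft_id, queue, FALSE);
 *	mutex_exit(&sc->fdir_mtx);
 */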
2085
2086 /**
2087 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2088 * @hw: pointer to hardware structure
2089 * @reg: analog register to read
2090 * @val: read value
2091 *
2092 * Performs a read operation on the specified Omer analog register.
2093 **/
2094 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2095 {
2096 u32 core_ctl;
2097
2098 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2099
2100 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2101 (reg << 8));
2102 IXGBE_WRITE_FLUSH(hw);
2103 usec_delay(10);
2104 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2105 *val = (u8)core_ctl;
2106
2107 return IXGBE_SUCCESS;
2108 }
2109
2110 /**
2111 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2112 * @hw: pointer to hardware structure
2113 * @reg: analog register to write
2114 * @val: value to write
2115 *
2116 * Performs a write operation on the specified Omer analog register.
2117 **/
2118 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2119 {
2120 u32 core_ctl;
2121
2122 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2123
2124 core_ctl = (reg << 8) | val;
2125 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2126 IXGBE_WRITE_FLUSH(hw);
2127 usec_delay(10);
2128
2129 return IXGBE_SUCCESS;
2130 }
2131
2132 /**
2133 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2134 * @hw: pointer to hardware structure
2135 *
2136 * Starts the hardware using the generic start_hw function and then the
2137 * generation-2 start_hw function.
2138 * Then performs revision-specific operations, if any.
2139 **/
2140 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2141 {
2142 s32 ret_val = IXGBE_SUCCESS;
2143
2144 DEBUGFUNC("ixgbe_start_hw_82599");
2145
2146 ret_val = ixgbe_start_hw_generic(hw);
2147 if (ret_val != IXGBE_SUCCESS)
2148 goto out;
2149
2150 ixgbe_start_hw_gen2(hw);
2151
2152 /* We need to run link autotry after the driver loads */
2153 hw->mac.autotry_restart = TRUE;
2154
2155 if (ret_val == IXGBE_SUCCESS)
2156 ret_val = ixgbe_verify_fw_version_82599(hw);
2157 out:
2158 return ret_val;
2159 }
2160
2161 /**
2162 * ixgbe_identify_phy_82599 - Get physical layer module
2163 * @hw: pointer to hardware structure
2164 *
2165 * Determines the physical layer module found on the current adapter.
2166 * If PHY already detected, maintains current PHY type in hw struct,
2167 * otherwise executes the PHY detection routine.
2168 **/
2169 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2170 {
2171 s32 status;
2172
2173 DEBUGFUNC("ixgbe_identify_phy_82599");
2174
2175	/* Detect the PHY; returns success if it was already detected. */
2176 status = ixgbe_identify_phy_generic(hw);
2177 if (status != IXGBE_SUCCESS) {
2178 /* 82599 10GBASE-T requires an external PHY */
2179 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2180 return status;
2181 else
2182 status = ixgbe_identify_module_generic(hw);
2183 }
2184
2185 /* Set PHY type none if no PHY detected */
2186 if (hw->phy.type == ixgbe_phy_unknown) {
2187 hw->phy.type = ixgbe_phy_none;
2188 return IXGBE_SUCCESS;
2189 }
2190
2191 /* Return error if SFP module has been detected but is not supported */
2192 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2193 return IXGBE_ERR_SFP_NOT_SUPPORTED;
2194
2195 return status;
2196 }
2197
2198 /**
2199 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2200 * @hw: pointer to hardware structure
2201 *
2202 * Determines physical layer capabilities of the current configuration.
2203 **/
2204 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2205 {
2206 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2207 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2208 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2209 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2210 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2211 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2212 u16 ext_ability = 0;
2213
2214	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
2215
2216 hw->phy.ops.identify(hw);
2217
2218 switch (hw->phy.type) {
2219 case ixgbe_phy_tn:
2220 case ixgbe_phy_cu_unknown:
2221 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2222 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2223 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2224 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2225 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2226 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2227 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2228 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2229 goto out;
2230 default:
2231 break;
2232 }
2233
2234 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2235 case IXGBE_AUTOC_LMS_1G_AN:
2236 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2237 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2238 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2239 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2240 goto out;
2241 } else
2242 /* SFI mode so read SFP module */
2243 goto sfp_check;
2244 break;
2245 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2246 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2247 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2248 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2249 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2250 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2251 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2252 goto out;
2253 break;
2254 case IXGBE_AUTOC_LMS_10G_SERIAL:
2255 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2256 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2257 goto out;
2258 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2259 goto sfp_check;
2260 break;
2261 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2262 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2263 if (autoc & IXGBE_AUTOC_KX_SUPP)
2264 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2265 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2266 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2267 if (autoc & IXGBE_AUTOC_KR_SUPP)
2268 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2269 goto out;
2270 break;
2271 default:
2272 goto out;
2273 break;
2274 }
2275
2276 sfp_check:
2277	/* SFP check must be done last since DA modules are sometimes used to
2278	 * test KR mode - we need to identify KR mode correctly before checking
2279	 * the SFP module.  Call identify_sfp because the pluggable module may have changed */
2280 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2281 out:
2282 return physical_layer;
2283 }
2284
2285 /**
2286 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2287 * @hw: pointer to hardware structure
2288 * @regval: register value to write to RXCTRL
2289 *
2290 * Enables the Rx DMA unit for 82599
2291 **/
2292 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2293 {
2294
2295 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2296
2297 /*
2298 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2299 * If traffic is incoming before we enable the Rx unit, it could hang
2300 * the Rx DMA unit. Therefore, make sure the security engine is
2301 * completely disabled prior to enabling the Rx unit.
2302 */
2303
2304 hw->mac.ops.disable_sec_rx_path(hw);
2305
2306 if (regval & IXGBE_RXCTRL_RXEN)
2307 ixgbe_enable_rx(hw);
2308 else
2309 ixgbe_disable_rx(hw);
2310
2311 hw->mac.ops.enable_sec_rx_path(hw);
2312
2313 return IXGBE_SUCCESS;
2314 }
2315
2316 /**
2317 * ixgbe_verify_fw_version_82599 - verify FW version for 82599
2318 * @hw: pointer to hardware structure
2319 *
2320 * Verifies that the installed firmware version is 0.6 or higher
2321 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2322 *
2323 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2324 * if the FW version is not supported.
2325 **/
2326 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2327 {
2328 s32 status = IXGBE_ERR_EEPROM_VERSION;
2329 u16 fw_offset, fw_ptp_cfg_offset;
2330 u16 fw_version;
2331
2332 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2333
2334 /* firmware check is only necessary for SFI devices */
2335 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2336 status = IXGBE_SUCCESS;
2337 goto fw_version_out;
2338 }
2339
2340 /* get the offset to the Firmware Module block */
2341 if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2342 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2343 "eeprom read at offset %d failed", IXGBE_FW_PTR);
2344 return IXGBE_ERR_EEPROM_VERSION;
2345 }
2346
2347 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2348 goto fw_version_out;
2349
2350 /* get the offset to the Pass Through Patch Configuration block */
2351 if (hw->eeprom.ops.read(hw, (fw_offset +
2352 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2353 &fw_ptp_cfg_offset)) {
2354 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2355 "eeprom read at offset %d failed",
2356 fw_offset +
2357 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2358 return IXGBE_ERR_EEPROM_VERSION;
2359 }
2360
2361 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2362 goto fw_version_out;
2363
2364 /* get the firmware version */
2365 if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2366 IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2367 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2368 "eeprom read at offset %d failed",
2369 fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2370 return IXGBE_ERR_EEPROM_VERSION;
2371 }
2372
2373 if (fw_version > 0x5)
2374 status = IXGBE_SUCCESS;
2375
2376 fw_version_out:
2377 return status;
2378 }
2379
2380 /**
2381 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2382 * @hw: pointer to hardware structure
2383 *
2384 * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2385 * returns FALSE.  Smart Speed must be disabled if the LESM FW module is enabled.
2386 **/
2387 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2388 {
2389 bool lesm_enabled = FALSE;
2390 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2391 s32 status;
2392
2393 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2394
2395 /* get the offset to the Firmware Module block */
2396 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2397
2398 if ((status != IXGBE_SUCCESS) ||
2399 (fw_offset == 0) || (fw_offset == 0xFFFF))
2400 goto out;
2401
2402 /* get the offset to the LESM Parameters block */
2403 status = hw->eeprom.ops.read(hw, (fw_offset +
2404 IXGBE_FW_LESM_PARAMETERS_PTR),
2405 &fw_lesm_param_offset);
2406
2407 if ((status != IXGBE_SUCCESS) ||
2408 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2409 goto out;
2410
2411 /* get the LESM state word */
2412 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2413 IXGBE_FW_LESM_STATE_1),
2414 &fw_lesm_state);
2415
2416 if ((status == IXGBE_SUCCESS) &&
2417 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2418 lesm_enabled = TRUE;
2419
2420 out:
2421 return lesm_enabled;
2422 }
2423
2424 /**
2425 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2426 * fastest available method
2427 *
2428 * @hw: pointer to hardware structure
2429 * @offset: offset of word in EEPROM to read
2430 * @words: number of words
2431 * @data: word(s) read from the EEPROM
2432 *
2433 * Retrieves 16 bit word(s) read from EEPROM
2434 **/
2435 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2436 u16 words, u16 *data)
2437 {
2438 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2439 s32 ret_val = IXGBE_ERR_CONFIG;
2440
2441 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2442
2443 /*
2444 * If EEPROM is detected and can be addressed using 14 bits,
2445	 * use EERD; otherwise use bit-bang
2446 */
2447 if ((eeprom->type == ixgbe_eeprom_spi) &&
2448 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2449 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2450 data);
2451 else
2452 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2453 words,
2454 data);
2455
2456 return ret_val;
2457 }
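/*
 * Worked example of the addressing check above, assuming
 * IXGBE_EERD_MAX_ADDR is the 14-bit limit 0x3FFF: a read of 8 words
 * starting at offset 0x3FF8 uses EERD (0x3FF8 + 7 == 0x3FFF), while the
 * same read starting at 0x3FF9 would exceed the limit and fall back to
 * the slower bit-bang path.
 */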
2458
2459 /**
2460 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2461 * fastest available method
2462 *
2463 * @hw: pointer to hardware structure
2464 * @offset: offset of word in the EEPROM to read
2465 * @data: word read from the EEPROM
2466 *
2467 * Reads a 16 bit word from the EEPROM
2468 **/
2469 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2470 u16 offset, u16 *data)
2471 {
2472 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2473 s32 ret_val = IXGBE_ERR_CONFIG;
2474
2475 DEBUGFUNC("ixgbe_read_eeprom_82599");
2476
2477 /*
2478 * If EEPROM is detected and can be addressed using 14 bits,
2479	 * use EERD; otherwise use bit-bang
2480 */
2481 if ((eeprom->type == ixgbe_eeprom_spi) &&
2482 (offset <= IXGBE_EERD_MAX_ADDR))
2483 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2484 else
2485 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2486
2487 return ret_val;
2488 }
2489
2490 /**
2491 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2492 *
2493 * @hw: pointer to hardware structure
2494 *
2495 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2496 * full pipeline reset. This function assumes the SW/FW lock is held.
2497 **/
2498 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2499 {
2500 s32 ret_val;
2501 u32 anlp1_reg = 0;
2502 u32 i, autoc_reg, autoc2_reg;
2503
2504 /* Enable link if disabled in NVM */
2505 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2506 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2507 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2508 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2509 IXGBE_WRITE_FLUSH(hw);
2510 }
2511
2512 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2513 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2514 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2515 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2516 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2517 /* Wait for AN to leave state 0 */
2518 for (i = 0; i < 10; i++) {
2519 msec_delay(4);
2520 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2521 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2522 break;
2523 }
2524
2525 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2526 DEBUGOUT("auto negotiation not completed\n");
2527 ret_val = IXGBE_ERR_RESET_FAILED;
2528 goto reset_pipeline_out;
2529 }
2530
2531 ret_val = IXGBE_SUCCESS;
2532
2533 reset_pipeline_out:
2534 /* Write AUTOC register with original LMS field and Restart_AN */
2535 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2536 IXGBE_WRITE_FLUSH(hw);
2537
2538 return ret_val;
2539 }
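/*
 * A hedged sketch of the locking contract noted above; the semaphore
 * mask shown is an assumption for illustration:
 *
 *	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) ==
 *	    IXGBE_SUCCESS) {
 *		ret = ixgbe_reset_pipeline_82599(hw);
 *		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
 *	}
 */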
2540
2541 /**
2542 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2543 * @hw: pointer to hardware structure
2544 * @byte_offset: byte offset to read
2545 * @dev_addr: address to read from
2546 * @data: value read
2547 *
2548 * Performs a byte read operation from the SFP module's EEPROM over the I2C
2549 * interface at the specified device address.
2550 **/
2551 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2552 u8 dev_addr, u8 *data)
2553 {
2554 u32 esdp;
2555 s32 status;
2556 s32 timeout = 200;
2557
2558 DEBUGFUNC("ixgbe_read_i2c_byte_82599");
2559
2560 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2561 /* Acquire I2C bus ownership. */
2562 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2563 esdp |= IXGBE_ESDP_SDP0;
2564 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2565 IXGBE_WRITE_FLUSH(hw);
2566
2567 while (timeout) {
2568 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2569 if (esdp & IXGBE_ESDP_SDP1)
2570 break;
2571
2572 msec_delay(5);
2573 timeout--;
2574 }
2575
2576 if (!timeout) {
2577			DEBUGOUT("Driver can't access resource,"
2578			    " timed out acquiring the I2C bus.\n");
2579 status = IXGBE_ERR_I2C;
2580 goto release_i2c_access;
2581 }
2582 }
2583
2584 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2585
2586 release_i2c_access:
2587
2588 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2589 /* Release I2C bus ownership. */
2590 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2591 esdp &= ~IXGBE_ESDP_SDP0;
2592 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2593 IXGBE_WRITE_FLUSH(hw);
2594 }
2595
2596 return status;
2597 }
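/*
 * The shared-QSFP I2C bus handshake implemented above, summarized
 * (derived from the code, not from a datasheet citation):
 *
 *	request:  set SDP0, flush
 *	grant:	  poll SDP1, up to 200 x 5 ms = 1 s
 *	transfer: ixgbe_read_i2c_byte_generic()
 *	release:  clear SDP0, flush
 *
 * ixgbe_write_i2c_byte_82599() below follows the same sequence around
 * ixgbe_write_i2c_byte_generic().
 */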
2598
2599 /**
2600 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2601 * @hw: pointer to hardware structure
2602 * @byte_offset: byte offset to write
2603 * @dev_addr: address to write to
2604 * @data: value to write
2605 *
2606 * Performs a byte write operation to the SFP module's EEPROM over the I2C
2607 * interface at the specified device address.
2608 **/
2609 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2610 u8 dev_addr, u8 data)
2611 {
2612 u32 esdp;
2613 s32 status;
2614 s32 timeout = 200;
2615
2616 DEBUGFUNC("ixgbe_write_i2c_byte_82599");
2617
2618 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2619 /* Acquire I2C bus ownership. */
2620 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2621 esdp |= IXGBE_ESDP_SDP0;
2622 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2623 IXGBE_WRITE_FLUSH(hw);
2624
2625 while (timeout) {
2626 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2627 if (esdp & IXGBE_ESDP_SDP1)
2628 break;
2629
2630 msec_delay(5);
2631 timeout--;
2632 }
2633
2634 if (!timeout) {
2635			DEBUGOUT("Driver can't access resource,"
2636			    " timed out acquiring the I2C bus.\n");
2637 status = IXGBE_ERR_I2C;
2638 goto release_i2c_access;
2639 }
2640 }
2641
2642 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2643
2644 release_i2c_access:
2645
2646 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2647 /* Release I2C bus ownership. */
2648 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2649 esdp &= ~IXGBE_ESDP_SDP0;
2650 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2651 IXGBE_WRITE_FLUSH(hw);
2652 }
2653
2654 return status;
2655 }
2656