ixgbe_82599.c revision 1.1.12.2 1 /* $NetBSD: ixgbe_82599.c,v 1.1.12.2 2017/12/03 11:37:29 jdolecek Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 320688 2017-07-05 17:27:03Z erj $*/
36
37 #include "ixgbe_type.h"
38 #include "ixgbe_82599.h"
39 #include "ixgbe_api.h"
40 #include "ixgbe_common.h"
41 #include "ixgbe_phy.h"
42
43 #define IXGBE_82599_MAX_TX_QUEUES 128
44 #define IXGBE_82599_MAX_RX_QUEUES 128
45 #define IXGBE_82599_RAR_ENTRIES 128
46 #define IXGBE_82599_MC_TBL_SIZE 128
47 #define IXGBE_82599_VFT_TBL_SIZE 128
48 #define IXGBE_82599_RX_PB_SIZE 512
49
50 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
51 ixgbe_link_speed speed,
52 bool autoneg_wait_to_complete);
53 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
54 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
55 u16 offset, u16 *data);
56 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
57 u16 words, u16 *data);
58 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
59 u8 dev_addr, u8 *data);
60 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
61 u8 dev_addr, u8 data);
62
63 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
64 {
65 struct ixgbe_mac_info *mac = &hw->mac;
66
67 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
68
69 /*
70 * enable the laser control functions for SFP+ fiber
71 * and MNG not enabled
72 */
73 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
74 !ixgbe_mng_enabled(hw)) {
75 mac->ops.disable_tx_laser =
76 ixgbe_disable_tx_laser_multispeed_fiber;
77 mac->ops.enable_tx_laser =
78 ixgbe_enable_tx_laser_multispeed_fiber;
79 mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
80
81 } else {
82 mac->ops.disable_tx_laser = NULL;
83 mac->ops.enable_tx_laser = NULL;
84 mac->ops.flap_tx_laser = NULL;
85 }
86
87 if (hw->phy.multispeed_fiber) {
88 /* Set up dual speed SFP+ support */
89 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
90 mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
91 mac->ops.set_rate_select_speed =
92 ixgbe_set_hard_rate_select_speed;
93 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
94 mac->ops.set_rate_select_speed =
95 ixgbe_set_soft_rate_select_speed;
96 } else {
97 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
98 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
99 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
100 !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
101 mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
102 } else {
103 mac->ops.setup_link = ixgbe_setup_mac_link_82599;
104 }
105 }
106 }
107
/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_SFP_NOT_SUPPORTED when the module is
 * rejected, or whatever status phy->ops.identify() produced (that status
 * is carried through unchanged even when setup continues).
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus: program SDP0/SDP1
		 * direction and level bits and take both pins out of
		 * native mode so the 82599-specific byte handlers below
		 * can drive the shared bus.
		 * NOTE(review): exact pin polarity/direction semantics are
		 * a hardware contract -- confirm against the 82599
		 * datasheet before changing this sequence.
		 */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	/* Only an unsupported module aborts init; other identify errors
	 * still fall through so the function pointers get set up.
	 */
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	/* A recognized SFP module needs no PHY reset hook */
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
			ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TN PHYs get dedicated link setup/check/fw-version hooks */
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}
174
/**
 * ixgbe_setup_sfp_modules_82599 - Run the SFP module init sequence
 * @hw: pointer to hardware structure
 *
 * Plays back the EEPROM-provided init sequence for the detected SFP
 * module: each 16-bit word read from the EEPROM (starting just past
 * data_offset) is written to CORECTL until an 0xffff terminator word is
 * seen, then the DSP is restarted in SFI mode via prot_autoc_write.
 * The register sequence is wrapped in the MAC CSR SW/FW semaphore.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, IXGBE_ERR_SFP_SETUP_NOT_COMPLETE if the AUTOC write fails, or
 * IXGBE_ERR_PHY on an EEPROM read failure mid-sequence.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	/* Nothing to do until a module has actually been identified */
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Stream init words to CORECTL until the 0xffff sentinel */
		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* EEPROM read failed mid-sequence: drop the semaphore before
	 * reporting so other agents are not locked out.
	 */
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}
241
242 /**
243 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
244 * @hw: pointer to hardware structure
245 * @locked: Return the if we locked for this read.
246 * @reg_val: Value we read from AUTOC
247 *
248 * For this part (82599) we need to wrap read-modify-writes with a possible
249 * FW/SW lock. It is assumed this lock will be freed with the next
250 * prot_autoc_write_82599().
251 */
252 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
253 {
254 s32 ret_val;
255
256 *locked = FALSE;
257 /* If LESM is on then we need to hold the SW/FW semaphore. */
258 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
259 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
260 IXGBE_GSSR_MAC_CSR_SM);
261 if (ret_val != IXGBE_SUCCESS)
262 return IXGBE_ERR_SWFW_SYNC;
263
264 *locked = TRUE;
265 }
266
267 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
268 return IXGBE_SUCCESS;
269 }
270
/**
 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    a previous prot_autoc_read_82599.
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
 * AUTOC. Likewise after a write we need to do a pipeline reset.
 */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 * - We didn't do it already (in the read part of a read-modify-write)
	 * - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	/* Every AUTOC write is followed by a pipeline reset */
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}
314
/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware (other than reading FWSM for the ARC flag
 * and the PCIe MSI-X count).
 *
 * Returns the status of ixgbe_init_ops_generic(); the 82599-specific
 * overrides below are applied unconditionally.
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* Start from the generic defaults, then override below */
	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware limits (table/queue sizes) */
	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* ARC subsystem is present when any FW mode bit is set in FWSM */
	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				     & IXGBE_FWSM_MODE_MASK);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	/* Bypass (LAN bypass adapter) support */
	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}
408
/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_LINK_SETUP for an unrecognized
 * link mode select (LMS) value.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");


	/* Check if 1G SFP module: these modules are 1G only, so AUTOC
	 * need not be consulted at all.
	 */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		/* Backplane: start from UNKNOWN (assumed to be 0 -- see
		 * ixgbe_type.h) and OR in each supported lane type; KR and
		 * KX4 both contribute 10G, KX contributes 1G.
		 */
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		/* SGMII adds a 100M floor to the backplane lane speeds */
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*autoneg = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break;	/* not reached; kept to match upstream style */
	}

	/* Multispeed fiber can always try both rates */
	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;

		/* QSFP must not enable full auto-negotiation
		 * Limited autoneg is enabled at 1G
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
			*autoneg = FALSE;
		else
			*autoneg = TRUE;
	}

out:
	return status;
}
520
521 /**
522 * ixgbe_get_media_type_82599 - Get media type
523 * @hw: pointer to hardware structure
524 *
525 * Returns the media type (fiber, copper, backplane)
526 **/
527 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
528 {
529 enum ixgbe_media_type media_type;
530
531 DEBUGFUNC("ixgbe_get_media_type_82599");
532
533 /* Detect if there is a copper PHY attached. */
534 switch (hw->phy.type) {
535 case ixgbe_phy_cu_unknown:
536 case ixgbe_phy_tn:
537 media_type = ixgbe_media_type_copper;
538 goto out;
539 default:
540 break;
541 }
542
543 switch (hw->device_id) {
544 case IXGBE_DEV_ID_82599_KX4:
545 case IXGBE_DEV_ID_82599_KX4_MEZZ:
546 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
547 case IXGBE_DEV_ID_82599_KR:
548 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
549 case IXGBE_DEV_ID_82599_XAUI_LOM:
550 /* Default device ID is mezzanine card KX/KX4 */
551 media_type = ixgbe_media_type_backplane;
552 break;
553 case IXGBE_DEV_ID_82599_SFP:
554 case IXGBE_DEV_ID_82599_SFP_FCOE:
555 case IXGBE_DEV_ID_82599_SFP_EM:
556 case IXGBE_DEV_ID_82599_SFP_SF2:
557 case IXGBE_DEV_ID_82599_SFP_SF_QP:
558 case IXGBE_DEV_ID_82599EN_SFP:
559 media_type = ixgbe_media_type_fiber;
560 break;
561 case IXGBE_DEV_ID_82599_CX4:
562 media_type = ixgbe_media_type_cx4;
563 break;
564 case IXGBE_DEV_ID_82599_T3_LOM:
565 media_type = ixgbe_media_type_copper;
566 break;
567 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
568 media_type = ixgbe_media_type_fiber_qsfp;
569 break;
570 case IXGBE_DEV_ID_82599_BYPASS:
571 media_type = ixgbe_media_type_fiber_fixed;
572 hw->phy.multispeed_fiber = TRUE;
573 break;
574 default:
575 media_type = ixgbe_media_type_unknown;
576 break;
577 }
578 out:
579 return media_type;
580 }
581
582 /**
583 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
584 * @hw: pointer to hardware structure
585 *
586 * Disables link during D3 power down sequence.
587 *
588 **/
589 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
590 {
591 u32 autoc2_reg;
592 u16 ee_ctrl_2 = 0;
593
594 DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
595 ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
596
597 if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
598 ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
599 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
600 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
601 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
602 }
603 }
604
/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link. Performs autonegotiation if needed.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC-class status if the LESM
 * semaphore cannot be taken, or IXGBE_ERR_AUTONEG_NOT_COMPLETE when
 * polling for backplane autoneg times out.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/* reset_pipeline requires us to hold this lock as it writes to
	 * AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		/* Completion polling only applies to the KX/KX4/KR
		 * backplane link modes.
		 */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll LINKS in 100ms steps, IXGBE_AUTO_NEG_TIME
			 * iterations total.
			 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

out:
	return status;
}
672
673 /**
674 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
675 * @hw: pointer to hardware structure
676 *
677 * The base drivers may require better control over SFP+ module
678 * PHY states. This includes selectively shutting down the Tx
679 * laser on the PHY, effectively halting physical link.
680 **/
681 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
682 {
683 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
684
685 /* Blocked by MNG FW so bail */
686 if (ixgbe_check_reset_blocked(hw))
687 return;
688
689 /* Disable Tx laser; allow 100us to go dark per spec */
690 esdp_reg |= IXGBE_ESDP_SDP3;
691 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
692 IXGBE_WRITE_FLUSH(hw);
693 usec_delay(100);
694 }
695
696 /**
697 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
698 * @hw: pointer to hardware structure
699 *
700 * The base drivers may require better control over SFP+ module
701 * PHY states. This includes selectively turning on the Tx
702 * laser on the PHY, effectively starting physical link.
703 **/
704 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
705 {
706 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
707
708 /* Enable Tx laser; allow 100ms to light up */
709 esdp_reg &= ~IXGBE_ESDP_SDP3;
710 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
711 IXGBE_WRITE_FLUSH(hw);
712 msec_delay(100);
713 }
714
715 /**
716 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
717 * @hw: pointer to hardware structure
718 *
719 * When the driver changes the link speeds that it can support,
720 * it sets autotry_restart to TRUE to indicate that we need to
721 * initiate a new autotry session with the link partner. To do
722 * so, we set the speed then disable and re-enable the Tx laser, to
723 * alert the link partner that it also needs to restart autotry on its
724 * end. This is consistent with TRUE clause 37 autoneg, which also
725 * involves a loss of signal.
726 **/
727 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
728 {
729 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
730
731 /* Blocked by MNG FW so bail */
732 if (ixgbe_check_reset_blocked(hw))
733 return;
734
735 if (hw->mac.autotry_restart) {
736 ixgbe_disable_tx_laser_multispeed_fiber(hw);
737 ixgbe_enable_tx_laser_multispeed_fiber(hw);
738 hw->mac.autotry_restart = FALSE;
739 }
740 }
741
742 /**
743 * ixgbe_set_hard_rate_select_speed - Set module link speed
744 * @hw: pointer to hardware structure
745 * @speed: link speed to set
746 *
747 * Set module link speed via RS0/RS1 rate select pins.
748 */
749 void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
750 ixgbe_link_speed speed)
751 {
752 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
753
754 switch (speed) {
755 case IXGBE_LINK_SPEED_10GB_FULL:
756 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
757 break;
758 case IXGBE_LINK_SPEED_1GB_FULL:
759 esdp_reg &= ~IXGBE_ESDP_SDP5;
760 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
761 break;
762 default:
763 DEBUGOUT("Invalid fixed module speed\n");
764 return;
765 }
766
767 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
768 IXGBE_WRITE_FLUSH(hw);
769 }
770
/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm: first try full speed
 * advertisement, then retry with KR disabled (smart_speed_active), and
 * finally fall back to full advertisement again if that also fails.
 *
 * Returns the status of the last ixgbe_setup_mac_link_82599() /
 * ixgbe_check_link() call that was made.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	/* Report when SmartSpeed settled for less than the best rate */
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
882
/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.  The new
 * AUTOC value is derived from the requested speed, the current link
 * mode, and the EEPROM-default (orig_autoc) KR/KX4/KX capabilities;
 * the register is only rewritten (via prot_autoc_write, which also
 * resets the pipeline) when the computed value actually differs.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_LINK_SETUP for an unsupported speed,
 * or IXGBE_ERR_AUTONEG_NOT_COMPLETE on backplane autoneg timeout.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
	u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
	u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	/* Mask the request down to what the hardware can actually do */
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested,
		 * limited to the lane types the EEPROM defaults allow.
		 * KR is additionally suppressed while SmartSpeed is active.
		 */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			/* Intel QSFP modules negotiate at 1G even without
			 * full autoneg being requested.
			 */
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}
1000
1001 /**
1002 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1003 * @hw: pointer to hardware structure
1004 * @speed: new link speed
1005 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
1006 *
1007 * Restarts link on PHY and MAC based on settings passed in.
1008 **/
1009 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1010 ixgbe_link_speed speed,
1011 bool autoneg_wait_to_complete)
1012 {
1013 s32 status;
1014
1015 DEBUGFUNC("ixgbe_setup_copper_link_82599");
1016
1017 /* Setup the PHY according to input speed */
1018 status = hw->phy.ops.setup_link_speed(hw, speed,
1019 autoneg_wait_to_complete);
1020 /* Set up MAC */
1021 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1022
1023 return status;
1024 }
1025
1026 /**
1027 * ixgbe_reset_hw_82599 - Perform hardware reset
1028 * @hw: pointer to hardware structure
1029 *
1030 * Resets the hardware by resetting the transmit and receive units, masks
1031 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1032 * reset.
1033 **/
1034 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1035 {
1036 ixgbe_link_speed link_speed;
1037 s32 status;
1038 u32 ctrl = 0;
1039 u32 i, autoc, autoc2;
1040 u32 curr_lms;
1041 bool link_up = FALSE;
1042
1043 DEBUGFUNC("ixgbe_reset_hw_82599");
1044
1045 /* Call adapter stop to disable tx/rx and clear interrupts */
1046 status = hw->mac.ops.stop_adapter(hw);
1047 if (status != IXGBE_SUCCESS)
1048 goto reset_hw_out;
1049
1050 /* flush pending Tx transactions */
1051 ixgbe_clear_tx_pending(hw);
1052
1053 /* PHY ops must be identified and initialized prior to reset */
1054
1055 /* Identify PHY and related function pointers */
1056 status = hw->phy.ops.init(hw);
1057
1058 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1059 goto reset_hw_out;
1060
1061 /* Setup SFP module if there is one present. */
1062 if (hw->phy.sfp_setup_needed) {
1063 status = hw->mac.ops.setup_sfp(hw);
1064 hw->phy.sfp_setup_needed = FALSE;
1065 }
1066
1067 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1068 goto reset_hw_out;
1069
1070 /* Reset PHY */
1071 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1072 hw->phy.ops.reset(hw);
1073
1074 /* remember AUTOC from before we reset */
1075 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
1076
1077 mac_reset_top:
1078 /*
1079 * Issue global reset to the MAC. Needs to be SW reset if link is up.
1080 * If link reset is used when link is up, it might reset the PHY when
1081 * mng is using it. If link is down or the flag to force full link
1082 * reset is set, then perform link reset.
1083 */
1084 ctrl = IXGBE_CTRL_LNK_RST;
1085 if (!hw->force_full_reset) {
1086 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
1087 if (link_up)
1088 ctrl = IXGBE_CTRL_RST;
1089 }
1090
1091 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1092 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1093 IXGBE_WRITE_FLUSH(hw);
1094
1095 /* Poll for reset bit to self-clear meaning reset is complete */
1096 for (i = 0; i < 10; i++) {
1097 usec_delay(1);
1098 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1099 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1100 break;
1101 }
1102
1103 if (ctrl & IXGBE_CTRL_RST_MASK) {
1104 status = IXGBE_ERR_RESET_FAILED;
1105 DEBUGOUT("Reset polling failed to complete.\n");
1106 }
1107
1108 msec_delay(50);
1109
1110 /*
1111 * Double resets are required for recovery from certain error
1112 * conditions. Between resets, it is necessary to stall to
1113 * allow time for any pending HW events to complete.
1114 */
1115 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1116 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1117 goto mac_reset_top;
1118 }
1119
1120 /*
1121 * Store the original AUTOC/AUTOC2 values if they have not been
1122 * stored off yet. Otherwise restore the stored original
1123 * values since the reset operation sets back to defaults.
1124 */
1125 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1126 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1127
1128 /* Enable link if disabled in NVM */
1129 if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1130 autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1131 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1132 IXGBE_WRITE_FLUSH(hw);
1133 }
1134
1135 if (hw->mac.orig_link_settings_stored == FALSE) {
1136 hw->mac.orig_autoc = autoc;
1137 hw->mac.orig_autoc2 = autoc2;
1138 hw->mac.orig_link_settings_stored = TRUE;
1139 } else {
1140
1141 /* If MNG FW is running on a multi-speed device that
1142 * doesn't autoneg with out driver support we need to
1143 * leave LMS in the state it was before we MAC reset.
1144 * Likewise if we support WoL we don't want change the
1145 * LMS state.
1146 */
1147 if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
1148 hw->wol_enabled)
1149 hw->mac.orig_autoc =
1150 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
1151 curr_lms;
1152
1153 if (autoc != hw->mac.orig_autoc) {
1154 status = hw->mac.ops.prot_autoc_write(hw,
1155 hw->mac.orig_autoc,
1156 FALSE);
1157 if (status != IXGBE_SUCCESS)
1158 goto reset_hw_out;
1159 }
1160
1161 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1162 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1163 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1164 autoc2 |= (hw->mac.orig_autoc2 &
1165 IXGBE_AUTOC2_UPPER_MASK);
1166 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1167 }
1168 }
1169
1170 /* Store the permanent mac address */
1171 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1172
1173 /*
1174 * Store MAC address from RAR0, clear receive address registers, and
1175 * clear the multicast table. Also reset num_rar_entries to 128,
1176 * since we modify this value when programming the SAN MAC address.
1177 */
1178 hw->mac.num_rar_entries = 128;
1179 hw->mac.ops.init_rx_addrs(hw);
1180
1181 /* Store the permanent SAN mac address */
1182 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1183
1184 /* Add the SAN MAC address to the RAR only if it's a valid address */
1185 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1186 /* Save the SAN MAC RAR index */
1187 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1188
1189 hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
1190 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1191
1192 /* clear VMDq pool/queue selection for this RAR */
1193 hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
1194 IXGBE_CLEAR_VMDQ_ALL);
1195
1196 /* Reserve the last RAR for the SAN MAC address */
1197 hw->mac.num_rar_entries--;
1198 }
1199
1200 /* Store the alternative WWNN/WWPN prefix */
1201 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1202 &hw->mac.wwpn_prefix);
1203
1204 reset_hw_out:
1205 return status;
1206 }
1207
1208 /**
1209 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
1210 * @hw: pointer to hardware structure
1211 * @fdircmd: current value of FDIRCMD register
1212 */
1213 static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
1214 {
1215 int i;
1216
1217 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1218 *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1219 if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1220 return IXGBE_SUCCESS;
1221 usec_delay(10);
1222 }
1223
1224 return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
1225 }
1226
1227 /**
1228 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1229 * @hw: pointer to hardware structure
1230 **/
1231 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1232 {
1233 s32 err;
1234 int i;
1235 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1236 u32 fdircmd;
1237 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1238
1239 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1240
1241 /*
1242 * Before starting reinitialization process,
1243 * FDIRCMD.CMD must be zero.
1244 */
1245 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1246 if (err) {
1247 DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
1248 return err;
1249 }
1250
1251 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1252 IXGBE_WRITE_FLUSH(hw);
1253 /*
1254 * 82599 adapters flow director init flow cannot be restarted,
1255 * Workaround 82599 silicon errata by performing the following steps
1256 * before re-writing the FDIRCTRL control register with the same value.
1257 * - write 1 to bit 8 of FDIRCMD register &
1258 * - write 0 to bit 8 of FDIRCMD register
1259 */
1260 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1261 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1262 IXGBE_FDIRCMD_CLEARHT));
1263 IXGBE_WRITE_FLUSH(hw);
1264 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1265 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1266 ~IXGBE_FDIRCMD_CLEARHT));
1267 IXGBE_WRITE_FLUSH(hw);
1268 /*
1269 * Clear FDIR Hash register to clear any leftover hashes
1270 * waiting to be programmed.
1271 */
1272 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1273 IXGBE_WRITE_FLUSH(hw);
1274
1275 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1276 IXGBE_WRITE_FLUSH(hw);
1277
1278 /* Poll init-done after we write FDIRCTRL register */
1279 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1280 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1281 IXGBE_FDIRCTRL_INIT_DONE)
1282 break;
1283 msec_delay(1);
1284 }
1285 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1286 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1287 return IXGBE_ERR_FDIR_REINIT_FAILED;
1288 }
1289
1290 /* Clear FDIR statistics registers (read to clear) */
1291 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1292 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1293 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1294 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1295 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1296
1297 return IXGBE_SUCCESS;
1298 }
1299
1300 /**
1301 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1302 * @hw: pointer to hardware structure
1303 * @fdirctrl: value to write to flow director control register
1304 **/
1305 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1306 {
1307 int i;
1308
1309 DEBUGFUNC("ixgbe_fdir_enable_82599");
1310
1311 /* Prime the keys for hashing */
1312 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1313 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1314
1315 /*
1316 * Poll init-done after we write the register. Estimated times:
1317 * 10G: PBALLOC = 11b, timing is 60us
1318 * 1G: PBALLOC = 11b, timing is 600us
1319 * 100M: PBALLOC = 11b, timing is 6ms
1320 *
1321 * Multiple these timings by 4 if under full Rx load
1322 *
1323 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1324 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1325 * this might not finish in our poll time, but we can live with that
1326 * for now.
1327 */
1328 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1329 IXGBE_WRITE_FLUSH(hw);
1330 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1331 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1332 IXGBE_FDIRCTRL_INIT_DONE)
1333 break;
1334 msec_delay(1);
1335 }
1336
1337 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1338 DEBUGOUT("Flow Director poll time exceeded!\n");
1339 }
1340
1341 /**
1342 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1343 * @hw: pointer to hardware structure
1344 * @fdirctrl: value to write to flow director control register, initially
1345 * contains just the value of the Rx packet buffer allocation
1346 **/
1347 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1348 {
1349 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1350
1351 /*
1352 * Continue setup of fdirctrl register bits:
1353 * Move the flexible bytes to use the ethertype - shift 6 words
1354 * Set the maximum length per hash bucket to 0xA filters
1355 * Send interrupt when 64 filters are left
1356 */
1357 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1358 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1359 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1360
1361 /* write hashes and fdirctrl register, poll for completion */
1362 ixgbe_fdir_enable_82599(hw, fdirctrl);
1363
1364 return IXGBE_SUCCESS;
1365 }
1366
1367 /**
1368 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1369 * @hw: pointer to hardware structure
1370 * @fdirctrl: value to write to flow director control register, initially
1371 * contains just the value of the Rx packet buffer allocation
1372 * @cloud_mode: TRUE - cloud mode, FALSE - other mode
1373 **/
1374 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
1375 bool cloud_mode)
1376 {
1377 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1378
1379 /*
1380 * Continue setup of fdirctrl register bits:
1381 * Turn perfect match filtering on
1382 * Report hash in RSS field of Rx wb descriptor
1383 * Initialize the drop queue to queue 127
1384 * Move the flexible bytes to use the ethertype - shift 6 words
1385 * Set the maximum length per hash bucket to 0xA filters
1386 * Send interrupt when 64 (0x4 * 16) filters are left
1387 */
1388 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1389 IXGBE_FDIRCTRL_REPORT_STATUS |
1390 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1391 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1392 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1393 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1394
1395 if (cloud_mode)
1396 fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
1397 IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
1398
1399 /* write hashes and fdirctrl register, poll for completion */
1400 ixgbe_fdir_enable_82599(hw, fdirctrl);
1401
1402 return IXGBE_SUCCESS;
1403 }
1404
1405 /**
1406 * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
1407 * @hw: pointer to hardware structure
1408 * @dropqueue: Rx queue index used for the dropped packets
1409 **/
1410 void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
1411 {
1412 u32 fdirctrl;
1413
1414 DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
1415 /* Clear init done bit and drop queue field */
1416 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1417 fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
1418
1419 /* Set drop queue */
1420 fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
1421 if ((hw->mac.type == ixgbe_mac_X550) ||
1422 (hw->mac.type == ixgbe_mac_X550EM_x) ||
1423 (hw->mac.type == ixgbe_mac_X550EM_a))
1424 fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
1425
1426 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1427 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1428 IXGBE_FDIRCMD_CLEARHT));
1429 IXGBE_WRITE_FLUSH(hw);
1430 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1431 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1432 ~IXGBE_FDIRCMD_CLEARHT));
1433 IXGBE_WRITE_FLUSH(hw);
1434
1435 /* write hashes and fdirctrl register, poll for completion */
1436 ixgbe_fdir_enable_82599(hw, fdirctrl);
1437 }
1438
1439 /*
1440 * These defines allow us to quickly generate all of the necessary instructions
1441 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1442 * for values 0 through 15
1443 */
/*
 * Key bits shared by the bucket and signature hash keys; terms selected
 * by these bits feed both hashes, so they are accumulated once in
 * common_hash and mixed into each result afterwards.
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
/*
 * One bit position of the signature-hash key schedule: key bit n selects
 * a term from lo_hash_dword, key bit n+16 selects one from hi_hash_dword.
 * Expands against the caller's locals common_hash, bucket_hash, sig_hash,
 * lo_hash_dword and hi_hash_dword.
 */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
1462
1463 /**
1464 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1465 * @stream: input bitstream to compute the hash on
1466 *
1467 * This function is almost identical to the function above but contains
1468 * several optimizations such as unwinding all of the loops, letting the
1469 * compiler work out all of the conditional ifs since the keys are static
1470 * defines, and computing two keys at once since the hashed dword stream
1471 * will be the same for both keys.
1472 **/
1473 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1474 union ixgbe_atr_hash_dword common)
1475 {
1476 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1477 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1478
1479 /* record the flow_vm_vlan bits as they are a key part to the hash */
1480 flow_vm_vlan = IXGBE_NTOHL(input.dword);
1481
1482 /* generate common hash dword */
1483 hi_hash_dword = IXGBE_NTOHL(common.dword);
1484
1485 /* low dword is word swapped version of common */
1486 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1487
1488 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1489 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1490
1491 /* Process bits 0 and 16 */
1492 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1493
1494 /*
1495 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1496 * delay this because bit 0 of the stream should not be processed
1497 * so we do not add the VLAN until after bit 0 was processed
1498 */
1499 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1500
1501 /* Process remaining 30 bit of the key */
1502 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1503 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1504 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1505 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1506 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1507 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1508 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1509 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1510 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1511 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1512 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1513 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1514 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1515 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1516 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1517
1518 /* combine common_hash result with signature and bucket hashes */
1519 bucket_hash ^= common_hash;
1520 bucket_hash &= IXGBE_ATR_HASH_MASK;
1521
1522 sig_hash ^= common_hash << 16;
1523 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1524
1525 /* return completed signature hash */
1526 return sig_hash ^ bucket_hash;
1527 }
1528
1529 /**
1530 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1531 * @hw: pointer to hardware structure
1532 * @input: unique input dword
1533 * @common: compressed common input dword
1534 * @queue: queue index to direct traffic to
1535 *
1536 * Note that the tunnel bit in input must not be set when the hardware
1537 * tunneling support does not exist.
1538 **/
1539 void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1540 union ixgbe_atr_hash_dword input,
1541 union ixgbe_atr_hash_dword common,
1542 u8 queue)
1543 {
1544 u64 fdirhashcmd;
1545 u8 flow_type;
1546 bool tunnel;
1547 u32 fdircmd;
1548
1549 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1550
1551 /*
1552 * Get the flow_type in order to program FDIRCMD properly
1553 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1554 * fifth is FDIRCMD.TUNNEL_FILTER
1555 */
1556 tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
1557 flow_type = input.formatted.flow_type &
1558 (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
1559 switch (flow_type) {
1560 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1561 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1562 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1563 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1564 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1565 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1566 break;
1567 default:
1568 DEBUGOUT(" Error on flow type input\n");
1569 return;
1570 }
1571
1572 /* configure FDIRCMD register */
1573 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1574 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1575 fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1576 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1577 if (tunnel)
1578 fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1579
1580 /*
1581 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1582 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1583 */
1584 fdirhashcmd = (u64)fdircmd << 32;
1585 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1586 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1587
1588 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1589
1590 return;
1591 }
1592
/*
 * One bit position of the bucket-hash key schedule: key bit n selects a
 * term from lo_hash_dword, key bit n+16 one from hi_hash_dword.  Expands
 * against the caller's locals bucket_hash, lo_hash_dword, hi_hash_dword.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
1601
1602 /**
1603 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1604 * @atr_input: input bitstream to compute the hash on
1605 * @input_mask: mask for the input bitstream
1606 *
1607 * This function serves two main purposes. First it applies the input_mask
1608 * to the atr_input resulting in a cleaned up atr_input data stream.
1609 * Secondly it computes the hash and stores it in the bkt_hash field at
1610 * the end of the input byte stream. This way it will be available for
1611 * future use without needing to recompute the hash.
1612 **/
1613 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1614 union ixgbe_atr_input *input_mask)
1615 {
1616
1617 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1618 u32 bucket_hash = 0;
1619 u32 hi_dword = 0;
1620 u32 i = 0;
1621
1622 /* Apply masks to input data */
1623 for (i = 0; i < 14; i++)
1624 input->dword_stream[i] &= input_mask->dword_stream[i];
1625
1626 /* record the flow_vm_vlan bits as they are a key part to the hash */
1627 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1628
1629 /* generate common hash dword */
1630 for (i = 1; i <= 13; i++)
1631 hi_dword ^= input->dword_stream[i];
1632 hi_hash_dword = IXGBE_NTOHL(hi_dword);
1633
1634 /* low dword is word swapped version of common */
1635 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1636
1637 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1638 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1639
1640 /* Process bits 0 and 16 */
1641 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1642
1643 /*
1644 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1645 * delay this because bit 0 of the stream should not be processed
1646 * so we do not add the VLAN until after bit 0 was processed
1647 */
1648 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1649
1650 /* Process remaining 30 bit of the key */
1651 for (i = 1; i <= 15; i++)
1652 IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1653
1654 /*
1655 * Limit hash to 13 bits since max bucket count is 8K.
1656 * Store result at the end of the input stream.
1657 */
1658 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1659 }
1660
1661 /**
1662 * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
1663 * @input_mask: mask to be bit swapped
1664 *
1665 * The source and destination port masks for flow director are bit swapped
1666 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1667 * generate a correctly swapped value we need to bit swap the mask and that
1668 * is what is accomplished by this function.
1669 **/
1670 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1671 {
1672 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1673 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1674 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1675 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1676 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1677 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1678 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1679 }
1680
1681 /*
1682 * These two macros are meant to address the fact that we have registers
1683 * that are either all or in part big-endian. As a result on big-endian
1684 * systems we will end up byte swapping the value to little-endian before
1685 * it is byte swapped again and written to the hardware in the original
1686 * big-endian format.
1687 */
/* Reverse the byte order of a host-order u32 for a big-endian register */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* Write a network-order 32-bit value to a big-endian register */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* Swap the bytes of a u16 and convert the result from network order */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1697
/**
 * ixgbe_fdir_set_input_mask_82599 - Program Flow Director field masks
 * @hw: pointer to hardware structure
 * @input_mask: mask to apply to filter input fields; an all-zero field
 *	mask wildcards that field, an all-ones mask matches it exactly
 * @cloud_mode: TRUE - program masks for tunneled (cloud) filters
 *
 * Only no-mask or full-mask is supported for most fields; a partial
 * mask returns IXGBE_ERR_CONFIG.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask, bool cloud_mode)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;
	u32 fdirip6m;
	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* FALLTHROUGH */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		/* masked L4 type requires the ports be masked as well */
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* FALLTHROUGH */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (cloud_mode) {
		fdirm |= IXGBE_FDIRM_L3P;
		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;

		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
		case 0x00:
			/* Mask inner MAC, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
			/* FALLTHROUGH */
		case 0xFF:
			break;
		default:
			DEBUGOUT(" Error on inner_mac byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
		case 0x0:
			/* Mask vxlan id */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			DEBUGOUT(" Error on TNI/VNI byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
		case 0x0:
			/* Mask tunnel type, fall through */
			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			/* FALLTHROUGH */
		case 0xFFFF:
			break;
		default:
			DEBUGOUT(" Error on tunnel type byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);

		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
		 * L3/L3 packets to tunnel.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
			break;
		default:
			break;
		}
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	if (!cloud_mode) {
		/* store the TCP/UDP port masks, bit reversed from port
		 * layout */
		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

		/* write both the same so that UDP and TCP use the same mask */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
		/* also use it for SCTP */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
			break;
		default:
			break;
		}

		/* store source and destination IP masks (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
				     ~input_mask->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
				     ~input_mask->formatted.dst_ip[0]);
	}
	return IXGBE_SUCCESS;
}
1872
/**
 * ixgbe_fdir_write_perfect_filter_82599 - write a perfect filter to hardware
 * @hw: pointer to hardware structure
 * @input: filter field values (IPs/ports stored in network byte order)
 * @soft_id: software index stored alongside the bucket hash in FDIRHASH
 * @queue: Rx queue index to steer matching packets to
 * @cloud_mode: TRUE to program a cloud (inner MAC/VNI) filter instead of
 *	the IP/port fields
 *
 * Programs all filter registers, flushes them, and then issues the
 * ADD_FLOW command through FDIRCMD.  The register writes must complete
 * before the command is issued, because the hardware latches the filter
 * contents when the command executes.  Returns IXGBE_SUCCESS, or the
 * error from ixgbe_fdir_check_cmd_complete() if the command never
 * completes.
 */
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue, bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
				     input->formatted.src_ip[0]);

		/* record the first 32 bits of the destination address
		 * (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
				     input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian):
		 * destination port in the upper 16 bits, source in the lower */
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		/* cloud filters reuse the SIPv6 registers to carry the inner
		 * MAC address and the tunnel network identifier */
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000; /* tunnel-type flag, bit 31 */

		addr_low = ((u32)input->formatted.inner_mac[0] |
				((u32)input->formatted.inner_mac[1] << 8) |
				((u32)input->formatted.inner_mac[2] << 16) |
				((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
				((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
	}

	/* configure FDIRHASH register: bucket hash low, soft_id high */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}
1961
/**
 * ixgbe_fdir_erase_perfect_filter_82599 - remove a perfect filter
 * @hw: pointer to hardware structure
 * @input: filter whose bucket hash identifies the slot to erase
 * @soft_id: software index the filter was originally written with
 *
 * Programs FDIRHASH with the bucket hash/soft_id pair, queries the
 * hardware for a matching filter, and issues REMOVE_FLOW only when the
 * query reports the filter valid.  Returns IXGBE_SUCCESS whether or not
 * a filter was actually removed, or the error from the query command.
 */
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	/* configure FDIRHASH register: bucket hash low, soft_id high */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW before issuing the command */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		/* re-program the hash; the query command consumed it */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return IXGBE_SUCCESS;
}
1997
1998 /**
1999 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
2000 * @hw: pointer to hardware structure
2001 * @input: input bitstream
2002 * @input_mask: mask for the input bitstream
2003 * @soft_id: software index for the filters
2004 * @queue: queue index to direct traffic to
2005 *
2006 * Note that the caller to this function must lock before calling, since the
2007 * hardware writes must be protected from one another.
2008 **/
2009 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2010 union ixgbe_atr_input *input,
2011 union ixgbe_atr_input *input_mask,
2012 u16 soft_id, u8 queue, bool cloud_mode)
2013 {
2014 s32 err = IXGBE_ERR_CONFIG;
2015
2016 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
2017
2018 /*
2019 * Check flow_type formatting, and bail out before we touch the hardware
2020 * if there's a configuration issue
2021 */
2022 switch (input->formatted.flow_type) {
2023 case IXGBE_ATR_FLOW_TYPE_IPV4:
2024 case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
2025 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
2026 if (input->formatted.dst_port || input->formatted.src_port) {
2027 DEBUGOUT(" Error on src/dst port\n");
2028 return IXGBE_ERR_CONFIG;
2029 }
2030 break;
2031 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2032 case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
2033 if (input->formatted.dst_port || input->formatted.src_port) {
2034 DEBUGOUT(" Error on src/dst port\n");
2035 return IXGBE_ERR_CONFIG;
2036 }
2037 /* fall through */
2038 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2039 case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
2040 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2041 case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
2042 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2043 IXGBE_ATR_L4TYPE_MASK;
2044 break;
2045 default:
2046 DEBUGOUT(" Error on flow type input\n");
2047 return err;
2048 }
2049
2050 /* program input mask into the HW */
2051 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
2052 if (err)
2053 return err;
2054
2055 /* apply mask and compute/store hash */
2056 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2057
2058 /* program filters to filter memory */
2059 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2060 soft_id, queue, cloud_mode);
2061 }
2062
2063 /**
2064 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2065 * @hw: pointer to hardware structure
2066 * @reg: analog register to read
2067 * @val: read value
2068 *
2069 * Performs read operation to Omer analog register specified.
2070 **/
2071 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2072 {
2073 u32 core_ctl;
2074
2075 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2076
2077 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2078 (reg << 8));
2079 IXGBE_WRITE_FLUSH(hw);
2080 usec_delay(10);
2081 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2082 *val = (u8)core_ctl;
2083
2084 return IXGBE_SUCCESS;
2085 }
2086
2087 /**
2088 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2089 * @hw: pointer to hardware structure
2090 * @reg: atlas register to write
2091 * @val: value to write
2092 *
2093 * Performs write operation to Omer analog register specified.
2094 **/
2095 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2096 {
2097 u32 core_ctl;
2098
2099 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2100
2101 core_ctl = (reg << 8) | val;
2102 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2103 IXGBE_WRITE_FLUSH(hw);
2104 usec_delay(10);
2105
2106 return IXGBE_SUCCESS;
2107 }
2108
2109 /**
2110 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2111 * @hw: pointer to hardware structure
2112 *
2113 * Starts the hardware using the generic start_hw function
2114 * and the generation start_hw function.
2115 * Then performs revision-specific operations, if any.
2116 **/
2117 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2118 {
2119 s32 ret_val = IXGBE_SUCCESS;
2120
2121 DEBUGFUNC("ixgbe_start_hw_82599");
2122
2123 ret_val = ixgbe_start_hw_generic(hw);
2124 if (ret_val != IXGBE_SUCCESS)
2125 goto out;
2126
2127 ret_val = ixgbe_start_hw_gen2(hw);
2128 if (ret_val != IXGBE_SUCCESS)
2129 goto out;
2130
2131 /* We need to run link autotry after the driver loads */
2132 hw->mac.autotry_restart = TRUE;
2133
2134 if (ret_val == IXGBE_SUCCESS)
2135 ret_val = ixgbe_verify_fw_version_82599(hw);
2136 out:
2137 return ret_val;
2138 }
2139
2140 /**
2141 * ixgbe_identify_phy_82599 - Get physical layer module
2142 * @hw: pointer to hardware structure
2143 *
2144 * Determines the physical layer module found on the current adapter.
2145 * If PHY already detected, maintains current PHY type in hw struct,
2146 * otherwise executes the PHY detection routine.
2147 **/
2148 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2149 {
2150 s32 status;
2151
2152 DEBUGFUNC("ixgbe_identify_phy_82599");
2153
2154 /* Detect PHY if not unknown - returns success if already detected. */
2155 status = ixgbe_identify_phy_generic(hw);
2156 if (status != IXGBE_SUCCESS) {
2157 /* 82599 10GBASE-T requires an external PHY */
2158 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2159 return status;
2160 else
2161 status = ixgbe_identify_module_generic(hw);
2162 }
2163
2164 /* Set PHY type none if no PHY detected */
2165 if (hw->phy.type == ixgbe_phy_unknown) {
2166 hw->phy.type = ixgbe_phy_none;
2167 return IXGBE_SUCCESS;
2168 }
2169
2170 /* Return error if SFP module has been detected but is not supported */
2171 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2172 return IXGBE_ERR_SFP_NOT_SUPPORTED;
2173
2174 return status;
2175 }
2176
2177 /**
2178 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2179 * @hw: pointer to hardware structure
2180 *
2181 * Determines physical layer capabilities of the current configuration.
2182 **/
2183 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2184 {
2185 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2186 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2187 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2188 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2189 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2190 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2191 u16 ext_ability = 0;
2192
2193 DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
2194
2195 hw->phy.ops.identify(hw);
2196
2197 switch (hw->phy.type) {
2198 case ixgbe_phy_tn:
2199 case ixgbe_phy_cu_unknown:
2200 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2201 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2202 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2203 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2204 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2205 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2206 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2207 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2208 goto out;
2209 default:
2210 break;
2211 }
2212
2213 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2214 case IXGBE_AUTOC_LMS_1G_AN:
2215 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2216 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2217 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2218 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2219 goto out;
2220 } else
2221 /* SFI mode so read SFP module */
2222 goto sfp_check;
2223 break;
2224 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2225 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2226 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2227 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2228 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2229 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2230 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2231 goto out;
2232 break;
2233 case IXGBE_AUTOC_LMS_10G_SERIAL:
2234 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2235 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2236 goto out;
2237 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2238 goto sfp_check;
2239 break;
2240 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2241 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2242 if (autoc & IXGBE_AUTOC_KX_SUPP)
2243 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2244 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2245 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2246 if (autoc & IXGBE_AUTOC_KR_SUPP)
2247 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2248 goto out;
2249 break;
2250 default:
2251 goto out;
2252 break;
2253 }
2254
2255 sfp_check:
2256 /* SFP check must be done last since DA modules are sometimes used to
2257 * test KR mode - we need to id KR mode correctly before SFP module.
2258 * Call identify_sfp because the pluggable module may have changed */
2259 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2260 out:
2261 return physical_layer;
2262 }
2263
2264 /**
2265 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2266 * @hw: pointer to hardware structure
2267 * @regval: register value to write to RXCTRL
2268 *
2269 * Enables the Rx DMA unit for 82599
2270 **/
2271 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2272 {
2273
2274 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2275
2276 /*
2277 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2278 * If traffic is incoming before we enable the Rx unit, it could hang
2279 * the Rx DMA unit. Therefore, make sure the security engine is
2280 * completely disabled prior to enabling the Rx unit.
2281 */
2282
2283 hw->mac.ops.disable_sec_rx_path(hw);
2284
2285 if (regval & IXGBE_RXCTRL_RXEN)
2286 ixgbe_enable_rx(hw);
2287 else
2288 ixgbe_disable_rx(hw);
2289
2290 hw->mac.ops.enable_sec_rx_path(hw);
2291
2292 return IXGBE_SUCCESS;
2293 }
2294
2295 /**
2296 * ixgbe_verify_fw_version_82599 - verify FW version for 82599
2297 * @hw: pointer to hardware structure
2298 *
2299 * Verifies that installed the firmware version is 0.6 or higher
2300 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2301 *
2302 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2303 * if the FW version is not supported.
2304 **/
2305 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2306 {
2307 s32 status = IXGBE_ERR_EEPROM_VERSION;
2308 u16 fw_offset, fw_ptp_cfg_offset;
2309 u16 fw_version;
2310
2311 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2312
2313 /* firmware check is only necessary for SFI devices */
2314 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2315 status = IXGBE_SUCCESS;
2316 goto fw_version_out;
2317 }
2318
2319 /* get the offset to the Firmware Module block */
2320 if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2321 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2322 "eeprom read at offset %d failed", IXGBE_FW_PTR);
2323 return IXGBE_ERR_EEPROM_VERSION;
2324 }
2325
2326 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2327 goto fw_version_out;
2328
2329 /* get the offset to the Pass Through Patch Configuration block */
2330 if (hw->eeprom.ops.read(hw, (fw_offset +
2331 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2332 &fw_ptp_cfg_offset)) {
2333 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2334 "eeprom read at offset %d failed",
2335 fw_offset +
2336 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2337 return IXGBE_ERR_EEPROM_VERSION;
2338 }
2339
2340 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2341 goto fw_version_out;
2342
2343 /* get the firmware version */
2344 if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2345 IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2346 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2347 "eeprom read at offset %d failed",
2348 fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2349 return IXGBE_ERR_EEPROM_VERSION;
2350 }
2351
2352 if (fw_version > 0x5)
2353 status = IXGBE_SUCCESS;
2354
2355 fw_version_out:
2356 return status;
2357 }
2358
2359 /**
2360 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2361 * @hw: pointer to hardware structure
2362 *
2363 * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2364 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2365 **/
2366 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2367 {
2368 bool lesm_enabled = FALSE;
2369 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2370 s32 status;
2371
2372 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2373
2374 /* get the offset to the Firmware Module block */
2375 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2376
2377 if ((status != IXGBE_SUCCESS) ||
2378 (fw_offset == 0) || (fw_offset == 0xFFFF))
2379 goto out;
2380
2381 /* get the offset to the LESM Parameters block */
2382 status = hw->eeprom.ops.read(hw, (fw_offset +
2383 IXGBE_FW_LESM_PARAMETERS_PTR),
2384 &fw_lesm_param_offset);
2385
2386 if ((status != IXGBE_SUCCESS) ||
2387 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2388 goto out;
2389
2390 /* get the LESM state word */
2391 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2392 IXGBE_FW_LESM_STATE_1),
2393 &fw_lesm_state);
2394
2395 if ((status == IXGBE_SUCCESS) &&
2396 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2397 lesm_enabled = TRUE;
2398
2399 out:
2400 return lesm_enabled;
2401 }
2402
2403 /**
2404 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2405 * fastest available method
2406 *
2407 * @hw: pointer to hardware structure
2408 * @offset: offset of word in EEPROM to read
2409 * @words: number of words
2410 * @data: word(s) read from the EEPROM
2411 *
2412 * Retrieves 16 bit word(s) read from EEPROM
2413 **/
2414 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2415 u16 words, u16 *data)
2416 {
2417 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2418 s32 ret_val = IXGBE_ERR_CONFIG;
2419
2420 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2421
2422 /*
2423 * If EEPROM is detected and can be addressed using 14 bits,
2424 * use EERD otherwise use bit bang
2425 */
2426 if ((eeprom->type == ixgbe_eeprom_spi) &&
2427 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2428 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2429 data);
2430 else
2431 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2432 words,
2433 data);
2434
2435 return ret_val;
2436 }
2437
2438 /**
2439 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2440 * fastest available method
2441 *
2442 * @hw: pointer to hardware structure
2443 * @offset: offset of word in the EEPROM to read
2444 * @data: word read from the EEPROM
2445 *
2446 * Reads a 16 bit word from the EEPROM
2447 **/
2448 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2449 u16 offset, u16 *data)
2450 {
2451 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2452 s32 ret_val = IXGBE_ERR_CONFIG;
2453
2454 DEBUGFUNC("ixgbe_read_eeprom_82599");
2455
2456 /*
2457 * If EEPROM is detected and can be addressed using 14 bits,
2458 * use EERD otherwise use bit bang
2459 */
2460 if ((eeprom->type == ixgbe_eeprom_spi) &&
2461 (offset <= IXGBE_EERD_MAX_ADDR))
2462 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2463 else
2464 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2465
2466 return ret_val;
2467 }
2468
2469 /**
2470 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2471 *
2472 * @hw: pointer to hardware structure
2473 *
2474 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2475 * full pipeline reset. This function assumes the SW/FW lock is held.
2476 **/
2477 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2478 {
2479 s32 ret_val;
2480 u32 anlp1_reg = 0;
2481 u32 i, autoc_reg, autoc2_reg;
2482
2483 /* Enable link if disabled in NVM */
2484 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2485 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2486 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2487 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2488 IXGBE_WRITE_FLUSH(hw);
2489 }
2490
2491 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2492 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2493 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2494 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2495 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2496 /* Wait for AN to leave state 0 */
2497 for (i = 0; i < 10; i++) {
2498 msec_delay(4);
2499 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2500 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2501 break;
2502 }
2503
2504 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2505 DEBUGOUT("auto negotiation not completed\n");
2506 ret_val = IXGBE_ERR_RESET_FAILED;
2507 goto reset_pipeline_out;
2508 }
2509
2510 ret_val = IXGBE_SUCCESS;
2511
2512 reset_pipeline_out:
2513 /* Write AUTOC register with original LMS field and Restart_AN */
2514 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2515 IXGBE_WRITE_FLUSH(hw);
2516
2517 return ret_val;
2518 }
2519
2520 /**
2521 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2522 * @hw: pointer to hardware structure
2523 * @byte_offset: byte offset to read
2524 * @data: value read
2525 *
2526 * Performs byte read operation to SFP module's EEPROM over I2C interface at
2527 * a specified device address.
2528 **/
2529 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2530 u8 dev_addr, u8 *data)
2531 {
2532 u32 esdp;
2533 s32 status;
2534 s32 timeout = 200;
2535
2536 DEBUGFUNC("ixgbe_read_i2c_byte_82599");
2537
2538 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2539 /* Acquire I2C bus ownership. */
2540 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2541 esdp |= IXGBE_ESDP_SDP0;
2542 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2543 IXGBE_WRITE_FLUSH(hw);
2544
2545 while (timeout) {
2546 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2547 if (esdp & IXGBE_ESDP_SDP1)
2548 break;
2549
2550 msec_delay(5);
2551 timeout--;
2552 }
2553
2554 if (!timeout) {
2555 DEBUGOUT("Driver can't access resource,"
2556 " acquiring I2C bus timeout.\n");
2557 status = IXGBE_ERR_I2C;
2558 goto release_i2c_access;
2559 }
2560 }
2561
2562 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2563
2564 release_i2c_access:
2565
2566 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2567 /* Release I2C bus ownership. */
2568 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2569 esdp &= ~IXGBE_ESDP_SDP0;
2570 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2571 IXGBE_WRITE_FLUSH(hw);
2572 }
2573
2574 return status;
2575 }
2576
2577 /**
2578 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2579 * @hw: pointer to hardware structure
2580 * @byte_offset: byte offset to write
2581 * @data: value to write
2582 *
2583 * Performs byte write operation to SFP module's EEPROM over I2C interface at
2584 * a specified device address.
2585 **/
2586 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2587 u8 dev_addr, u8 data)
2588 {
2589 u32 esdp;
2590 s32 status;
2591 s32 timeout = 200;
2592
2593 DEBUGFUNC("ixgbe_write_i2c_byte_82599");
2594
2595 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2596 /* Acquire I2C bus ownership. */
2597 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2598 esdp |= IXGBE_ESDP_SDP0;
2599 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2600 IXGBE_WRITE_FLUSH(hw);
2601
2602 while (timeout) {
2603 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2604 if (esdp & IXGBE_ESDP_SDP1)
2605 break;
2606
2607 msec_delay(5);
2608 timeout--;
2609 }
2610
2611 if (!timeout) {
2612 DEBUGOUT("Driver can't access resource,"
2613 " acquiring I2C bus timeout.\n");
2614 status = IXGBE_ERR_I2C;
2615 goto release_i2c_access;
2616 }
2617 }
2618
2619 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2620
2621 release_i2c_access:
2622
2623 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2624 /* Release I2C bus ownership. */
2625 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2626 esdp &= ~IXGBE_ESDP_SDP0;
2627 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2628 IXGBE_WRITE_FLUSH(hw);
2629 }
2630
2631 return status;
2632 }
2633