/******************************************************************************
2
3 Copyright (c) 2001-2013, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 247822 2013-03-04 23:07:40Z jfv $*/
34 /*$NetBSD: ixgbe_82599.c,v 1.9 2015/04/24 07:00:51 msaitoh Exp $*/
35
36 #include "ixgbe_type.h"
37 #include "ixgbe_82599.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41
42 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
43 ixgbe_link_speed speed,
44 bool autoneg_wait_to_complete);
45 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
46 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
47 u16 offset, u16 *data);
48 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
49 u16 words, u16 *data);
50
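/**
 * ixgbe_mng_enabled - Check whether manageability firmware is enabled
 * @hw: pointer to hardware structure
 *
 * Returns TRUE only when the firmware mode in FWSM is pass-through, MANC
 * has RCV_TCO_EN set, and FACTPS does not report MNGCG; otherwise FALSE.
 **/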
51 static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
52 {
53 u32 fwsm, manc, factps;
54
55 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
56 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
57 return FALSE;
58
59 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
60 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
61 return FALSE;
62
63 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
64 if (factps & IXGBE_FACTPS_MNGCG)
65 return FALSE;
66
67 return TRUE;
68 }
69
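/**
 * ixgbe_init_mac_link_ops_82599 - Initialize MAC link function pointers
 * @hw: pointer to hardware structure
 *
 * Selects the laser control and setup_link function pointers based on the
 * media type, SmartSpeed configuration, and whether LESM firmware is enabled.
 **/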
70 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
71 {
72 struct ixgbe_mac_info *mac = &hw->mac;
73
74 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
75
	/*
	 * Enable the laser control functions for SFP+ fiber
	 * when MNG is not enabled.
	 */
80 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
81 !(ixgbe_mng_enabled(hw))) {
82 mac->ops.disable_tx_laser =
83 &ixgbe_disable_tx_laser_multispeed_fiber;
84 mac->ops.enable_tx_laser =
85 &ixgbe_enable_tx_laser_multispeed_fiber;
86 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
87
88 } else {
89 mac->ops.disable_tx_laser = NULL;
90 mac->ops.enable_tx_laser = NULL;
91 mac->ops.flap_tx_laser = NULL;
92 }
93
94 if (hw->phy.multispeed_fiber) {
95 /* Set up dual speed SFP+ support */
96 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
97 } else {
98 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
99 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
100 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
101 !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
102 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
103 } else {
104 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
105 }
106 }
107 }
108
109 /**
110 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
111 * @hw: pointer to hardware structure
112 *
113 * Initialize any function pointers that were not able to be
114 * set during init_shared_code because the PHY/SFP type was
115 * not known. Perform the SFP init if necessary.
116 *
117 **/
118 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
119 {
120 struct ixgbe_mac_info *mac = &hw->mac;
121 struct ixgbe_phy_info *phy = &hw->phy;
122 s32 ret_val = IXGBE_SUCCESS;
123
124 DEBUGFUNC("ixgbe_init_phy_ops_82599");
125
126 /* Identify the PHY or SFP module */
127 ret_val = phy->ops.identify(hw);
128 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
129 goto init_phy_ops_out;
130
131 /* Setup function pointers based on detected SFP module and speeds */
132 ixgbe_init_mac_link_ops_82599(hw);
133 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
134 hw->phy.ops.reset = NULL;
135
136 /* If copper media, overwrite with copper function pointers */
137 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
138 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
139 mac->ops.get_link_capabilities =
140 &ixgbe_get_copper_link_capabilities_generic;
141 }
142
143 /* Set necessary function pointers based on phy type */
144 switch (hw->phy.type) {
145 case ixgbe_phy_tn:
146 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
147 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
148 phy->ops.get_firmware_version =
149 &ixgbe_get_phy_firmware_version_tnx;
150 break;
151 default:
152 break;
153 }
154 init_phy_ops_out:
155 return ret_val;
156 }
157
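/**
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * If an SFP module has been identified, write its initialization sequence
 * from the EEPROM to IXGBE_CORECTL, then restart the DSP in SFI mode.
 **/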
158 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
159 {
160 s32 ret_val = IXGBE_SUCCESS;
161 u16 list_offset, data_offset, data_value;
162 bool got_lock = FALSE;
163
164 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
165
166 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
167 ixgbe_init_mac_link_ops_82599(hw);
168
169 hw->phy.ops.reset = NULL;
170
171 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
172 &data_offset);
173 if (ret_val != IXGBE_SUCCESS)
174 goto setup_sfp_out;
175
176 /* PHY config will finish before releasing the semaphore */
177 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
178 IXGBE_GSSR_MAC_CSR_SM);
179 if (ret_val != IXGBE_SUCCESS) {
180 ret_val = IXGBE_ERR_SWFW_SYNC;
181 goto setup_sfp_out;
182 }
183
184 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
185 while (data_value != 0xffff) {
186 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
187 IXGBE_WRITE_FLUSH(hw);
188 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
189 }
190
191 /* Release the semaphore */
192 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
193 /* Delay obtaining semaphore again to allow FW access */
194 msec_delay(hw->eeprom.semaphore_delay);
195
196 /* Need SW/FW semaphore around AUTOC writes if LESM on,
197 * likewise reset_pipeline requires lock as it also writes
198 * AUTOC.
199 */
200 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
201 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
202 IXGBE_GSSR_MAC_CSR_SM);
203 if (ret_val != IXGBE_SUCCESS) {
204 ret_val = IXGBE_ERR_SWFW_SYNC;
205 goto setup_sfp_out;
206 }
207
208 got_lock = TRUE;
209 }
210
211 /* Restart DSP and set SFI mode */
212 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
213 IXGBE_AUTOC_LMS_10G_SERIAL));
214 hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
215 ret_val = ixgbe_reset_pipeline_82599(hw);
216
217 if (got_lock) {
218 hw->mac.ops.release_swfw_sync(hw,
219 IXGBE_GSSR_MAC_CSR_SM);
220 got_lock = FALSE;
221 }
222
223 if (ret_val) {
224 DEBUGOUT("sfp module setup not complete\n");
225 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
226 goto setup_sfp_out;
227 }
228
229 }
230
231 setup_sfp_out:
232 return ret_val;
233 }
234
235 /**
236 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
237 * @hw: pointer to hardware structure
238 *
239 * Initialize the function pointers and assign the MAC type for 82599.
240 * Does not touch the hardware.
241 **/
242
243 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
244 {
245 struct ixgbe_mac_info *mac = &hw->mac;
246 struct ixgbe_phy_info *phy = &hw->phy;
247 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
248 s32 ret_val;
249
250 DEBUGFUNC("ixgbe_init_ops_82599");
251
252 ixgbe_init_phy_ops_generic(hw);
253 ret_val = ixgbe_init_ops_generic(hw);
254
255 /* PHY */
256 phy->ops.identify = &ixgbe_identify_phy_82599;
257 phy->ops.init = &ixgbe_init_phy_ops_82599;
258
259 /* MAC */
260 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
261 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
262 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
263 mac->ops.get_supported_physical_layer =
264 &ixgbe_get_supported_physical_layer_82599;
265 mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
266 mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
267 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
268 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
269 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
270 mac->ops.start_hw = &ixgbe_start_hw_82599;
271 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
272 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
273 mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
274 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
275 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
276
277 /* RAR, Multicast, VLAN */
278 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
279 mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
280 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
281 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
282 mac->rar_highwater = 1;
283 mac->ops.set_vfta = &ixgbe_set_vfta_generic;
284 mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
285 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
286 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
287 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
288 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
289 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
290
291 /* Link */
292 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
293 mac->ops.check_link = &ixgbe_check_mac_link_generic;
294 mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
295 ixgbe_init_mac_link_ops_82599(hw);
296
297 mac->mcft_size = 128;
298 mac->vft_size = 128;
299 mac->num_rar_entries = 128;
300 mac->rx_pb_size = 512;
301 mac->max_tx_queues = 128;
302 mac->max_rx_queues = 128;
303 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
304
305 mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
306 IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
307
308 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
309
310 /* EEPROM */
311 eeprom->ops.read = &ixgbe_read_eeprom_82599;
312 eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
313
314 /* Manageability interface */
315 mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
316
317
318 return ret_val;
319 }
320
321 /**
322 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
323 * @hw: pointer to hardware structure
324 * @speed: pointer to link speed
325 * @autoneg: TRUE when autoneg or autotry is enabled
326 *
327 * Determines the link capabilities by reading the AUTOC register.
328 **/
329 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
330 ixgbe_link_speed *speed,
331 bool *autoneg)
332 {
333 s32 status = IXGBE_SUCCESS;
334 u32 autoc = 0;
335
336 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
337
338
339 /* Check if 1G SFP module. */
340 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
341 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
342 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
343 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
344 *speed = IXGBE_LINK_SPEED_1GB_FULL;
345 *autoneg = TRUE;
346 goto out;
347 }
348
349 /*
350 * Determine link capabilities based on the stored value of AUTOC,
351 * which represents EEPROM defaults. If AUTOC value has not
352 * been stored, use the current register values.
353 */
354 if (hw->mac.orig_link_settings_stored)
355 autoc = hw->mac.orig_autoc;
356 else
357 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
358
359 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
360 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
361 *speed = IXGBE_LINK_SPEED_1GB_FULL;
362 *autoneg = FALSE;
363 break;
364
365 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
366 *speed = IXGBE_LINK_SPEED_10GB_FULL;
367 *autoneg = FALSE;
368 break;
369
370 case IXGBE_AUTOC_LMS_1G_AN:
371 *speed = IXGBE_LINK_SPEED_1GB_FULL;
372 *autoneg = TRUE;
373 break;
374
375 case IXGBE_AUTOC_LMS_10G_SERIAL:
376 *speed = IXGBE_LINK_SPEED_10GB_FULL;
377 *autoneg = FALSE;
378 break;
379
380 case IXGBE_AUTOC_LMS_KX4_KX_KR:
381 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
382 *speed = IXGBE_LINK_SPEED_UNKNOWN;
383 if (autoc & IXGBE_AUTOC_KR_SUPP)
384 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
385 if (autoc & IXGBE_AUTOC_KX4_SUPP)
386 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
387 if (autoc & IXGBE_AUTOC_KX_SUPP)
388 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
389 *autoneg = TRUE;
390 break;
391
392 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
393 *speed = IXGBE_LINK_SPEED_100_FULL;
394 if (autoc & IXGBE_AUTOC_KR_SUPP)
395 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
396 if (autoc & IXGBE_AUTOC_KX4_SUPP)
397 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
398 if (autoc & IXGBE_AUTOC_KX_SUPP)
399 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
400 *autoneg = TRUE;
401 break;
402
403 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
404 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
405 *autoneg = FALSE;
406 break;
407
408 default:
409 status = IXGBE_ERR_LINK_SETUP;
410 goto out;
411 break;
412 }
413
414 if (hw->phy.multispeed_fiber) {
415 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
416 IXGBE_LINK_SPEED_1GB_FULL;
417 *autoneg = TRUE;
418 }
419
420 out:
421 return status;
422 }
423
424 /**
425 * ixgbe_get_media_type_82599 - Get media type
426 * @hw: pointer to hardware structure
427 *
428 * Returns the media type (fiber, copper, backplane)
429 **/
430 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
431 {
432 enum ixgbe_media_type media_type;
433
434 DEBUGFUNC("ixgbe_get_media_type_82599");
435
436 /* Detect if there is a copper PHY attached. */
437 switch (hw->phy.type) {
438 case ixgbe_phy_cu_unknown:
439 case ixgbe_phy_tn:
440 media_type = ixgbe_media_type_copper;
441 goto out;
442 default:
443 break;
444 }
445
446 switch (hw->device_id) {
447 case IXGBE_DEV_ID_82599_KX4:
448 case IXGBE_DEV_ID_82599_KX4_MEZZ:
449 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
450 case IXGBE_DEV_ID_82599_KR:
451 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
452 case IXGBE_DEV_ID_82599_XAUI_LOM:
453 /* Default device ID is mezzanine card KX/KX4 */
454 media_type = ixgbe_media_type_backplane;
455 break;
456 case IXGBE_DEV_ID_82599_SFP:
457 case IXGBE_DEV_ID_82599_SFP_FCOE:
458 case IXGBE_DEV_ID_82599_SFP_EM:
459 case IXGBE_DEV_ID_82599_SFP_SF2:
460 case IXGBE_DEV_ID_82599_SFP_SF_QP:
461 case IXGBE_DEV_ID_82599EN_SFP:
462 media_type = ixgbe_media_type_fiber;
463 break;
464 case IXGBE_DEV_ID_82599_CX4:
465 media_type = ixgbe_media_type_cx4;
466 break;
467 case IXGBE_DEV_ID_82599_T3_LOM:
468 media_type = ixgbe_media_type_copper;
469 break;
470 case IXGBE_DEV_ID_82599_BYPASS:
471 media_type = ixgbe_media_type_fiber_fixed;
472 hw->phy.multispeed_fiber = TRUE;
473 break;
474 default:
475 media_type = ixgbe_media_type_unknown;
476 break;
477 }
478 out:
479 return media_type;
480 }
481
482 /**
483 * ixgbe_start_mac_link_82599 - Setup MAC link settings
484 * @hw: pointer to hardware structure
485 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
486 *
487 * Configures link settings based on values in the ixgbe_hw struct.
488 * Restarts the link. Performs autonegotiation if needed.
489 **/
490 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
491 bool autoneg_wait_to_complete)
492 {
493 u32 autoc_reg;
494 u32 links_reg;
495 u32 i;
496 s32 status = IXGBE_SUCCESS;
497 bool got_lock = FALSE;
498
499 DEBUGFUNC("ixgbe_start_mac_link_82599");
500
501
502 /* reset_pipeline requires us to hold this lock as it writes to
503 * AUTOC.
504 */
505 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
506 status = hw->mac.ops.acquire_swfw_sync(hw,
507 IXGBE_GSSR_MAC_CSR_SM);
508 if (status != IXGBE_SUCCESS)
509 goto out;
510
511 got_lock = TRUE;
512 }
513
514 /* Restart link */
515 ixgbe_reset_pipeline_82599(hw);
516
517 if (got_lock)
518 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
519
520 /* Only poll for autoneg to complete if specified to do so */
521 if (autoneg_wait_to_complete) {
522 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
523 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
524 IXGBE_AUTOC_LMS_KX4_KX_KR ||
525 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
526 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
527 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
528 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
529 links_reg = 0; /* Just in case Autoneg time = 0 */
530 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
531 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
532 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
533 break;
534 msec_delay(100);
535 }
536 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
537 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
538 DEBUGOUT("Autoneg did not complete.\n");
539 }
540 }
541 }
542
	/* Add delay to filter out noise during initial link setup */
544 msec_delay(50);
545
546 out:
547 return status;
548 }
549
550 /**
551 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
552 * @hw: pointer to hardware structure
553 *
554 * The base drivers may require better control over SFP+ module
555 * PHY states. This includes selectively shutting down the Tx
556 * laser on the PHY, effectively halting physical link.
557 **/
558 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
559 {
560 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
561
562 /* Disable tx laser; allow 100us to go dark per spec */
563 esdp_reg |= IXGBE_ESDP_SDP3;
564 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
565 IXGBE_WRITE_FLUSH(hw);
566 usec_delay(100);
567 }
568
569 /**
570 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
571 * @hw: pointer to hardware structure
572 *
573 * The base drivers may require better control over SFP+ module
574 * PHY states. This includes selectively turning on the Tx
575 * laser on the PHY, effectively starting physical link.
576 **/
577 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
578 {
579 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
580
581 /* Enable tx laser; allow 100ms to light up */
582 esdp_reg &= ~IXGBE_ESDP_SDP3;
583 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
584 IXGBE_WRITE_FLUSH(hw);
585 msec_delay(100);
586 }
587
588 /**
589 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
590 * @hw: pointer to hardware structure
591 *
592 * When the driver changes the link speeds that it can support,
593 * it sets autotry_restart to TRUE to indicate that we need to
594 * initiate a new autotry session with the link partner. To do
595 * so, we set the speed then disable and re-enable the tx laser, to
596 * alert the link partner that it also needs to restart autotry on its
 * end. This is consistent with true clause 37 autoneg, which also
598 * involves a loss of signal.
599 **/
600 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
601 {
602 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
603
604 if (hw->mac.autotry_restart) {
605 ixgbe_disable_tx_laser_multispeed_fiber(hw);
606 ixgbe_enable_tx_laser_multispeed_fiber(hw);
607 hw->mac.autotry_restart = FALSE;
608 }
609 }
610
611 /**
612 * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
613 * @hw: pointer to hardware structure
614 * @speed: link speed to set
615 *
616 * We set the module speed differently for fixed fiber. For other
617 * multi-speed devices we don't have an error value so here if we
618 * detect an error we just log it and exit.
619 */
620 static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
621 ixgbe_link_speed speed)
622 {
623 s32 status;
624 u8 rs, eeprom_data;
625
626 switch (speed) {
627 case IXGBE_LINK_SPEED_10GB_FULL:
628 /* one bit mask same as setting on */
629 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
630 break;
631 case IXGBE_LINK_SPEED_1GB_FULL:
632 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
633 break;
634 default:
635 DEBUGOUT("Invalid fixed module speed\n");
636 return;
637 }
638
639 /* Set RS0 */
640 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
641 IXGBE_I2C_EEPROM_DEV_ADDR2,
642 &eeprom_data);
643 if (status) {
644 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
645 goto out;
646 }
647
	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
649
650 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
651 IXGBE_I2C_EEPROM_DEV_ADDR2,
652 eeprom_data);
653 if (status) {
654 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
655 goto out;
656 }
657
658 /* Set RS1 */
659 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
660 IXGBE_I2C_EEPROM_DEV_ADDR2,
661 &eeprom_data);
662 if (status) {
663 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
664 goto out;
665 }
666
	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
668
669 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
670 IXGBE_I2C_EEPROM_DEV_ADDR2,
671 eeprom_data);
672 if (status) {
673 DEBUGOUT("Failed to write Rx Rate Select RS1\n");
674 goto out;
675 }
676 out:
677 return;
678 }
679
680 /**
681 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
682 * @hw: pointer to hardware structure
683 * @speed: new link speed
684 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
685 *
686 * Set the link speed in the AUTOC register and restarts link.
687 **/
688 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
689 ixgbe_link_speed speed,
690 bool autoneg_wait_to_complete)
691 {
692 s32 status = IXGBE_SUCCESS;
693 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
694 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
695 u32 speedcnt = 0;
696 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
697 u32 i = 0;
698 bool autoneg, link_up = FALSE;
699
700 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
701
702 /* Mask off requested but non-supported speeds */
703 status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
704 if (status != IXGBE_SUCCESS)
705 return status;
706
707 speed &= link_speed;
708
709 /*
710 * Try each speed one by one, highest priority first. We do this in
711 * software because 10gb fiber doesn't support speed autonegotiation.
712 */
713 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
714 speedcnt++;
715 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
716
717 /* If we already have link at this speed, just jump out */
718 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
719 if (status != IXGBE_SUCCESS)
720 return status;
721
722 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
723 goto out;
724
725 /* Set the module link speed */
726 if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
727 ixgbe_set_fiber_fixed_speed(hw,
728 IXGBE_LINK_SPEED_10GB_FULL);
729 } else {
730 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
731 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
732 IXGBE_WRITE_FLUSH(hw);
733 }
734
735 /* Allow module to change analog characteristics (1G->10G) */
736 msec_delay(40);
737
738 status = ixgbe_setup_mac_link_82599(hw,
739 IXGBE_LINK_SPEED_10GB_FULL,
740 autoneg_wait_to_complete);
741 if (status != IXGBE_SUCCESS)
742 return status;
743
744 /* Flap the tx laser if it has not already been done */
745 ixgbe_flap_tx_laser(hw);
746
747 /*
748 * Wait for the controller to acquire link. Per IEEE 802.3ap,
749 * Section 73.10.2, we may have to wait up to 500ms if KR is
750 * attempted. 82599 uses the same timing for 10g SFI.
751 */
752 for (i = 0; i < 5; i++) {
753 /* Wait for the link partner to also set speed */
754 msec_delay(100);
755
756 /* If we have link, just jump out */
757 status = ixgbe_check_link(hw, &link_speed,
758 &link_up, FALSE);
759 if (status != IXGBE_SUCCESS)
760 return status;
761
762 if (link_up)
763 goto out;
764 }
765 }
766
767 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
768 speedcnt++;
769 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
770 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
771
772 /* If we already have link at this speed, just jump out */
773 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
774 if (status != IXGBE_SUCCESS)
775 return status;
776
777 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
778 goto out;
779
780 /* Set the module link speed */
781 if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
782 ixgbe_set_fiber_fixed_speed(hw,
783 IXGBE_LINK_SPEED_1GB_FULL);
784 } else {
785 esdp_reg &= ~IXGBE_ESDP_SDP5;
786 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
787 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
788 IXGBE_WRITE_FLUSH(hw);
789 }
790
791 /* Allow module to change analog characteristics (10G->1G) */
792 msec_delay(40);
793
794 status = ixgbe_setup_mac_link_82599(hw,
795 IXGBE_LINK_SPEED_1GB_FULL,
796 autoneg_wait_to_complete);
797 if (status != IXGBE_SUCCESS)
798 return status;
799
800 /* Flap the tx laser if it has not already been done */
801 ixgbe_flap_tx_laser(hw);
802
803 /* Wait for the link partner to also set speed */
804 msec_delay(100);
805
806 /* If we have link, just jump out */
807 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
808 if (status != IXGBE_SUCCESS)
809 return status;
810
811 if (link_up)
812 goto out;
813 }
814
815 /*
816 * We didn't get link. Configure back to the highest speed we tried,
817 * (if there was more than one). We call ourselves back with just the
818 * single highest speed that the user requested.
819 */
820 if (speedcnt > 1)
821 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
822 highest_link_speed, autoneg_wait_to_complete);
823
824 out:
825 /* Set autoneg_advertised value based on input link speed */
826 hw->phy.autoneg_advertised = 0;
827
828 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
829 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
830
831 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
832 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
833
834 return status;
835 }
836
837 /**
838 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
839 * @hw: pointer to hardware structure
840 * @speed: new link speed
841 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
842 *
843 * Implements the Intel SmartSpeed algorithm.
844 **/
845 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
846 ixgbe_link_speed speed,
847 bool autoneg_wait_to_complete)
848 {
849 s32 status = IXGBE_SUCCESS;
850 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
851 s32 i, j;
852 bool link_up = FALSE;
853 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
854
855 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
856
857 /* Set autoneg_advertised value based on input link speed */
858 hw->phy.autoneg_advertised = 0;
859
860 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
861 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
862
863 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
864 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
865
866 if (speed & IXGBE_LINK_SPEED_100_FULL)
867 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
868
869 /*
870 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
871 * autoneg advertisement if link is unable to be established at the
872 * highest negotiated rate. This can sometimes happen due to integrity
873 * issues with the physical media connection.
874 */
875
876 /* First, try to get link with full advertisement */
877 hw->phy.smart_speed_active = FALSE;
878 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
879 status = ixgbe_setup_mac_link_82599(hw, speed,
880 autoneg_wait_to_complete);
881 if (status != IXGBE_SUCCESS)
882 goto out;
883
884 /*
885 * Wait for the controller to acquire link. Per IEEE 802.3ap,
886 * Section 73.10.2, we may have to wait up to 500ms if KR is
887 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
888 * Table 9 in the AN MAS.
889 */
890 for (i = 0; i < 5; i++) {
891 msec_delay(100);
892
893 /* If we have link, just jump out */
894 status = ixgbe_check_link(hw, &link_speed, &link_up,
895 FALSE);
896 if (status != IXGBE_SUCCESS)
897 goto out;
898
899 if (link_up)
900 goto out;
901 }
902 }
903
904 /*
905 * We didn't get link. If we advertised KR plus one of KX4/KX
906 * (or BX4/BX), then disable KR and try again.
907 */
908 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
909 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
910 goto out;
911
912 /* Turn SmartSpeed on to disable KR support */
913 hw->phy.smart_speed_active = TRUE;
914 status = ixgbe_setup_mac_link_82599(hw, speed,
915 autoneg_wait_to_complete);
916 if (status != IXGBE_SUCCESS)
917 goto out;
918
919 /*
920 * Wait for the controller to acquire link. 600ms will allow for
921 * the AN link_fail_inhibit_timer as well for multiple cycles of
922 * parallel detect, both 10g and 1g. This allows for the maximum
923 * connect attempts as defined in the AN MAS table 73-7.
924 */
925 for (i = 0; i < 6; i++) {
926 msec_delay(100);
927
928 /* If we have link, just jump out */
929 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
930 if (status != IXGBE_SUCCESS)
931 goto out;
932
933 if (link_up)
934 goto out;
935 }
936
937 /* We didn't get link. Turn SmartSpeed back off. */
938 hw->phy.smart_speed_active = FALSE;
939 status = ixgbe_setup_mac_link_82599(hw, speed,
940 autoneg_wait_to_complete);
941
942 out:
943 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
944 DEBUGOUT("Smartspeed has downgraded the link speed "
945 "from the maximum advertised\n");
946 return status;
947 }
948
949 /**
950 * ixgbe_setup_mac_link_82599 - Set MAC link speed
951 * @hw: pointer to hardware structure
952 * @speed: new link speed
953 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
954 *
955 * Set the link speed in the AUTOC register and restarts link.
956 **/
957 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
958 ixgbe_link_speed speed,
959 bool autoneg_wait_to_complete)
960 {
961 bool autoneg = FALSE;
962 s32 status = IXGBE_SUCCESS;
963 u32 autoc, pma_pmd_1g, link_mode, start_autoc;
964 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
965 u32 orig_autoc = 0;
966 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
967 u32 links_reg;
968 u32 i;
969 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
970 bool got_lock = FALSE;
971
972 DEBUGFUNC("ixgbe_setup_mac_link_82599");
973
974 /* Check to see if speed passed in is supported. */
975 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
976 if (status)
977 goto out;
978
979 speed &= link_capabilities;
980
981 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
982 status = IXGBE_ERR_LINK_SETUP;
983 goto out;
984 }
985
986 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
987 if (hw->mac.orig_link_settings_stored)
988 autoc = hw->mac.orig_autoc;
989 else
990 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
991
992 orig_autoc = autoc;
993 start_autoc = hw->mac.cached_autoc;
994 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
995 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
996
997 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
998 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
999 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1000 /* Set KX4/KX/KR support according to speed requested */
1001 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
1002 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
1003 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
1004 autoc |= IXGBE_AUTOC_KX4_SUPP;
1005 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
1006 (hw->phy.smart_speed_active == FALSE))
1007 autoc |= IXGBE_AUTOC_KR_SUPP;
1008 }
1009 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1010 autoc |= IXGBE_AUTOC_KX_SUPP;
1011 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
1012 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
1013 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
1014 /* Switch from 1G SFI to 10G SFI if requested */
1015 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
1016 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
1017 autoc &= ~IXGBE_AUTOC_LMS_MASK;
1018 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
1019 }
1020 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
1021 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
1022 /* Switch from 10G SFI to 1G SFI if requested */
1023 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1024 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
1025 autoc &= ~IXGBE_AUTOC_LMS_MASK;
1026 if (autoneg)
1027 autoc |= IXGBE_AUTOC_LMS_1G_AN;
1028 else
1029 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
1030 }
1031 }
1032
1033 if (autoc != start_autoc) {
1034 /* Need SW/FW semaphore around AUTOC writes if LESM is on,
1035 * likewise reset_pipeline requires us to hold this lock as
1036 * it also writes to AUTOC.
1037 */
1038 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1039 status = hw->mac.ops.acquire_swfw_sync(hw,
1040 IXGBE_GSSR_MAC_CSR_SM);
1041 if (status != IXGBE_SUCCESS) {
1042 status = IXGBE_ERR_SWFW_SYNC;
1043 goto out;
1044 }
1045
1046 got_lock = TRUE;
1047 }
1048
1049 /* Restart link */
1050 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
1051 hw->mac.cached_autoc = autoc;
1052 ixgbe_reset_pipeline_82599(hw);
1053
1054 if (got_lock) {
1055 hw->mac.ops.release_swfw_sync(hw,
1056 IXGBE_GSSR_MAC_CSR_SM);
1057 got_lock = FALSE;
1058 }
1059
1060 /* Only poll for autoneg to complete if specified to do so */
1061 if (autoneg_wait_to_complete) {
1062 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
1063 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
1064 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1065 links_reg = 0; /*Just in case Autoneg time=0*/
1066 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
1067 links_reg =
1068 IXGBE_READ_REG(hw, IXGBE_LINKS);
1069 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
1070 break;
1071 msec_delay(100);
1072 }
1073 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1074 status =
1075 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1076 DEBUGOUT("Autoneg did not complete.\n");
1077 }
1078 }
1079 }
1080
		/* Add delay to filter out noise during initial link setup */
1082 msec_delay(50);
1083 }
1084
1085 out:
1086 return status;
1087 }
1088
1089 /**
1090 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1091 * @hw: pointer to hardware structure
1092 * @speed: new link speed
1093 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
1094 *
1095 * Restarts link on PHY and MAC based on settings passed in.
1096 **/
1097 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1098 ixgbe_link_speed speed,
1099 bool autoneg_wait_to_complete)
1100 {
1101 s32 status;
1102
1103 DEBUGFUNC("ixgbe_setup_copper_link_82599");
1104
1105 /* Setup the PHY according to input speed */
1106 status = hw->phy.ops.setup_link_speed(hw, speed,
1107 autoneg_wait_to_complete);
1108 /* Set up MAC */
1109 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1110
1111 return status;
1112 }
1113
1114 /**
1115 * ixgbe_reset_hw_82599 - Perform hardware reset
1116 * @hw: pointer to hardware structure
1117 *
1118 * Resets the hardware by resetting the transmit and receive units, masks
1119 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1120 * reset.
1121 **/
1122 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1123 {
1124 ixgbe_link_speed link_speed;
1125 s32 status;
1126 u32 ctrl, i, autoc, autoc2;
1127 bool link_up = FALSE;
1128
1129 DEBUGFUNC("ixgbe_reset_hw_82599");
1130
1131 /* Call adapter stop to disable tx/rx and clear interrupts */
1132 status = hw->mac.ops.stop_adapter(hw);
1133 if (status != IXGBE_SUCCESS)
1134 goto reset_hw_out;
1135
1136 /* flush pending Tx transactions */
1137 ixgbe_clear_tx_pending(hw);
1138
1139 /* PHY ops must be identified and initialized prior to reset */
1140
1141 /* Identify PHY and related function pointers */
1142 status = hw->phy.ops.init(hw);
1143
1144 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1145 goto reset_hw_out;
1146
1147 /* Setup SFP module if there is one present. */
1148 if (hw->phy.sfp_setup_needed) {
1149 status = hw->mac.ops.setup_sfp(hw);
1150 hw->phy.sfp_setup_needed = FALSE;
1151 }
1152
1153 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1154 goto reset_hw_out;
1155
1156 /* Reset PHY */
1157 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1158 hw->phy.ops.reset(hw);
1159
1160 mac_reset_top:
1161 /*
1162 * Issue global reset to the MAC. Needs to be SW reset if link is up.
1163 * If link reset is used when link is up, it might reset the PHY when
1164 * mng is using it. If link is down or the flag to force full link
1165 * reset is set, then perform link reset.
1166 */
1167 ctrl = IXGBE_CTRL_LNK_RST;
1168 if (!hw->force_full_reset) {
1169 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
1170 if (link_up)
1171 ctrl = IXGBE_CTRL_RST;
1172 }
1173
1174 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1175 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1176 IXGBE_WRITE_FLUSH(hw);
1177
1178 /* Poll for reset bit to self-clear indicating reset is complete */
1179 for (i = 0; i < 10; i++) {
1180 usec_delay(1);
1181 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1182 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1183 break;
1184 }
1185
1186 if (ctrl & IXGBE_CTRL_RST_MASK) {
1187 status = IXGBE_ERR_RESET_FAILED;
1188 DEBUGOUT("Reset polling failed to complete.\n");
1189 }
1190
1191 msec_delay(50);
1192
1193 /*
1194 * Double resets are required for recovery from certain error
1195 * conditions. Between resets, it is necessary to stall to allow time
1196 * for any pending HW events to complete.
1197 */
1198 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1199 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1200 goto mac_reset_top;
1201 }
1202
1203 /*
1204 * Store the original AUTOC/AUTOC2 values if they have not been
1205 * stored off yet. Otherwise restore the stored original
1206 * values since the reset operation sets back to defaults.
1207 */
1208 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1209 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1210
1211 /* Enable link if disabled in NVM */
1212 if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1213 autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1214 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1215 IXGBE_WRITE_FLUSH(hw);
1216 }
1217
1218 if (hw->mac.orig_link_settings_stored == FALSE) {
1219 hw->mac.orig_autoc = autoc;
1220 hw->mac.orig_autoc2 = autoc2;
1221 hw->mac.cached_autoc = autoc;
1222 hw->mac.orig_link_settings_stored = TRUE;
1223 } else {
1224 if (autoc != hw->mac.orig_autoc) {
1225 /* Need SW/FW semaphore around AUTOC writes if LESM is
1226 * on, likewise reset_pipeline requires us to hold
1227 * this lock as it also writes to AUTOC.
1228 */
1229 bool got_lock = FALSE;
1230 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1231 status = hw->mac.ops.acquire_swfw_sync(hw,
1232 IXGBE_GSSR_MAC_CSR_SM);
1233 if (status != IXGBE_SUCCESS) {
1234 status = IXGBE_ERR_SWFW_SYNC;
1235 goto reset_hw_out;
1236 }
1237
1238 got_lock = TRUE;
1239 }
1240
1241 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
1242 hw->mac.cached_autoc = hw->mac.orig_autoc;
1243 ixgbe_reset_pipeline_82599(hw);
1244
1245 if (got_lock)
1246 hw->mac.ops.release_swfw_sync(hw,
1247 IXGBE_GSSR_MAC_CSR_SM);
1248 }
1249
1250 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1251 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1252 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1253 autoc2 |= (hw->mac.orig_autoc2 &
1254 IXGBE_AUTOC2_UPPER_MASK);
1255 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1256 }
1257 }
1258
1259 /* Store the permanent mac address */
1260 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1261
1262 /*
1263 * Store MAC address from RAR0, clear receive address registers, and
1264 * clear the multicast table. Also reset num_rar_entries to 128,
1265 * since we modify this value when programming the SAN MAC address.
1266 */
1267 hw->mac.num_rar_entries = 128;
1268 hw->mac.ops.init_rx_addrs(hw);
1269
1270 /* Store the permanent SAN mac address */
1271 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1272
1273 /* Add the SAN MAC address to the RAR only if it's a valid address */
1274 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1275 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1276 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1277
1278 /* Save the SAN MAC RAR index */
1279 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1280
1281 /* Reserve the last RAR for the SAN MAC address */
1282 hw->mac.num_rar_entries--;
1283 }
1284
1285 /* Store the alternative WWNN/WWPN prefix */
1286 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1287 &hw->mac.wwpn_prefix);
1288
1289 reset_hw_out:
1290 return status;
1291 }
1292
1293 /**
1294 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1295 * @hw: pointer to hardware structure
1296 **/
1297 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1298 {
1299 int i;
1300 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1301 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1302
1303 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1304
1305 /*
1306 * Before starting reinitialization process,
1307 * FDIRCMD.CMD must be zero.
1308 */
1309 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1310 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1311 IXGBE_FDIRCMD_CMD_MASK))
1312 break;
1313 usec_delay(10);
1314 }
1315 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1316 DEBUGOUT("Flow Director previous command isn't complete, "
1317 "aborting table re-initialization.\n");
1318 return IXGBE_ERR_FDIR_REINIT_FAILED;
1319 }
1320
1321 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1322 IXGBE_WRITE_FLUSH(hw);
1323 /*
1324 * 82599 adapters flow director init flow cannot be restarted,
1325 * Workaround 82599 silicon errata by performing the following steps
1326 * before re-writing the FDIRCTRL control register with the same value.
1327 * - write 1 to bit 8 of FDIRCMD register &
1328 * - write 0 to bit 8 of FDIRCMD register
1329 */
1330 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1331 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1332 IXGBE_FDIRCMD_CLEARHT));
1333 IXGBE_WRITE_FLUSH(hw);
1334 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1335 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1336 ~IXGBE_FDIRCMD_CLEARHT));
1337 IXGBE_WRITE_FLUSH(hw);
1338 /*
1339 * Clear FDIR Hash register to clear any leftover hashes
1340 * waiting to be programmed.
1341 */
1342 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1343 IXGBE_WRITE_FLUSH(hw);
1344
1345 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1346 IXGBE_WRITE_FLUSH(hw);
1347
1348 /* Poll init-done after we write FDIRCTRL register */
1349 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1350 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1351 IXGBE_FDIRCTRL_INIT_DONE)
1352 break;
1353 msec_delay(1);
1354 }
1355 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1356 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1357 return IXGBE_ERR_FDIR_REINIT_FAILED;
1358 }
1359
1360 /* Clear FDIR statistics registers (read to clear) */
1361 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1362 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1363 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1364 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1365 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1366
1367 return IXGBE_SUCCESS;
1368 }
1369
1370 /**
1371 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1372 * @hw: pointer to hardware structure
1373 * @fdirctrl: value to write to flow director control register
1374 **/
1375 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1376 {
1377 int i;
1378
1379 DEBUGFUNC("ixgbe_fdir_enable_82599");
1380
1381 /* Prime the keys for hashing */
1382 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1383 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1384
1385 /*
1386 * Poll init-done after we write the register. Estimated times:
1387 * 10G: PBALLOC = 11b, timing is 60us
1388 * 1G: PBALLOC = 11b, timing is 600us
1389 * 100M: PBALLOC = 11b, timing is 6ms
1390 *
 * Multiply these timings by 4 if under full Rx load
1392 *
1393 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1394 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1395 * this might not finish in our poll time, but we can live with that
1396 * for now.
1397 */
1398 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1399 IXGBE_WRITE_FLUSH(hw);
1400 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1401 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1402 IXGBE_FDIRCTRL_INIT_DONE)
1403 break;
1404 msec_delay(1);
1405 }
1406
1407 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1408 DEBUGOUT("Flow Director poll time exceeded!\n");
1409 }
1410
1411 /**
1412 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1413 * @hw: pointer to hardware structure
1414 * @fdirctrl: value to write to flow director control register, initially
1415 * contains just the value of the Rx packet buffer allocation
1416 **/
1417 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1418 {
1419 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1420
1421 /*
1422 * Continue setup of fdirctrl register bits:
1423 * Move the flexible bytes to use the ethertype - shift 6 words
1424 * Set the maximum length per hash bucket to 0xA filters
1425 * Send interrupt when 64 filters are left
1426 */
1427 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1428 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1429 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1430
1431 /* write hashes and fdirctrl register, poll for completion */
1432 ixgbe_fdir_enable_82599(hw, fdirctrl);
1433
1434 return IXGBE_SUCCESS;
1435 }
1436
1437 /**
1438 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1439 * @hw: pointer to hardware structure
1440 * @fdirctrl: value to write to flow director control register, initially
1441 * contains just the value of the Rx packet buffer allocation
1442 **/
1443 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1444 {
1445 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1446
1447 /*
1448 * Continue setup of fdirctrl register bits:
1449 * Turn perfect match filtering on
1450 * Report hash in RSS field of Rx wb descriptor
1451 * Initialize the drop queue
1452 * Move the flexible bytes to use the ethertype - shift 6 words
1453 * Set the maximum length per hash bucket to 0xA filters
1454 * Send interrupt when 64 (0x4 * 16) filters are left
1455 */
1456 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1457 IXGBE_FDIRCTRL_REPORT_STATUS |
1458 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1459 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1460 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1461 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1462
1463 /* write hashes and fdirctrl register, poll for completion */
1464 ixgbe_fdir_enable_82599(hw, fdirctrl);
1465
1466 return IXGBE_SUCCESS;
1467 }
1468
1469 /*
1470 * These defines allow us to quickly generate all of the necessary instructions
1471 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1472 * for values 0 through 15
1473 */
1474 #define IXGBE_ATR_COMMON_HASH_KEY \
1475 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1476 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1477 do { \
1478 u32 n = (_n); \
1479 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1480 common_hash ^= lo_hash_dword >> n; \
1481 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1482 bucket_hash ^= lo_hash_dword >> n; \
1483 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1484 sig_hash ^= lo_hash_dword << (16 - n); \
1485 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1486 common_hash ^= hi_hash_dword >> n; \
1487 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1488 bucket_hash ^= hi_hash_dword >> n; \
1489 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1490 sig_hash ^= hi_hash_dword << (16 - n); \
1491 } while (0);
1492
1493 /**
1494 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: unique input dword to compute the hash on
 * @common: compressed common input dword
 *
 * This function contains several optimizations, such as unwinding all of
 * the loops, letting the compiler work out all of the conditional ifs since
 * the keys are static defines, and computing two keys at once since the
 * hashed dword stream will be the same for both keys.
1502 **/
1503 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1504 union ixgbe_atr_hash_dword common)
1505 {
1506 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1507 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1508
1509 /* record the flow_vm_vlan bits as they are a key part to the hash */
1510 flow_vm_vlan = IXGBE_NTOHL(input.dword);
1511
1512 /* generate common hash dword */
1513 hi_hash_dword = IXGBE_NTOHL(common.dword);
1514
1515 /* low dword is word swapped version of common */
1516 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1517
1518 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1519 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1520
1521 /* Process bits 0 and 16 */
1522 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1523
1524 /*
1525 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1526 * delay this because bit 0 of the stream should not be processed
1527 * so we do not add the vlan until after bit 0 was processed
1528 */
1529 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1530
	/* Process the remaining 30 bits of the key */
1532 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1533 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1534 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1535 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1536 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1537 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1538 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1539 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1540 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1541 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1542 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1543 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1544 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1545 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1546 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1547
1548 /* combine common_hash result with signature and bucket hashes */
1549 bucket_hash ^= common_hash;
1550 bucket_hash &= IXGBE_ATR_HASH_MASK;
1551
1552 sig_hash ^= common_hash << 16;
1553 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1554
1555 /* return completed signature hash */
1556 return sig_hash ^ bucket_hash;
1557 }
1558
1559 /**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1561 * @hw: pointer to hardware structure
1562 * @input: unique input dword
1563 * @common: compressed common input dword
1564 * @queue: queue index to direct traffic to
1565 **/
1566 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1567 union ixgbe_atr_hash_dword input,
1568 union ixgbe_atr_hash_dword common,
1569 u8 queue)
1570 {
1571 u64 fdirhashcmd;
1572 u32 fdircmd;
1573
1574 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1575
1576 /*
1577 * Get the flow_type in order to program FDIRCMD properly
1578 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1579 */
1580 switch (input.formatted.flow_type) {
1581 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1582 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1583 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1584 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1585 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1586 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1587 break;
1588 default:
1589 DEBUGOUT(" Error on flow type input\n");
1590 return IXGBE_ERR_CONFIG;
1591 }
1592
1593 /* configure FDIRCMD register */
1594 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1595 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1596 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1597 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1598
1599 /*
1600 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1601 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1602 */
1603 fdirhashcmd = (u64)fdircmd << 32;
1604 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1605 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1606
1607 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1608
1609 return IXGBE_SUCCESS;
1610 }
1611
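/*
 * Same bit-by-bit key processing as IXGBE_COMPUTE_SIG_HASH_ITERATION above,
 * but only the bucket hash is needed for perfect filters.
 */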
1612 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1613 do { \
1614 u32 n = (_n); \
1615 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1616 bucket_hash ^= lo_hash_dword >> n; \
1617 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1618 bucket_hash ^= hi_hash_dword >> n; \
1619 } while (0);
1620
1621 /**
1622 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1623 * @atr_input: input bitstream to compute the hash on
1624 * @input_mask: mask for the input bitstream
1625 *
 * This function serves two main purposes. First it applies the input_mask
1627 * to the atr_input resulting in a cleaned up atr_input data stream.
1628 * Secondly it computes the hash and stores it in the bkt_hash field at
1629 * the end of the input byte stream. This way it will be available for
1630 * future use without needing to recompute the hash.
1631 **/
1632 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1633 union ixgbe_atr_input *input_mask)
1634 {
1635
1636 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1637 u32 bucket_hash = 0;
1638
1639 /* Apply masks to input data */
1640 input->dword_stream[0] &= input_mask->dword_stream[0];
1641 input->dword_stream[1] &= input_mask->dword_stream[1];
1642 input->dword_stream[2] &= input_mask->dword_stream[2];
1643 input->dword_stream[3] &= input_mask->dword_stream[3];
1644 input->dword_stream[4] &= input_mask->dword_stream[4];
1645 input->dword_stream[5] &= input_mask->dword_stream[5];
1646 input->dword_stream[6] &= input_mask->dword_stream[6];
1647 input->dword_stream[7] &= input_mask->dword_stream[7];
1648 input->dword_stream[8] &= input_mask->dword_stream[8];
1649 input->dword_stream[9] &= input_mask->dword_stream[9];
1650 input->dword_stream[10] &= input_mask->dword_stream[10];
1651
1652 /* record the flow_vm_vlan bits as they are a key part to the hash */
1653 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1654
1655 /* generate common hash dword */
1656 hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1657 input->dword_stream[2] ^
1658 input->dword_stream[3] ^
1659 input->dword_stream[4] ^
1660 input->dword_stream[5] ^
1661 input->dword_stream[6] ^
1662 input->dword_stream[7] ^
1663 input->dword_stream[8] ^
1664 input->dword_stream[9] ^
1665 input->dword_stream[10]);
1666
1667 /* low dword is word swapped version of common */
1668 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1669
1670 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1671 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1672
1673 /* Process bits 0 and 16 */
1674 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1675
1676 /*
1677 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1678 * delay this because bit 0 of the stream should not be processed
1679 * so we do not add the vlan until after bit 0 was processed
1680 */
1681 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1682
	/* Process the remaining 30 bits of the key */
1684 IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1685 IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1686 IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1687 IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1688 IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1689 IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1690 IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1691 IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1692 IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1693 IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1694 IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1695 IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1696 IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1697 IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1698 IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1699
1700 /*
1701 * Limit hash to 13 bits since max bucket count is 8K.
1702 * Store result at the end of the input stream.
1703 */
1704 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1705 }
1706
1707 /**
1708 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1709 * @input_mask: mask to be bit swapped
1710 *
1711 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 affects bit 0, 14 affects bit 1, 13 affects bit 2, and so on. In order to
1713 * generate a correctly swapped value we need to bit swap the mask and that
1714 * is what is accomplished by this function.
1715 **/
1716 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1717 {
1718 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1719 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1720 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1721 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1722 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1723 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1724 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1725 }
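
/*
 * Worked example (illustrative only): the four swap steps above bit-reverse
 * each 16-bit half of the combined value in place, so a destination port
 * mask of 0x0001 becomes 0x8000 and a source port mask of 0x00FF becomes
 * 0xFF00.  Assuming IXGBE_FDIRTCPM_DPORTM_SHIFT places the destination mask
 * in the upper half, the returned value would then be 0x8000FF00.
 */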
1726
1727 /*
1728 * These two macros are meant to address the fact that we have registers
1729 * that are either all or in part big-endian. As a result on big-endian
1730 * systems we will end up byte swapping the value to little-endian before
1731 * it is byte swapped again and written to the hardware in the original
1732 * big-endian format.
1733 */
1734 #define IXGBE_STORE_AS_BE32(_value) \
1735 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1736 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1737
1738 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1739 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1740
1741 #define IXGBE_STORE_AS_BE16(_value) \
1742 IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
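
/*
 * Worked example (illustrative only): IXGBE_STORE_AS_BE32(0x11223344)
 * evaluates to 0x44332211, i.e. a plain byte swap.  IXGBE_WRITE_REG_BE32()
 * combines IXGBE_NTOHL() with that swap so that, per the comment above,
 * big-endian register values reach the hardware in their original layout
 * regardless of host byte order.
 */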
1743
1744 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1745 union ixgbe_atr_input *input_mask)
1746 {
1747 /* mask IPv6 since it is currently not supported */
1748 u32 fdirm = IXGBE_FDIRM_DIPv6;
1749 u32 fdirtcpm;
1750
 1751 	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
1752
1753 /*
1754 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1755 * are zero, then assume a full mask for that field. Also assume that
1756 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1757 * cannot be masked out in this implementation.
1758 *
1759 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1760 * point in time.
1761 */
1762
1763 /* verify bucket hash is cleared on hash generation */
1764 if (input_mask->formatted.bkt_hash)
1765 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1766
1767 /* Program FDIRM and verify partial masks */
1768 switch (input_mask->formatted.vm_pool & 0x7F) {
1769 case 0x0:
 1770 		fdirm |= IXGBE_FDIRM_POOL;	/* FALLTHROUGH */
1771 case 0x7F:
1772 break;
1773 default:
1774 DEBUGOUT(" Error on vm pool mask\n");
1775 return IXGBE_ERR_CONFIG;
1776 }
1777
1778 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1779 case 0x0:
1780 fdirm |= IXGBE_FDIRM_L4P;
1781 if (input_mask->formatted.dst_port ||
1782 input_mask->formatted.src_port) {
1783 DEBUGOUT(" Error on src/dst port mask\n");
1784 return IXGBE_ERR_CONFIG;
 1785 		}	/* FALLTHROUGH */
1786 case IXGBE_ATR_L4TYPE_MASK:
1787 break;
1788 default:
1789 DEBUGOUT(" Error on flow type mask\n");
1790 return IXGBE_ERR_CONFIG;
1791 }
1792
1793 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1794 case 0x0000:
1795 /* mask VLAN ID, fall through to mask VLAN priority */
1796 fdirm |= IXGBE_FDIRM_VLANID;
1797 case 0x0FFF:
1798 /* mask VLAN priority */
1799 fdirm |= IXGBE_FDIRM_VLANP;
1800 break;
1801 case 0xE000:
1802 /* mask VLAN ID only, fall through */
1803 fdirm |= IXGBE_FDIRM_VLANID;
1804 case 0xEFFF:
1805 /* no VLAN fields masked */
1806 break;
1807 default:
1808 DEBUGOUT(" Error on VLAN mask\n");
1809 return IXGBE_ERR_CONFIG;
1810 }
1811
1812 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1813 case 0x0000:
1814 /* Mask Flex Bytes, fall through */
1815 fdirm |= IXGBE_FDIRM_FLEX;
1816 case 0xFFFF:
1817 break;
1818 default:
1819 DEBUGOUT(" Error on flexible byte mask\n");
1820 return IXGBE_ERR_CONFIG;
1821 }
1822
1823 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1824 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1825
1826 /* store the TCP/UDP port masks, bit reversed from port layout */
1827 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1828
1829 /* write both the same so that UDP and TCP use the same mask */
1830 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1831 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1832
 1833 	/* store source and destination IP masks (big-endian) */
1834 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1835 ~input_mask->formatted.src_ip[0]);
1836 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1837 ~input_mask->formatted.dst_ip[0]);
1838
1839 return IXGBE_SUCCESS;
1840 }
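
/*
 * Minimal usage sketch (not driver code): a mask for perfect TCP/UDP IPv4
 * filters that matches full source/destination addresses and ports, with
 * VM pool, VLAN and flex bytes masked out.  All field names come from
 * union ixgbe_atr_input as used above.
 *
 *	union ixgbe_atr_input mask;
 *
 *	memset(&mask, 0, sizeof(mask));
 *	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_MASK;
 *	mask.formatted.src_ip[0] = 0xFFFFFFFF;
 *	mask.formatted.dst_ip[0] = 0xFFFFFFFF;
 *	mask.formatted.src_port = 0xFFFF;
 *	mask.formatted.dst_port = 0xFFFF;
 *	if (ixgbe_fdir_set_input_mask_82599(hw, &mask) != IXGBE_SUCCESS)
 *		return IXGBE_ERR_CONFIG;
 */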
1841
1842 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1843 union ixgbe_atr_input *input,
1844 u16 soft_id, u8 queue)
1845 {
1846 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1847
1848 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1849
1850 /* currently IPv6 is not supported, must be programmed with 0 */
1851 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1852 input->formatted.src_ip[0]);
1853 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1854 input->formatted.src_ip[1]);
1855 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1856 input->formatted.src_ip[2]);
1857
1858 /* record the source address (big-endian) */
1859 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1860
1861 /* record the first 32 bits of the destination address (big-endian) */
1862 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1863
 1864 	/* record source and destination port (little-endian) */
1865 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1866 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1867 fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1868 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1869
 1870 	/* record vlan (little-endian) and flex_bytes (big-endian) */
1871 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1872 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1873 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1874 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1875
1876 /* configure FDIRHASH register */
1877 fdirhash = input->formatted.bkt_hash;
1878 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1879 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1880
1881 /*
1882 * flush all previous writes to make certain registers are
1883 * programmed prior to issuing the command
1884 */
1885 IXGBE_WRITE_FLUSH(hw);
1886
1887 /* configure FDIRCMD register */
1888 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1889 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1890 if (queue == IXGBE_FDIR_DROP_QUEUE)
1891 fdircmd |= IXGBE_FDIRCMD_DROP;
1892 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1893 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1894 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1895
1896 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1897
1898 return IXGBE_SUCCESS;
1899 }
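
/*
 * Worked example (illustrative only): FDIRHASH as programmed above carries
 * the 13-bit bucket hash in its low bits with the caller's soft_id above it,
 * so for bkt_hash = 0x0ABC and soft_id = 1 the value written is simply
 *
 *	fdirhash = 0x0ABC | (1 << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT);
 */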
1900
1901 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1902 union ixgbe_atr_input *input,
1903 u16 soft_id)
1904 {
1905 u32 fdirhash;
1906 u32 fdircmd = 0;
1907 u32 retry_count;
1908 s32 err = IXGBE_SUCCESS;
1909
1910 /* configure FDIRHASH register */
1911 fdirhash = input->formatted.bkt_hash;
1912 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1913 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1914
1915 /* flush hash to HW */
1916 IXGBE_WRITE_FLUSH(hw);
1917
1918 /* Query if filter is present */
1919 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1920
1921 for (retry_count = 10; retry_count; retry_count--) {
1922 /* allow 10us for query to process */
1923 usec_delay(10);
1924 /* verify query completed successfully */
1925 fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1926 if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1927 break;
1928 }
1929
1930 if (!retry_count)
1931 err = IXGBE_ERR_FDIR_REINIT_FAILED;
1932
1933 /* if filter exists in hardware then remove it */
1934 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1935 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1936 IXGBE_WRITE_FLUSH(hw);
1937 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1938 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1939 }
1940
1941 return err;
1942 }
1943
1944 /**
1945 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1946 * @hw: pointer to hardware structure
1947 * @input: input bitstream
1948 * @input_mask: mask for the input bitstream
1949 * @soft_id: software index for the filters
1950 * @queue: queue index to direct traffic to
1951 *
1952 * Note that the caller to this function must lock before calling, since the
1953 * hardware writes must be protected from one another.
1954 **/
1955 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1956 union ixgbe_atr_input *input,
1957 union ixgbe_atr_input *input_mask,
1958 u16 soft_id, u8 queue)
1959 {
1960 s32 err = IXGBE_ERR_CONFIG;
1961
1962 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1963
1964 /*
1965 * Check flow_type formatting, and bail out before we touch the hardware
1966 * if there's a configuration issue
1967 */
1968 switch (input->formatted.flow_type) {
1969 case IXGBE_ATR_FLOW_TYPE_IPV4:
1970 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1971 if (input->formatted.dst_port || input->formatted.src_port) {
1972 DEBUGOUT(" Error on src/dst port\n");
1973 return IXGBE_ERR_CONFIG;
1974 }
1975 break;
1976 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1977 if (input->formatted.dst_port || input->formatted.src_port) {
1978 DEBUGOUT(" Error on src/dst port\n");
1979 return IXGBE_ERR_CONFIG;
 1980 		}	/* FALLTHROUGH */
1981 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1982 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1983 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1984 IXGBE_ATR_L4TYPE_MASK;
1985 break;
1986 default:
1987 DEBUGOUT(" Error on flow type input\n");
1988 return err;
1989 }
1990
1991 /* program input mask into the HW */
1992 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
1993 if (err)
1994 return err;
1995
1996 /* apply mask and compute/store hash */
1997 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1998
1999 /* program filters to filter memory */
2000 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2001 soft_id, queue);
2002 }
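
/*
 * Minimal usage sketch (not driver code): add a perfect filter and later
 * remove it with the same soft_id.  The helper fill_tcpv4_filter(), the
 * soft_id value 42 and queue 0 are assumptions for this example only; the
 * caller must hold the lock mentioned above around the hardware writes, and
 * reusing the same input works because the computed bkt_hash is stored back
 * into it.
 *
 *	union ixgbe_atr_input input, mask;
 *
 *	fill_tcpv4_filter(&input, &mask);
 *	if (ixgbe_fdir_add_perfect_filter_82599(hw, &input, &mask,
 *	    42, 0) == IXGBE_SUCCESS) {
 *		...
 *		ixgbe_fdir_erase_perfect_filter_82599(hw, &input, 42);
 *	}
 */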
2003
2004 /**
2005 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2006 * @hw: pointer to hardware structure
2007 * @reg: analog register to read
2008 * @val: read value
2009 *
 2010  * Performs a read operation on the specified Omer analog register.
2011 **/
2012 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2013 {
2014 u32 core_ctl;
2015
2016 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2017
2018 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2019 (reg << 8));
2020 IXGBE_WRITE_FLUSH(hw);
2021 usec_delay(10);
2022 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2023 *val = (u8)core_ctl;
2024
2025 return IXGBE_SUCCESS;
2026 }
2027
2028 /**
2029 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2030 * @hw: pointer to hardware structure
 2031  * @reg: analog register to write
2032 * @val: value to write
2033 *
 2034  * Performs a write operation on the specified Omer analog register.
2035 **/
2036 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2037 {
2038 u32 core_ctl;
2039
2040 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2041
2042 core_ctl = (reg << 8) | val;
2043 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2044 IXGBE_WRITE_FLUSH(hw);
2045 usec_delay(10);
2046
2047 return IXGBE_SUCCESS;
2048 }
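
/*
 * Minimal usage sketch (not driver code): the two accessors above can be
 * paired for a read-modify-write of an analog register.  The register
 * offset and bit used below are made up purely for illustration.
 *
 *	u8 val;
 *
 *	ixgbe_read_analog_reg8_82599(hw, 0x0B, &val);
 *	val |= 0x01;
 *	ixgbe_write_analog_reg8_82599(hw, 0x0B, val);
 */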
2049
2050 /**
2051 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2052 * @hw: pointer to hardware structure
2053 *
2054 * Starts the hardware using the generic start_hw function
 2055  * and the generation-2 start_hw function, then performs
 2056  * any revision-specific operations.
2057 **/
2058 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2059 {
2060 s32 ret_val = IXGBE_SUCCESS;
2061
2062 DEBUGFUNC("ixgbe_start_hw_82599");
2063
2064 ret_val = ixgbe_start_hw_generic(hw);
2065 if (ret_val != IXGBE_SUCCESS)
2066 goto out;
2067
2068 ret_val = ixgbe_start_hw_gen2(hw);
2069 if (ret_val != IXGBE_SUCCESS)
2070 goto out;
2071
2072 /* We need to run link autotry after the driver loads */
2073 hw->mac.autotry_restart = TRUE;
2074
2075 if (ret_val == IXGBE_SUCCESS)
2076 ret_val = ixgbe_verify_fw_version_82599(hw);
2077 out:
2078 return ret_val;
2079 }
2080
2081 /**
2082 * ixgbe_identify_phy_82599 - Get physical layer module
2083 * @hw: pointer to hardware structure
2084 *
2085 * Determines the physical layer module found on the current adapter.
2086 * If PHY already detected, maintains current PHY type in hw struct,
2087 * otherwise executes the PHY detection routine.
2088 **/
2089 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2090 {
2091 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2092
2093 DEBUGFUNC("ixgbe_identify_phy_82599");
2094
 2095 	/* Detect PHY if not done already - returns success if already detected. */
2096 status = ixgbe_identify_phy_generic(hw);
2097 if (status != IXGBE_SUCCESS) {
2098 /* 82599 10GBASE-T requires an external PHY */
2099 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2100 goto out;
2101 else
2102 status = ixgbe_identify_module_generic(hw);
2103 }
2104
2105 /* Set PHY type none if no PHY detected */
2106 if (hw->phy.type == ixgbe_phy_unknown) {
2107 hw->phy.type = ixgbe_phy_none;
2108 status = IXGBE_SUCCESS;
2109 }
2110
2111 /* Return error if SFP module has been detected but is not supported */
2112 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2113 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2114
2115 out:
2116 return status;
2117 }
2118
2119 /**
2120 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2121 * @hw: pointer to hardware structure
2122 *
2123 * Determines physical layer capabilities of the current configuration.
2124 **/
2125 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2126 {
2127 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2128 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2129 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2130 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2131 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2132 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2133 u16 ext_ability = 0;
2134 u8 comp_codes_10g = 0;
2135 u8 comp_codes_1g = 0;
2136
 2137 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
2138
2139 hw->phy.ops.identify(hw);
2140
2141 switch (hw->phy.type) {
2142 case ixgbe_phy_tn:
2143 case ixgbe_phy_cu_unknown:
2144 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2145 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2146 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2147 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2148 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2149 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2150 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2151 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2152 goto out;
2153 default:
2154 break;
2155 }
2156
2157 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2158 case IXGBE_AUTOC_LMS_1G_AN:
2159 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2160 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2161 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2162 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2163 goto out;
2164 } else
2165 /* SFI mode so read SFP module */
2166 goto sfp_check;
2167 break;
2168 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2169 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2170 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2171 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2172 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2173 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2174 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2175 goto out;
2176 break;
2177 case IXGBE_AUTOC_LMS_10G_SERIAL:
2178 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2179 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2180 goto out;
2181 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2182 goto sfp_check;
2183 break;
2184 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2185 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2186 if (autoc & IXGBE_AUTOC_KX_SUPP)
2187 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2188 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2189 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2190 if (autoc & IXGBE_AUTOC_KR_SUPP)
2191 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2192 goto out;
2193 break;
2194 default:
2195 goto out;
2196 break;
2197 }
2198
2199 sfp_check:
2200 /* SFP check must be done last since DA modules are sometimes used to
 2201 	 * test KR mode - we need to identify KR mode correctly before the SFP
 2202 	 * module.  Call identify_sfp because the pluggable module may have changed */
2203 hw->phy.ops.identify_sfp(hw);
2204 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2205 goto out;
2206
2207 switch (hw->phy.type) {
2208 case ixgbe_phy_sfp_passive_tyco:
2209 case ixgbe_phy_sfp_passive_unknown:
2210 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2211 break;
2212 case ixgbe_phy_sfp_ftl_active:
2213 case ixgbe_phy_sfp_active_unknown:
2214 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2215 break;
2216 case ixgbe_phy_sfp_avago:
2217 case ixgbe_phy_sfp_ftl:
2218 case ixgbe_phy_sfp_intel:
2219 case ixgbe_phy_sfp_unknown:
2220 hw->phy.ops.read_i2c_eeprom(hw,
2221 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2222 hw->phy.ops.read_i2c_eeprom(hw,
2223 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2224 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2225 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2226 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2227 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2228 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2229 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2230 else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2231 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2232 break;
2233 default:
2234 break;
2235 }
2236
2237 out:
2238 return physical_layer;
2239 }
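
/*
 * Minimal usage sketch (not driver code): the return value is a bitmask of
 * IXGBE_PHYSICAL_LAYER_* flags, so callers test individual bits rather than
 * comparing for equality, e.g.
 *
 *	u32 layer = ixgbe_get_supported_physical_layer_82599(hw);
 *
 *	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
 *		... 10GBASE-SR optics are supported ...
 */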
2240
2241 /**
2242 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2243 * @hw: pointer to hardware structure
2244 * @regval: register value to write to RXCTRL
2245 *
2246 * Enables the Rx DMA unit for 82599
2247 **/
2248 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2249 {
2250
2251 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2252
2253 /*
2254 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2255 * If traffic is incoming before we enable the Rx unit, it could hang
2256 * the Rx DMA unit. Therefore, make sure the security engine is
2257 * completely disabled prior to enabling the Rx unit.
2258 */
2259
2260 hw->mac.ops.disable_sec_rx_path(hw);
2261
2262 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2263
2264 hw->mac.ops.enable_sec_rx_path(hw);
2265
2266 return IXGBE_SUCCESS;
2267 }
2268
2269 /**
2270 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2271 * @hw: pointer to hardware structure
2272 *
 2273  * Verifies that the installed firmware version is 0.6 or higher
2274 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2275 *
2276 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2277 * if the FW version is not supported.
2278 **/
2279 s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2280 {
2281 s32 status = IXGBE_ERR_EEPROM_VERSION;
2282 u16 fw_offset, fw_ptp_cfg_offset;
2283 u16 fw_version = 0;
2284
2285 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2286
2287 /* firmware check is only necessary for SFI devices */
2288 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2289 status = IXGBE_SUCCESS;
2290 goto fw_version_out;
2291 }
2292
2293 /* get the offset to the Firmware Module block */
2294 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2295
2296 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2297 goto fw_version_out;
2298
2299 /* get the offset to the Pass Through Patch Configuration block */
2300 hw->eeprom.ops.read(hw, (fw_offset +
2301 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2302 &fw_ptp_cfg_offset);
2303
2304 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2305 goto fw_version_out;
2306
2307 /* get the firmware version */
2308 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2309 IXGBE_FW_PATCH_VERSION_4), &fw_version);
2310
2311 if (fw_version > 0x5)
2312 status = IXGBE_SUCCESS;
2313
2314 fw_version_out:
2315 return status;
2316 }
2317
2318 /**
2319 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2320 * @hw: pointer to hardware structure
2321 *
2322 * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2323 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2324 **/
2325 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2326 {
2327 bool lesm_enabled = FALSE;
2328 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2329 s32 status;
2330
2331 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2332
2333 /* get the offset to the Firmware Module block */
2334 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2335
2336 if ((status != IXGBE_SUCCESS) ||
2337 (fw_offset == 0) || (fw_offset == 0xFFFF))
2338 goto out;
2339
2340 /* get the offset to the LESM Parameters block */
2341 status = hw->eeprom.ops.read(hw, (fw_offset +
2342 IXGBE_FW_LESM_PARAMETERS_PTR),
2343 &fw_lesm_param_offset);
2344
2345 if ((status != IXGBE_SUCCESS) ||
2346 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2347 goto out;
2348
2349 /* get the lesm state word */
2350 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2351 IXGBE_FW_LESM_STATE_1),
2352 &fw_lesm_state);
2353
2354 if ((status == IXGBE_SUCCESS) &&
2355 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2356 lesm_enabled = TRUE;
2357
2358 out:
2359 return lesm_enabled;
2360 }
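
/*
 * Minimal usage sketch (not driver code): per the note above, Smart Speed
 * must not be used while the LESM FW module is enabled.  The
 * smart_speed_active field is assumed from the shared ixgbe PHY info
 * structure for illustration only.
 *
 *	if (ixgbe_verify_lesm_fw_enabled_82599(hw))
 *		hw->phy.smart_speed_active = FALSE;
 */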
2361
2362 /**
2363 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2364 * fastest available method
2365 *
2366 * @hw: pointer to hardware structure
2367 * @offset: offset of word in EEPROM to read
2368 * @words: number of words
2369 * @data: word(s) read from the EEPROM
2370 *
2371 * Retrieves 16 bit word(s) read from EEPROM
2372 **/
2373 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2374 u16 words, u16 *data)
2375 {
2376 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2377 s32 ret_val = IXGBE_ERR_CONFIG;
2378
2379 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2380
2381 /*
2382 * If EEPROM is detected and can be addressed using 14 bits,
2383 * use EERD otherwise use bit bang
2384 */
2385 if ((eeprom->type == ixgbe_eeprom_spi) &&
2386 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2387 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2388 data);
2389 else
2390 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2391 words,
2392 data);
2393
2394 return ret_val;
2395 }
2396
2397 /**
2398 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2399 * fastest available method
2400 *
2401 * @hw: pointer to hardware structure
2402 * @offset: offset of word in the EEPROM to read
2403 * @data: word read from the EEPROM
2404 *
2405 * Reads a 16 bit word from the EEPROM
2406 **/
2407 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2408 u16 offset, u16 *data)
2409 {
2410 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2411 s32 ret_val = IXGBE_ERR_CONFIG;
2412
2413 DEBUGFUNC("ixgbe_read_eeprom_82599");
2414
2415 /*
2416 * If EEPROM is detected and can be addressed using 14 bits,
2417 * use EERD otherwise use bit bang
2418 */
2419 if ((eeprom->type == ixgbe_eeprom_spi) &&
2420 (offset <= IXGBE_EERD_MAX_ADDR))
2421 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2422 else
2423 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2424
2425 return ret_val;
2426 }
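
/*
 * Minimal usage sketch (not driver code): these static helpers are normally
 * reached through the hw->eeprom.ops function pointers, so a caller reads a
 * word indirectly, e.g.
 *
 *	u16 fw_offset;
 *
 *	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset) != IXGBE_SUCCESS)
 *		... handle the EEPROM read failure ...
 */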
2427
2428 /**
2429 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2430 *
2431 * @hw: pointer to hardware structure
2432 *
2433 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2434 * full pipeline reset
2435 **/
2436 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2437 {
2438 s32 ret_val;
2439 u32 anlp1_reg = 0;
2440 u32 i, autoc_reg, autoc2_reg;
2441
2442 /* Enable link if disabled in NVM */
2443 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2444 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2445 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2446 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2447 IXGBE_WRITE_FLUSH(hw);
2448 }
2449
2450 autoc_reg = hw->mac.cached_autoc;
2451 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2452 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2453 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
2454 /* Wait for AN to leave state 0 */
2455 for (i = 0; i < 10; i++) {
2456 msec_delay(4);
2457 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2458 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2459 break;
2460 }
2461
2462 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2463 DEBUGOUT("auto negotiation not completed\n");
2464 ret_val = IXGBE_ERR_RESET_FAILED;
2465 goto reset_pipeline_out;
2466 }
2467
2468 ret_val = IXGBE_SUCCESS;
2469
2470 reset_pipeline_out:
2471 /* Write AUTOC register with original LMS field and Restart_AN */
2472 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2473 IXGBE_WRITE_FLUSH(hw);
2474
2475 return ret_val;
2476 }
2477
2478
2479
2480