/*	$NetBSD: ixgbe_x550.c,v 1.5.6.10 2021/09/15 16:38:01 martin Exp $	*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: ixgbe_x550.c,v 1.5.6.10 2021/09/15 16:38:01 martin Exp $");
39
40 #include "ixgbe_x550.h"
41 #include "ixgbe_x540.h"
42 #include "ixgbe_type.h"
43 #include "ixgbe_api.h"
44 #include "ixgbe_common.h"
45 #include "ixgbe_phy.h"
46 #include <dev/mii/mii.h>
47
48 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
49 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
50 ixgbe_link_speed speed,
51 bool autoneg_wait_to_complete);
52 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
53 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
54 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
55
56 /**
57 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
58 * @hw: pointer to hardware structure
59 *
60 * Initialize the function pointers and assign the MAC type for X550.
61 * Does not touch the hardware.
62 **/
63 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
64 {
65 struct ixgbe_mac_info *mac = &hw->mac;
66 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
67 s32 ret_val;
68
69 DEBUGFUNC("ixgbe_init_ops_X550");
70
71 ret_val = ixgbe_init_ops_X540(hw);
72 mac->ops.dmac_config = ixgbe_dmac_config_X550;
73 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
74 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
75 mac->ops.setup_eee = NULL;
76 mac->ops.set_source_address_pruning =
77 ixgbe_set_source_address_pruning_X550;
78 mac->ops.set_ethertype_anti_spoofing =
79 ixgbe_set_ethertype_anti_spoofing_X550;
80
81 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
82 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
83 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
84 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
85 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
86 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
87 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
88 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
89 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
90
91 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
92 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
93 mac->ops.mdd_event = ixgbe_mdd_event_X550;
94 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
95 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
96 mac->ops.disable_rx = ixgbe_disable_rx_x550;
97 /* Manageability interface */
98 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
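	/*
	 * LED handling differs per device: the X550EM_X 1G copper port has
	 * no LED ops, while the copper 10G ports use the T-variant handlers.
	 */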
99 switch (hw->device_id) {
100 case IXGBE_DEV_ID_X550EM_X_1G_T:
101 hw->mac.ops.led_on = NULL;
102 hw->mac.ops.led_off = NULL;
103 break;
104 case IXGBE_DEV_ID_X550EM_X_10G_T:
105 case IXGBE_DEV_ID_X550EM_A_10G_T:
106 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
107 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
108 break;
109 default:
110 break;
111 }
112 return ret_val;
113 }
114
115 /**
116 * ixgbe_read_cs4227 - Read CS4227 register
117 * @hw: pointer to hardware structure
 * @reg: register number to read
119 * @value: pointer to receive value read
120 *
121 * Returns status code
122 **/
123 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
124 {
125 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
126 }
127
128 /**
129 * ixgbe_write_cs4227 - Write CS4227 register
130 * @hw: pointer to hardware structure
131 * @reg: register number to write
132 * @value: value to write to register
133 *
134 * Returns status code
135 **/
136 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
137 {
138 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
139 }
140
141 /**
142 * ixgbe_read_pe - Read register from port expander
143 * @hw: pointer to hardware structure
144 * @reg: register number to read
145 * @value: pointer to receive read value
146 *
147 * Returns status code
148 **/
149 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
150 {
151 s32 status;
152
153 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
154 if (status != IXGBE_SUCCESS)
155 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
156 "port expander access failed with %d\n", status);
157 return status;
158 }
159
160 /**
161 * ixgbe_write_pe - Write register to port expander
162 * @hw: pointer to hardware structure
163 * @reg: register number to write
164 * @value: value to write
165 *
166 * Returns status code
167 **/
168 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
169 {
170 s32 status;
171
172 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
173 if (status != IXGBE_SUCCESS)
174 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
175 "port expander access failed with %d\n", status);
176 return status;
177 }
178
179 /**
180 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
181 * @hw: pointer to hardware structure
182 *
183 * This function assumes that the caller has acquired the proper semaphore.
184 * Returns error code
185 **/
186 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
187 {
188 s32 status;
189 u32 retry;
190 u16 value;
191 u8 reg;
192
193 /* Trigger hard reset. */
	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
195 if (status != IXGBE_SUCCESS)
196 return status;
197 reg |= IXGBE_PE_BIT1;
198 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
199 if (status != IXGBE_SUCCESS)
200 return status;
201
	status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
203 if (status != IXGBE_SUCCESS)
204 return status;
205 reg &= ~IXGBE_PE_BIT1;
206 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
207 if (status != IXGBE_SUCCESS)
208 return status;
209
	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
211 if (status != IXGBE_SUCCESS)
212 return status;
213 reg &= ~IXGBE_PE_BIT1;
214 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
215 if (status != IXGBE_SUCCESS)
216 return status;
217
218 usec_delay(IXGBE_CS4227_RESET_HOLD);
219
	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
221 if (status != IXGBE_SUCCESS)
222 return status;
223 reg |= IXGBE_PE_BIT1;
224 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
225 if (status != IXGBE_SUCCESS)
226 return status;
227
228 /* Wait for the reset to complete. */
229 msec_delay(IXGBE_CS4227_RESET_DELAY);
230 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
231 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
232 &value);
233 if (status == IXGBE_SUCCESS &&
234 value == IXGBE_CS4227_EEPROM_LOAD_OK)
235 break;
236 msec_delay(IXGBE_CS4227_CHECK_DELAY);
237 }
238 if (retry == IXGBE_CS4227_RETRIES) {
239 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
240 "CS4227 reset did not complete.");
241 return IXGBE_ERR_PHY;
242 }
243
244 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
245 if (status != IXGBE_SUCCESS ||
246 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
247 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
248 "CS4227 EEPROM did not load successfully.");
249 return IXGBE_ERR_PHY;
250 }
251
252 return IXGBE_SUCCESS;
253 }
254
255 /**
256 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
257 * @hw: pointer to hardware structure
258 **/
259 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
260 {
261 s32 status = IXGBE_SUCCESS;
262 u32 swfw_mask = hw->phy.phy_semaphore_mask;
263 u16 value = 0;
264 u8 retry;
265
266 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
267 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
268 if (status != IXGBE_SUCCESS) {
269 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
270 "semaphore failed with %d", status);
271 msec_delay(IXGBE_CS4227_CHECK_DELAY);
272 continue;
273 }
274
275 /* Get status of reset flow. */
276 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
277
278 if (status == IXGBE_SUCCESS &&
279 value == IXGBE_CS4227_RESET_COMPLETE)
280 goto out;
281
282 if (status != IXGBE_SUCCESS ||
283 value != IXGBE_CS4227_RESET_PENDING)
284 break;
285
286 /* Reset is pending. Wait and check again. */
287 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
288 msec_delay(IXGBE_CS4227_CHECK_DELAY);
289 }
290
291 /* If still pending, assume other instance failed. */
292 if (retry == IXGBE_CS4227_RETRIES) {
293 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
294 if (status != IXGBE_SUCCESS) {
295 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
296 "semaphore failed with %d", status);
297 return;
298 }
299 }
300
301 /* Reset the CS4227. */
302 status = ixgbe_reset_cs4227(hw);
303 if (status != IXGBE_SUCCESS) {
304 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
305 "CS4227 reset failed: %d", status);
306 goto out;
307 }
308
309 /* Reset takes so long, temporarily release semaphore in case the
310 * other driver instance is waiting for the reset indication.
311 */
312 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
313 IXGBE_CS4227_RESET_PENDING);
314 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
315 msec_delay(10);
316 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
317 if (status != IXGBE_SUCCESS) {
318 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
319 "semaphore failed with %d", status);
320 return;
321 }
322
323 /* Record completion for next time. */
324 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
325 IXGBE_CS4227_RESET_COMPLETE);
326
327 out:
328 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
329 msec_delay(hw->eeprom.semaphore_delay);
330 }
331
332 /**
333 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
334 * @hw: pointer to hardware structure
335 **/
336 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
337 {
338 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
339
340 if (hw->bus.lan_id) {
341 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
342 esdp |= IXGBE_ESDP_SDP1_DIR;
343 }
344 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
345 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
346 IXGBE_WRITE_FLUSH(hw);
347 }
348
349 /**
350 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
351 * @hw: pointer to hardware structure
352 * @reg_addr: 32 bit address of PHY register to read
353 * @dev_type: always unused
354 * @phy_data: Pointer to read data from PHY register
355 */
356 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
357 u32 dev_type, u16 *phy_data)
358 {
359 u32 i, data, command;
360 UNREFERENCED_1PARAMETER(dev_type);
361
362 /* Setup and write the read command */
363 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
364 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
365 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
366 IXGBE_MSCA_MDI_COMMAND;
367
368 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
369
370 /* Check every 10 usec to see if the access completed.
371 * The MDI Command bit will clear when the operation is
372 * complete
373 */
374 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
375 usec_delay(10);
376
377 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
378 if (!(command & IXGBE_MSCA_MDI_COMMAND))
379 break;
380 }
381
382 if (command & IXGBE_MSCA_MDI_COMMAND) {
383 ERROR_REPORT1(IXGBE_ERROR_POLLING,
384 "PHY read command did not complete.\n");
385 return IXGBE_ERR_PHY;
386 }
387
388 /* Read operation is complete. Get the data from MSRWD */
389 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
390 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
391 *phy_data = (u16)data;
392
393 return IXGBE_SUCCESS;
394 }
395
396 /**
397 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
398 * @hw: pointer to hardware structure
399 * @reg_addr: 32 bit PHY register to write
400 * @dev_type: always unused
401 * @phy_data: Data to write to the PHY register
402 */
403 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
404 u32 dev_type, u16 phy_data)
405 {
406 u32 i, command;
407 UNREFERENCED_1PARAMETER(dev_type);
408
409 /* Put the data in the MDI single read and write data register*/
410 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
411
412 /* Setup and write the write command */
413 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
414 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
415 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
416 IXGBE_MSCA_MDI_COMMAND;
417
418 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
419
420 /* Check every 10 usec to see if the access completed.
421 * The MDI Command bit will clear when the operation is
422 * complete
423 */
424 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
425 usec_delay(10);
426
427 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
428 if (!(command & IXGBE_MSCA_MDI_COMMAND))
429 break;
430 }
431
432 if (command & IXGBE_MSCA_MDI_COMMAND) {
433 ERROR_REPORT1(IXGBE_ERROR_POLLING,
434 "PHY write cmd didn't complete\n");
435 return IXGBE_ERR_PHY;
436 }
437
438 return IXGBE_SUCCESS;
439 }
440
441 /**
442 * ixgbe_identify_phy_x550em - Get PHY type based on device id
443 * @hw: pointer to hardware structure
444 *
445 * Returns error code
446 */
447 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
448 {
449 hw->mac.ops.set_lan_id(hw);
450
451 ixgbe_read_mng_if_sel_x550em(hw);
452
453 switch (hw->device_id) {
454 case IXGBE_DEV_ID_X550EM_A_SFP:
455 return ixgbe_identify_sfp_module_X550em(hw);
456 case IXGBE_DEV_ID_X550EM_X_SFP:
457 /* set up for CS4227 usage */
458 ixgbe_setup_mux_ctl(hw);
459 ixgbe_check_cs4227(hw);
460 /* Fallthrough */
461
462 case IXGBE_DEV_ID_X550EM_A_SFP_N:
463 return ixgbe_identify_sfp_module_X550em(hw);
464 break;
465 case IXGBE_DEV_ID_X550EM_X_KX4:
466 hw->phy.type = ixgbe_phy_x550em_kx4;
467 break;
468 case IXGBE_DEV_ID_X550EM_X_XFI:
469 hw->phy.type = ixgbe_phy_x550em_xfi;
470 break;
471 case IXGBE_DEV_ID_X550EM_X_KR:
472 case IXGBE_DEV_ID_X550EM_A_KR:
473 case IXGBE_DEV_ID_X550EM_A_KR_L:
474 hw->phy.type = ixgbe_phy_x550em_kr;
475 break;
476 case IXGBE_DEV_ID_X550EM_A_10G_T:
477 case IXGBE_DEV_ID_X550EM_X_10G_T:
478 return ixgbe_identify_phy_generic(hw);
479 case IXGBE_DEV_ID_X550EM_X_1G_T:
480 hw->phy.type = ixgbe_phy_ext_1g_t;
481 break;
482 case IXGBE_DEV_ID_X550EM_A_1G_T:
483 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
484 hw->phy.type = ixgbe_phy_fw;
485 if (hw->bus.lan_id)
486 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
487 else
488 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
489 break;
490 default:
491 break;
492 }
493 return IXGBE_SUCCESS;
494 }
495
496 /**
497 * ixgbe_fw_phy_activity - Perform an activity on a PHY
498 * @hw: pointer to hardware structure
499 * @activity: activity to perform
500 * @data: Pointer to 4 32-bit words of data
501 */
502 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
503 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
504 {
505 union {
506 struct ixgbe_hic_phy_activity_req cmd;
507 struct ixgbe_hic_phy_activity_resp rsp;
508 } hic;
509 u16 retries = FW_PHY_ACT_RETRIES;
510 s32 rc;
511 u16 i;
512
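	/*
	 * Build the PHY activity request and retry it until the firmware
	 * reports success or the retry budget is exhausted.
	 */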
513 do {
514 memset(&hic, 0, sizeof(hic));
515 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
516 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
517 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
518 hic.cmd.port_number = hw->bus.lan_id;
519 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
520 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
521 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
522
523 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
524 sizeof(hic.cmd),
525 IXGBE_HI_COMMAND_TIMEOUT,
526 TRUE);
527 if (rc != IXGBE_SUCCESS)
528 return rc;
529 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
530 FW_CEM_RESP_STATUS_SUCCESS) {
531 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
532 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
533 return IXGBE_SUCCESS;
534 }
535 usec_delay(20);
536 --retries;
537 } while (retries > 0);
538
539 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
540 }
541
542 static const struct {
543 u16 fw_speed;
544 ixgbe_link_speed phy_speed;
545 } ixgbe_fw_map[] = {
546 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
547 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
548 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
549 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
550 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
551 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
552 };
553
554 /**
555 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
556 * @hw: pointer to hardware structure
557 *
558 * Returns error code
559 */
560 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
561 {
562 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
563 u16 phy_speeds;
564 u16 phy_id_lo;
565 s32 rc;
566 u16 i;
567
568 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
569 if (rc)
570 return rc;
571
572 hw->phy.speeds_supported = 0;
573 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
574 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
575 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
576 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
577 }
578
579 #if 0
580 /*
	 * Don't set autoneg_advertised here, to avoid being inconsistent with
	 * the if_media value.
583 */
584 if (!hw->phy.autoneg_advertised)
585 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
586 #endif
587
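	/* Assemble the PHY ID and revision from the two info words. */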
588 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
589 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
590 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
591 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
592 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
593 return IXGBE_ERR_PHY_ADDR_INVALID;
594 return IXGBE_SUCCESS;
595 }
596
597 /**
598 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
599 * @hw: pointer to hardware structure
600 *
601 * Returns error code
602 */
603 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
604 {
605 if (hw->bus.lan_id)
606 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
607 else
608 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
609
610 hw->phy.type = ixgbe_phy_fw;
611 hw->phy.ops.read_reg = NULL;
612 hw->phy.ops.write_reg = NULL;
613 return ixgbe_get_phy_id_fw(hw);
614 }
615
616 /**
617 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
618 * @hw: pointer to hardware structure
619 *
620 * Returns error code
621 */
622 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
623 {
624 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
625
626 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
627 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
628 }
629
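/**
 * ixgbe_read_phy_reg_x550em - Stub for PHY register reads
 * @hw: pointer to hardware structure
 * @reg_addr: PHY register to read (unused)
 * @device_type: device type (unused)
 * @phy_data: pointer to receive read data (unused)
 *
 * Placeholder used where direct PHY register access is not implemented;
 * always returns IXGBE_NOT_IMPLEMENTED.
 */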
630 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
631 u32 device_type, u16 *phy_data)
632 {
633 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
634 return IXGBE_NOT_IMPLEMENTED;
635 }
636
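/**
 * ixgbe_write_phy_reg_x550em - Stub for PHY register writes
 * @hw: pointer to hardware structure
 * @reg_addr: PHY register to write (unused)
 * @device_type: device type (unused)
 * @phy_data: data to write (unused)
 *
 * Placeholder used where direct PHY register access is not implemented;
 * always returns IXGBE_NOT_IMPLEMENTED.
 */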
637 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
638 u32 device_type, u16 phy_data)
639 {
640 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
641 return IXGBE_NOT_IMPLEMENTED;
642 }
643
644 /**
645 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
646 * @hw: pointer to the hardware structure
647 * @addr: I2C bus address to read from
648 * @reg: I2C device register to read from
649 * @val: pointer to location to receive read value
650 *
651 * Returns an error code on error.
652 **/
653 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
654 u16 reg, u16 *val)
655 {
656 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
657 }
658
659 /**
660 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
661 * @hw: pointer to the hardware structure
662 * @addr: I2C bus address to read from
663 * @reg: I2C device register to read from
664 * @val: pointer to location to receive read value
665 *
666 * Returns an error code on error.
667 **/
668 static s32
669 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
670 u16 reg, u16 *val)
671 {
672 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
673 }
674
675 /**
676 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
677 * @hw: pointer to the hardware structure
678 * @addr: I2C bus address to write to
679 * @reg: I2C device register to write to
680 * @val: value to write
681 *
682 * Returns an error code on error.
683 **/
684 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
685 u8 addr, u16 reg, u16 val)
686 {
687 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
688 }
689
690 /**
691 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
692 * @hw: pointer to the hardware structure
693 * @addr: I2C bus address to write to
694 * @reg: I2C device register to write to
695 * @val: value to write
696 *
697 * Returns an error code on error.
698 **/
699 static s32
700 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
701 u8 addr, u16 reg, u16 val)
702 {
703 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
704 }
705
706 /**
707 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
708 * @hw: pointer to hardware structure
709 *
 * Initialize the function pointers and assign the MAC type for X550EM.
711 * Does not touch the hardware.
712 **/
713 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
714 {
715 struct ixgbe_mac_info *mac = &hw->mac;
716 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
717 struct ixgbe_phy_info *phy = &hw->phy;
718 s32 ret_val;
719
720 DEBUGFUNC("ixgbe_init_ops_X550EM");
721
722 /* Similar to X550 so start there. */
723 ret_val = ixgbe_init_ops_X550(hw);
724
725 /* Since this function eventually calls
	 * ixgbe_init_ops_X540 by design, we are setting
727 * the pointers to NULL explicitly here to overwrite
728 * the values being set in the x540 function.
729 */
730
731 /* Bypass not supported in x550EM */
732 mac->ops.bypass_rw = NULL;
733 mac->ops.bypass_valid_rd = NULL;
734 mac->ops.bypass_set = NULL;
735 mac->ops.bypass_rd_eep = NULL;
736
737 /* FCOE not supported in x550EM */
738 mac->ops.get_san_mac_addr = NULL;
739 mac->ops.set_san_mac_addr = NULL;
740 mac->ops.get_wwn_prefix = NULL;
741 mac->ops.get_fcoe_boot_status = NULL;
742
743 /* IPsec not supported in x550EM */
744 mac->ops.disable_sec_rx_path = NULL;
745 mac->ops.enable_sec_rx_path = NULL;
746
747 /* AUTOC register is not present in x550EM. */
748 mac->ops.prot_autoc_read = NULL;
749 mac->ops.prot_autoc_write = NULL;
750
751 /* X550EM bus type is internal*/
752 hw->bus.type = ixgbe_bus_type_internal;
753 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
754
755
756 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
757 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
758 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
759 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
760 mac->ops.get_supported_physical_layer =
761 ixgbe_get_supported_physical_layer_X550em;
762
763 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
764 mac->ops.setup_fc = ixgbe_setup_fc_generic;
765 else
766 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
767
768 /* PHY */
769 phy->ops.init = ixgbe_init_phy_ops_X550em;
770 switch (hw->device_id) {
771 case IXGBE_DEV_ID_X550EM_A_1G_T:
772 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
773 mac->ops.setup_fc = NULL;
774 phy->ops.identify = ixgbe_identify_phy_fw;
775 phy->ops.set_phy_power = NULL;
776 phy->ops.get_firmware_version = NULL;
777 break;
778 case IXGBE_DEV_ID_X550EM_X_1G_T:
779 mac->ops.setup_fc = NULL;
780 phy->ops.identify = ixgbe_identify_phy_x550em;
781 phy->ops.set_phy_power = NULL;
782 break;
783 default:
784 phy->ops.identify = ixgbe_identify_phy_x550em;
785 }
786
787 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
788 phy->ops.set_phy_power = NULL;
789
790
791 /* EEPROM */
792 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
793 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
794 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
795 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
796 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
797 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
798 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
799 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
800
801 return ret_val;
802 }
803
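/*
 * Workaround for the firmware-managed 1G copper PHY: when 10/100Mbps is
 * requested without auto-negotiation, broken firmware may program the BMCR
 * register incorrectly, so ixgbe_setup_fw_link() below fixes it up by hand.
 */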
804 #define IXGBE_DENVERTON_WA 1
805
806 /**
807 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
808 * @hw: pointer to hardware structure
809 */
810 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
811 {
812 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
813 s32 rc;
814 #ifdef IXGBE_DENVERTON_WA
815 s32 ret_val;
816 u16 phydata;
817 #endif
818 u16 i;
819
820 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
821 return 0;
822
823 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
824 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
825 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
826 return IXGBE_ERR_INVALID_LINK_SETTINGS;
827 }
828
829 switch (hw->fc.requested_mode) {
830 case ixgbe_fc_full:
831 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
832 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
833 break;
834 case ixgbe_fc_rx_pause:
835 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
836 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
837 break;
838 case ixgbe_fc_tx_pause:
839 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
840 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
841 break;
842 default:
843 break;
844 }
845
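	/* Translate the advertised speeds into the firmware's speed bits. */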
846 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
847 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
848 setup[0] |= ixgbe_fw_map[i].fw_speed;
849 }
850 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
851
852 if (hw->phy.eee_speeds_advertised)
853 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
854
855 #ifdef IXGBE_DENVERTON_WA
856 if ((hw->phy.force_10_100_autonego == false)
857 && ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
858 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL))) {
859 /* Don't use auto-nego for 10/100Mbps */
860 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
861 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
862 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
863 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
864 }
865 #endif
866
867 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
868 if (rc)
869 return rc;
870
871 #ifdef IXGBE_DENVERTON_WA
872 if (hw->phy.force_10_100_autonego == true)
873 goto out;
874
875 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
876 if (ret_val != 0)
877 goto out;
878
879 /*
880 * Broken firmware sets BMCR register incorrectly if
881 * FW_PHY_ACT_SETUP_LINK_AN isn't set.
882 * a) FDX may not be set.
883 * b) BMCR_SPEED1 (bit 6) is always cleared.
	 * +--------+------+----------+------+---------------------------+
	 * |request | BMCR | BMCR spd | BMCR |                           |
	 * |        | (HEX)| (in bits)| FDX  |                           |
	 * +--------+------+----------+------+---------------------------+
	 * |  10M   | 0000 | 10M(00)  |  0   |                           |
	 * |  10M   | 2000 | 100M(01) |  0   | (I've never observed this)|
	 * |  10M   | 2100 | 100M(01) |  1   |                           |
	 * | 100M   | 0000 | 10M(00)  |  0   |                           |
	 * | 100M   | 0100 | 10M(00)  |  1   |                           |
	 * +--------+------+----------+------+---------------------------+
894 */
895 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
896 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
897 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
898 && (((phydata & BMCR_FDX) == 0)
899 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
900 phydata = BMCR_FDX;
901 switch (hw->phy.autoneg_advertised) {
902 case IXGBE_LINK_SPEED_10_FULL:
903 phydata |= BMCR_S10;
904 break;
905 case IXGBE_LINK_SPEED_100_FULL:
906 phydata |= BMCR_S100;
907 break;
908 case IXGBE_LINK_SPEED_1GB_FULL:
909 panic("%s: 1GB_FULL is set", __func__);
910 break;
911 default:
912 break;
913 }
914 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
915 if (ret_val != 0)
916 return ret_val;
917 }
918 out:
919 #endif
920 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
921 return IXGBE_ERR_OVERTEMP;
922 return IXGBE_SUCCESS;
923 }
924
925 /**
 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
927 * @hw: pointer to hardware structure
928 *
929 * Called at init time to set up flow control.
930 */
931 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
932 {
933 if (hw->fc.requested_mode == ixgbe_fc_default)
934 hw->fc.requested_mode = ixgbe_fc_full;
935
936 return ixgbe_setup_fw_link(hw);
937 }
938
939 /**
940 * ixgbe_setup_eee_fw - Enable/disable EEE support
941 * @hw: pointer to the HW structure
942 * @enable_eee: boolean flag to enable EEE
943 *
944 * Enable/disable EEE based on enable_eee flag.
945 * This function controls EEE for firmware-based PHY implementations.
946 */
947 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
948 {
949 if (!!hw->phy.eee_speeds_advertised == enable_eee)
950 return IXGBE_SUCCESS;
951 if (enable_eee)
952 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
953 else
954 hw->phy.eee_speeds_advertised = 0;
955 return hw->phy.ops.setup_link(hw);
956 }
957
958 /**
959 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
960 * @hw: pointer to hardware structure
961 *
 * Initialize the function pointers and assign the MAC type for X550EM_a.
963 * Does not touch the hardware.
964 **/
965 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
966 {
967 struct ixgbe_mac_info *mac = &hw->mac;
968 s32 ret_val;
969
970 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
971
972 /* Start with generic X550EM init */
973 ret_val = ixgbe_init_ops_X550EM(hw);
974
975 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
976 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
977 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
978 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
979 } else {
980 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
981 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
982 }
983 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
984 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
985
986 switch (mac->ops.get_media_type(hw)) {
987 case ixgbe_media_type_fiber:
988 mac->ops.setup_fc = NULL;
989 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
990 break;
991 case ixgbe_media_type_backplane:
992 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
993 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
994 break;
995 default:
996 break;
997 }
998
999 switch (hw->device_id) {
1000 case IXGBE_DEV_ID_X550EM_A_1G_T:
1001 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1002 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
1003 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
1004 mac->ops.setup_eee = ixgbe_setup_eee_fw;
1005 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
1006 IXGBE_LINK_SPEED_1GB_FULL;
1007 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1008 break;
1009 default:
1010 break;
1011 }
1012
1013 return ret_val;
1014 }
1015
1016 /**
1017 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1018 * @hw: pointer to hardware structure
1019 *
 * Initialize the function pointers and assign the MAC type for X550EM_x.
1021 * Does not touch the hardware.
1022 **/
1023 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1024 {
1025 struct ixgbe_mac_info *mac = &hw->mac;
1026 struct ixgbe_link_info *link = &hw->link;
1027 s32 ret_val;
1028
1029 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1030
1031 /* Start with generic X550EM init */
1032 ret_val = ixgbe_init_ops_X550EM(hw);
1033
1034 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1035 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1036 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1037 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1038 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1039 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1040 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1041 link->ops.write_link_unlocked =
1042 ixgbe_write_i2c_combined_generic_unlocked;
1043 link->addr = IXGBE_CS4227;
1044
1045 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1046 mac->ops.setup_fc = NULL;
1047 mac->ops.setup_eee = NULL;
1048 mac->ops.init_led_link_act = NULL;
1049 }
1050
1051 return ret_val;
1052 }
1053
1054 /**
1055 * ixgbe_dmac_config_X550
1056 * @hw: pointer to hardware structure
1057 *
 * Configure DMA coalescing. When enabling dmac, the configuration is
 * applied and dmac is activated; when disabling, the dmac enable bit
 * is cleared.
1060 **/
1061 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1062 {
1063 u32 reg, high_pri_tc;
1064
1065 DEBUGFUNC("ixgbe_dmac_config_X550");
1066
1067 /* Disable DMA coalescing before configuring */
1068 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1069 reg &= ~IXGBE_DMACR_DMAC_EN;
1070 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1071
1072 /* Disable DMA Coalescing if the watchdog timer is 0 */
1073 if (!hw->mac.dmac_config.watchdog_timer)
1074 goto out;
1075
1076 ixgbe_dmac_config_tcs_X550(hw);
1077
1078 /* Configure DMA Coalescing Control Register */
1079 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1080
1081 /* Set the watchdog timer in units of 40.96 usec */
1082 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1083 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1084
1085 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1086 /* If fcoe is enabled, set high priority traffic class */
1087 if (hw->mac.dmac_config.fcoe_en) {
1088 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1089 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1090 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1091 }
1092 reg |= IXGBE_DMACR_EN_MNG_IND;
1093
1094 /* Enable DMA coalescing after configuration */
1095 reg |= IXGBE_DMACR_DMAC_EN;
1096 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1097
1098 out:
1099 return IXGBE_SUCCESS;
1100 }
1101
1102 /**
1103 * ixgbe_dmac_config_tcs_X550
1104 * @hw: pointer to hardware structure
1105 *
1106 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1107 * be cleared before configuring.
1108 **/
1109 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1110 {
1111 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1112
1113 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1114
	/* Select Rx packet buffer headroom based on link speed */
1116 switch (hw->mac.dmac_config.link_speed) {
1117 case IXGBE_LINK_SPEED_10_FULL:
1118 case IXGBE_LINK_SPEED_100_FULL:
1119 pb_headroom = IXGBE_DMACRXT_100M;
1120 break;
1121 case IXGBE_LINK_SPEED_1GB_FULL:
1122 pb_headroom = IXGBE_DMACRXT_1G;
1123 break;
1124 default:
1125 pb_headroom = IXGBE_DMACRXT_10G;
1126 break;
1127 }
1128
1129 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1130 IXGBE_MHADD_MFS_SHIFT) / 1024);
1131
1132 /* Set the per Rx packet buffer receive threshold */
1133 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1134 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1135 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1136
1137 if (tc < hw->mac.dmac_config.num_tcs) {
1138 /* Get Rx PB size */
1139 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1140 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1141 IXGBE_RXPBSIZE_SHIFT;
1142
1143 /* Calculate receive buffer threshold in kilobytes */
1144 if (rx_pb_size > pb_headroom)
1145 rx_pb_size = rx_pb_size - pb_headroom;
1146 else
1147 rx_pb_size = 0;
1148
1149 /* Minimum of MFS shall be set for DMCTH */
1150 reg |= (rx_pb_size > maxframe_size_kb) ?
1151 rx_pb_size : maxframe_size_kb;
1152 }
1153 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1154 }
1155 return IXGBE_SUCCESS;
1156 }
1157
1158 /**
1159 * ixgbe_dmac_update_tcs_X550
1160 * @hw: pointer to hardware structure
1161 *
1162 * Disables dmac, updates per TC settings, and then enables dmac.
1163 **/
1164 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1165 {
1166 u32 reg;
1167
1168 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1169
1170 /* Disable DMA coalescing before configuring */
1171 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1172 reg &= ~IXGBE_DMACR_DMAC_EN;
1173 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1174
1175 ixgbe_dmac_config_tcs_X550(hw);
1176
1177 /* Enable DMA coalescing after configuration */
1178 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1179 reg |= IXGBE_DMACR_DMAC_EN;
1180 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1181
1182 return IXGBE_SUCCESS;
1183 }
1184
1185 /**
1186 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1187 * @hw: pointer to hardware structure
1188 *
1189 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1190 * ixgbe_hw struct in order to set up EEPROM access.
1191 **/
1192 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1193 {
1194 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1195 u32 eec;
1196 u16 eeprom_size;
1197
1198 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1199
1200 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1201 eeprom->semaphore_delay = 10;
1202 eeprom->type = ixgbe_flash;
1203
1204 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1205 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1206 IXGBE_EEC_SIZE_SHIFT);
1207 eeprom->word_size = 1 << (eeprom_size +
1208 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1209
1210 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1211 eeprom->type, eeprom->word_size);
1212 }
1213
1214 return IXGBE_SUCCESS;
1215 }
1216
1217 /**
 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1219 * @hw: pointer to hardware structure
1220 * @enable: enable or disable source address pruning
1221 * @pool: Rx pool to set source address pruning for
1222 **/
1223 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1224 unsigned int pool)
1225 {
1226 u64 pfflp;
1227
1228 /* max rx pool is 63 */
1229 if (pool > 63)
1230 return;
1231
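	/*
	 * The per-pool enable bits are split across the PFFLPL/PFFLPH
	 * register pair; combine them into a single 64-bit mask.
	 */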
1232 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1233 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1234
1235 if (enable)
1236 pfflp |= (1ULL << pool);
1237 else
1238 pfflp &= ~(1ULL << pool);
1239
1240 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1241 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1242 }
1243
1244 /**
1245 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1246 * @hw: pointer to hardware structure
1247 * @enable: enable or disable switch for Ethertype anti-spoofing
1248 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1249 *
1250 **/
1251 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1252 bool enable, int vf)
1253 {
1254 int vf_target_reg = vf >> 3;
1255 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1256 u32 pfvfspoof;
1257
1258 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1259
1260 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1261 if (enable)
1262 pfvfspoof |= (1 << vf_target_shift);
1263 else
1264 pfvfspoof &= ~(1 << vf_target_shift);
1265
1266 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1267 }
1268
1269 /**
1270 * ixgbe_iosf_wait - Wait for IOSF command completion
1271 * @hw: pointer to hardware structure
1272 * @ctrl: pointer to location to receive final IOSF control value
1273 *
1274 * Returns failing status on timeout
1275 *
1276 * Note: ctrl can be NULL if the IOSF control register value is not needed
1277 **/
1278 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1279 {
1280 u32 i, command = 0;
1281
1282 /* Check every 10 usec to see if the address cycle completed.
1283 * The SB IOSF BUSY bit will clear when the operation is
1284 * complete
1285 */
1286 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1287 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1288 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1289 break;
1290 usec_delay(10);
1291 }
1292 if (ctrl)
1293 *ctrl = command;
1294 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1295 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1296 return IXGBE_ERR_PHY;
1297 }
1298
1299 return IXGBE_SUCCESS;
1300 }
1301
1302 /**
1303 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1304 * of the IOSF device
1305 * @hw: pointer to hardware structure
1306 * @reg_addr: 32 bit PHY register to write
1307 * @device_type: 3 bit device type
1308 * @data: Data to write to the register
1309 **/
1310 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1311 u32 device_type, u32 data)
1312 {
1313 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1314 u32 command, error __unused;
1315 s32 ret;
1316
1317 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1318 if (ret != IXGBE_SUCCESS)
1319 return ret;
1320
1321 ret = ixgbe_iosf_wait(hw, NULL);
1322 if (ret != IXGBE_SUCCESS)
1323 goto out;
1324
1325 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1326 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1327
1328 /* Write IOSF control register */
1329 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1330
1331 /* Write IOSF data register */
1332 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1333
1334 ret = ixgbe_iosf_wait(hw, &command);
1335
1336 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1337 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1338 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1339 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1340 "Failed to write, error %x\n", error);
1341 ret = IXGBE_ERR_PHY;
1342 }
1343
1344 out:
1345 ixgbe_release_swfw_semaphore(hw, gssr);
1346 return ret;
1347 }
1348
1349 /**
1350 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1351 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to read
1353 * @device_type: 3 bit device type
1354 * @data: Pointer to read data from the register
1355 **/
1356 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1357 u32 device_type, u32 *data)
1358 {
1359 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1360 u32 command, error __unused;
1361 s32 ret;
1362
1363 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1364 if (ret != IXGBE_SUCCESS)
1365 return ret;
1366
1367 ret = ixgbe_iosf_wait(hw, NULL);
1368 if (ret != IXGBE_SUCCESS)
1369 goto out;
1370
1371 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1372 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1373
1374 /* Write IOSF control register */
1375 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1376
1377 ret = ixgbe_iosf_wait(hw, &command);
1378
1379 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1380 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1381 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1382 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1383 "Failed to read, error %x\n", error);
1384 ret = IXGBE_ERR_PHY;
1385 }
1386
1387 if (ret == IXGBE_SUCCESS)
1388 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1389
1390 out:
1391 ixgbe_release_swfw_semaphore(hw, gssr);
1392 return ret;
1393 }
1394
1395 /**
1396 * ixgbe_get_phy_token - Get the token for shared phy access
1397 * @hw: Pointer to hardware structure
1398 */
1399
1400 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1401 {
1402 struct ixgbe_hic_phy_token_req token_cmd;
1403 s32 status;
1404
1405 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1406 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1407 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1408 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1409 token_cmd.port_number = hw->bus.lan_id;
1410 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1411 token_cmd.pad = 0;
1412 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1413 sizeof(token_cmd),
1414 IXGBE_HI_COMMAND_TIMEOUT,
1415 TRUE);
1416 if (status) {
1417 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1418 status);
1419 return status;
1420 }
1421 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1422 return IXGBE_SUCCESS;
1423 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1424 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1425 token_cmd.hdr.cmd_or_resp.ret_status);
1426 return IXGBE_ERR_FW_RESP_INVALID;
1427 }
1428
1429 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1430 return IXGBE_ERR_TOKEN_RETRY;
1431 }
1432
1433 /**
1434 * ixgbe_put_phy_token - Put the token for shared phy access
1435 * @hw: Pointer to hardware structure
1436 */
1437
1438 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1439 {
1440 struct ixgbe_hic_phy_token_req token_cmd;
1441 s32 status;
1442
1443 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1444 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1445 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1446 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1447 token_cmd.port_number = hw->bus.lan_id;
1448 token_cmd.command_type = FW_PHY_TOKEN_REL;
1449 token_cmd.pad = 0;
1450 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1451 sizeof(token_cmd),
1452 IXGBE_HI_COMMAND_TIMEOUT,
1453 TRUE);
1454 if (status)
1455 return status;
1456 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1457 return IXGBE_SUCCESS;
1458
1459 DEBUGOUT("Put PHY Token host interface command failed");
1460 return IXGBE_ERR_FW_RESP_INVALID;
1461 }
1462
1463 /**
1464 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1465 * of the IOSF device
1466 * @hw: pointer to hardware structure
1467 * @reg_addr: 32 bit PHY register to write
1468 * @device_type: 3 bit device type
1469 * @data: Data to write to the register
1470 **/
1471 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1472 u32 device_type, u32 data)
1473 {
1474 struct ixgbe_hic_internal_phy_req write_cmd;
1475 s32 status;
1476 UNREFERENCED_1PARAMETER(device_type);
1477
1478 memset(&write_cmd, 0, sizeof(write_cmd));
1479 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1480 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1481 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1482 write_cmd.port_number = hw->bus.lan_id;
1483 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1484 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1485 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1486
1487 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1488 sizeof(write_cmd),
1489 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1490
1491 return status;
1492 }
1493
1494 /**
1495 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1496 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to read
1498 * @device_type: 3 bit device type
1499 * @data: Pointer to read data from the register
1500 **/
1501 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1502 u32 device_type, u32 *data)
1503 {
1504 union {
1505 struct ixgbe_hic_internal_phy_req cmd;
1506 struct ixgbe_hic_internal_phy_resp rsp;
1507 } hic;
1508 s32 status;
1509 UNREFERENCED_1PARAMETER(device_type);
1510
1511 memset(&hic, 0, sizeof(hic));
1512 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1513 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1514 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1515 hic.cmd.port_number = hw->bus.lan_id;
1516 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1517 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1518
1519 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1520 sizeof(hic.cmd),
1521 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1522
1523 /* Extract the register value from the response. */
1524 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1525
1526 return status;
1527 }
1528
1529 /**
1530 * ixgbe_disable_mdd_X550
1531 * @hw: pointer to hardware structure
1532 *
1533 * Disable malicious driver detection
1534 **/
1535 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1536 {
1537 u32 reg;
1538
1539 DEBUGFUNC("ixgbe_disable_mdd_X550");
1540
1541 /* Disable MDD for TX DMA and interrupt */
1542 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1543 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1544 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1545
1546 /* Disable MDD for RX and interrupt */
1547 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1548 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1549 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1550 }
1551
1552 /**
1553 * ixgbe_enable_mdd_X550
1554 * @hw: pointer to hardware structure
1555 *
1556 * Enable malicious driver detection
1557 **/
1558 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1559 {
1560 u32 reg;
1561
1562 DEBUGFUNC("ixgbe_enable_mdd_X550");
1563
1564 /* Enable MDD for TX DMA and interrupt */
1565 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1566 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1567 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1568
1569 /* Enable MDD for RX and interrupt */
1570 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1571 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1572 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1573 }
1574
1575 /**
1576 * ixgbe_restore_mdd_vf_X550
1577 * @hw: pointer to hardware structure
1578 * @vf: vf index
1579 *
1580 * Restore VF that was disabled during malicious driver detection event
1581 **/
1582 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1583 {
1584 u32 idx, reg, num_qs, start_q, bitmask;
1585
1586 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1587
1588 /* Map VF to queues */
1589 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1590 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1591 case IXGBE_MRQC_VMDQRT8TCEN:
1592 num_qs = 8; /* 16 VFs / pools */
1593 bitmask = 0x000000FF;
1594 break;
1595 case IXGBE_MRQC_VMDQRSS32EN:
1596 case IXGBE_MRQC_VMDQRT4TCEN:
1597 num_qs = 4; /* 32 VFs / pools */
1598 bitmask = 0x0000000F;
1599 break;
1600 default: /* 64 VFs / pools */
1601 num_qs = 2;
1602 bitmask = 0x00000003;
1603 break;
1604 }
1605 start_q = vf * num_qs;
1606
1607 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1608 idx = start_q / 32;
1609 reg = 0;
1610 reg |= (bitmask << (start_q % 32));
1611 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1612 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1613 }
1614
1615 /**
1616 * ixgbe_mdd_event_X550
1617 * @hw: pointer to hardware structure
1618 * @vf_bitmap: vf bitmap of malicious vfs
1619 *
1620 * Handle malicious driver detection event.
1621 **/
1622 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1623 {
1624 u32 wqbr;
1625 u32 i, j, reg, q, shift, vf, idx;
1626
1627 DEBUGFUNC("ixgbe_mdd_event_X550");
1628
1629 /* figure out pool size for mapping to vf's */
1630 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1631 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1632 case IXGBE_MRQC_VMDQRT8TCEN:
1633 shift = 3; /* 16 VFs / pools */
1634 break;
1635 case IXGBE_MRQC_VMDQRSS32EN:
1636 case IXGBE_MRQC_VMDQRT4TCEN:
1637 shift = 2; /* 32 VFs / pools */
1638 break;
1639 default:
1640 shift = 1; /* 64 VFs / pools */
1641 break;
1642 }
1643
1644 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1645 for (i = 0; i < 4; i++) {
1646 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1647 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1648
1649 if (!wqbr)
1650 continue;
1651
1652 /* Get malicious queue */
1653 for (j = 0; j < 32 && wqbr; j++) {
1654
1655 if (!(wqbr & (1 << j)))
1656 continue;
1657
1658 /* Get queue from bitmask */
1659 q = j + (i * 32);
1660
1661 /* Map queue to vf */
1662 vf = (q >> shift);
1663
1664 /* Set vf bit in vf_bitmap */
1665 idx = vf / 32;
1666 vf_bitmap[idx] |= (1 << (vf % 32));
1667 wqbr &= ~(1 << j);
1668 }
1669 }
1670 }
1671
1672 /**
1673 * ixgbe_get_media_type_X550em - Get media type
1674 * @hw: pointer to hardware structure
1675 *
1676 * Returns the media type (fiber, copper, backplane)
1677 */
1678 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1679 {
1680 enum ixgbe_media_type media_type;
1681
1682 DEBUGFUNC("ixgbe_get_media_type_X550em");
1683
1684 /* Detect if there is a copper PHY attached. */
1685 switch (hw->device_id) {
1686 case IXGBE_DEV_ID_X550EM_X_KR:
1687 case IXGBE_DEV_ID_X550EM_X_KX4:
1688 case IXGBE_DEV_ID_X550EM_X_XFI:
1689 case IXGBE_DEV_ID_X550EM_A_KR:
1690 case IXGBE_DEV_ID_X550EM_A_KR_L:
1691 media_type = ixgbe_media_type_backplane;
1692 break;
1693 case IXGBE_DEV_ID_X550EM_X_SFP:
1694 case IXGBE_DEV_ID_X550EM_A_SFP:
1695 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1696 case IXGBE_DEV_ID_X550EM_A_QSFP:
1697 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1698 media_type = ixgbe_media_type_fiber;
1699 break;
1700 case IXGBE_DEV_ID_X550EM_X_1G_T:
1701 case IXGBE_DEV_ID_X550EM_X_10G_T:
1702 case IXGBE_DEV_ID_X550EM_A_10G_T:
1703 media_type = ixgbe_media_type_copper;
1704 break;
1705 case IXGBE_DEV_ID_X550EM_A_SGMII:
1706 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1707 media_type = ixgbe_media_type_backplane;
1708 hw->phy.type = ixgbe_phy_sgmii;
1709 break;
1710 case IXGBE_DEV_ID_X550EM_A_1G_T:
1711 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1712 media_type = ixgbe_media_type_copper;
1713 break;
1714 default:
1715 media_type = ixgbe_media_type_unknown;
1716 break;
1717 }
1718 return media_type;
1719 }
1720
1721 /**
1722 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1723 * @hw: pointer to hardware structure
1724 * @linear: TRUE if SFP module is linear
1725 */
1726 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1727 {
1728 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1729
1730 switch (hw->phy.sfp_type) {
1731 case ixgbe_sfp_type_not_present:
1732 return IXGBE_ERR_SFP_NOT_PRESENT;
1733 case ixgbe_sfp_type_da_cu_core0:
1734 case ixgbe_sfp_type_da_cu_core1:
1735 *linear = TRUE;
1736 break;
1737 case ixgbe_sfp_type_srlr_core0:
1738 case ixgbe_sfp_type_srlr_core1:
1739 case ixgbe_sfp_type_da_act_lmt_core0:
1740 case ixgbe_sfp_type_da_act_lmt_core1:
1741 case ixgbe_sfp_type_1g_sx_core0:
1742 case ixgbe_sfp_type_1g_sx_core1:
1743 case ixgbe_sfp_type_1g_lx_core0:
1744 case ixgbe_sfp_type_1g_lx_core1:
1745 *linear = FALSE;
1746 break;
1747 case ixgbe_sfp_type_unknown:
1748 case ixgbe_sfp_type_1g_cu_core0:
1749 case ixgbe_sfp_type_1g_cu_core1:
1750 default:
1751 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1752 }
1753
1754 return IXGBE_SUCCESS;
1755 }
1756
1757 /**
1758 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1759 * @hw: pointer to hardware structure
1760 *
1761 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1762 **/
1763 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1764 {
1765 s32 status;
1766 bool linear;
1767
1768 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1769
1770 status = ixgbe_identify_module_generic(hw);
1771
1772 if (status != IXGBE_SUCCESS)
1773 return status;
1774
1775 /* Check if SFP module is supported */
1776 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1777
1778 return status;
1779 }
1780
1781 /**
1782 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1783 * @hw: pointer to hardware structure
1784 */
1785 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1786 {
1787 s32 status;
1788 bool linear;
1789
1790 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1791
1792 /* Check if SFP module is supported */
1793 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1794
1795 if (status != IXGBE_SUCCESS)
1796 return status;
1797
1798 ixgbe_init_mac_link_ops_X550em(hw);
1799 hw->phy.ops.reset = NULL;
1800
1801 return IXGBE_SUCCESS;
1802 }
1803
1804 /**
1805 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1806 * internal PHY
1807 * @hw: pointer to hardware structure
1808 **/
1809 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1810 {
1811 s32 status;
1812 u32 link_ctrl;
1813
1814 /* Restart auto-negotiation. */
1815 status = hw->mac.ops.read_iosf_sb_reg(hw,
1816 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1817 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1818
1819 if (status) {
1820 		DEBUGOUT("Failed to read the KRM link control register\n");
1821 return status;
1822 }
1823
1824 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1825 status = hw->mac.ops.write_iosf_sb_reg(hw,
1826 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1827 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1828
1829 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1830 u32 flx_mask_st20;
1831
1832 /* Indicate to FW that AN restart has been asserted */
1833 status = hw->mac.ops.read_iosf_sb_reg(hw,
1834 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1835 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1836
1837 if (status) {
1838 			DEBUGOUT("Failed to read the PMD flex mask register\n");
1839 return status;
1840 }
1841
1842 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1843 status = hw->mac.ops.write_iosf_sb_reg(hw,
1844 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1845 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1846 }
1847
1848 return status;
1849 }
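
/*
 * All of the KRM registers touched in this file sit behind the IOSF
 * sideband, so every update is the same read-modify-write pattern seen
 * above.  A minimal sketch of that pattern (illustrative only, not part of
 * the driver; the helper name krm_set_bits is hypothetical):
 *
 *	static s32 krm_set_bits(struct ixgbe_hw *hw, u32 reg, u32 bits)
 *	{
 *		u32 val;
 *		s32 err;
 *
 *		err = hw->mac.ops.read_iosf_sb_reg(hw, reg,
 *		    IXGBE_SB_IOSF_TARGET_KR_PHY, &val);
 *		if (err)
 *			return err;
 *		val |= bits;
 *		return hw->mac.ops.write_iosf_sb_reg(hw, reg,
 *		    IXGBE_SB_IOSF_TARGET_KR_PHY, val);
 *	}
 *
 * ixgbe_restart_an_internal_phy_x550em() above is this pattern applied to
 * IXGBE_KRM_LINK_CTRL_1 and, on X550EM_a parts, IXGBE_KRM_PMD_FLX_MASK_ST20.
 */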
1850
1851 /**
1852 * ixgbe_setup_sgmii - Set up link for sgmii
1853 * @hw: pointer to hardware structure
1854 * @speed: new link speed
1855 * @autoneg_wait: TRUE when waiting for completion is needed
1856 */
1857 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1858 bool autoneg_wait)
1859 {
1860 struct ixgbe_mac_info *mac = &hw->mac;
1861 u32 lval, sval, flx_val;
1862 s32 rc;
1863
1864 rc = mac->ops.read_iosf_sb_reg(hw,
1865 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1866 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1867 if (rc)
1868 return rc;
1869
1870 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1871 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1872 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1873 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1874 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1875 rc = mac->ops.write_iosf_sb_reg(hw,
1876 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1877 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1878 if (rc)
1879 return rc;
1880
1881 rc = mac->ops.read_iosf_sb_reg(hw,
1882 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1883 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1884 if (rc)
1885 return rc;
1886
1887 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1888 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1889 rc = mac->ops.write_iosf_sb_reg(hw,
1890 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1891 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1892 if (rc)
1893 return rc;
1894
1895 rc = mac->ops.read_iosf_sb_reg(hw,
1896 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1897 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1898 if (rc)
1899 return rc;
1900
1901 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1902 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1903 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1904 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1905 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1906
1907 rc = mac->ops.write_iosf_sb_reg(hw,
1908 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1909 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1910 if (rc)
1911 return rc;
1912
1913 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1914 if (rc)
1915 return rc;
1916
1917 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1918 }
1919
1920 /**
1921 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1922 * @hw: pointer to hardware structure
1923 * @speed: new link speed
1924 * @autoneg_wait: TRUE when waiting for completion is needed
1925 */
1926 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1927 bool autoneg_wait)
1928 {
1929 struct ixgbe_mac_info *mac = &hw->mac;
1930 u32 lval, sval, flx_val;
1931 s32 rc;
1932
1933 rc = mac->ops.read_iosf_sb_reg(hw,
1934 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1935 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1936 if (rc)
1937 return rc;
1938
1939 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1940 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1941 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1942 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1943 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1944 rc = mac->ops.write_iosf_sb_reg(hw,
1945 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1946 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1947 if (rc)
1948 return rc;
1949
1950 rc = mac->ops.read_iosf_sb_reg(hw,
1951 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1952 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1953 if (rc)
1954 return rc;
1955
1956 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1957 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1958 rc = mac->ops.write_iosf_sb_reg(hw,
1959 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1960 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1961 if (rc)
1962 return rc;
1963
1964 rc = mac->ops.write_iosf_sb_reg(hw,
1965 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1966 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1967 if (rc)
1968 return rc;
1969
1970 rc = mac->ops.read_iosf_sb_reg(hw,
1971 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1972 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1973 if (rc)
1974 return rc;
1975
1976 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1977 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1978 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1979 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1980 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1981
1982 rc = mac->ops.write_iosf_sb_reg(hw,
1983 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1984 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1985 if (rc)
1986 return rc;
1987
1988 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1989
1990 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1991 }
1992
1993 /**
1994 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1995 * @hw: pointer to hardware structure
1996 */
1997 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1998 {
1999 struct ixgbe_mac_info *mac = &hw->mac;
2000
2001 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
2002
2003 switch (hw->mac.ops.get_media_type(hw)) {
2004 case ixgbe_media_type_fiber:
2005 /* CS4227 does not support autoneg, so disable the laser control
2006 * functions for SFP+ fiber
2007 */
2008 mac->ops.disable_tx_laser = NULL;
2009 mac->ops.enable_tx_laser = NULL;
2010 mac->ops.flap_tx_laser = NULL;
2011 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2012 mac->ops.set_rate_select_speed =
2013 ixgbe_set_soft_rate_select_speed;
2014
2015 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2016 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2017 mac->ops.setup_mac_link =
2018 ixgbe_setup_mac_link_sfp_x550a;
2019 else
2020 mac->ops.setup_mac_link =
2021 ixgbe_setup_mac_link_sfp_x550em;
2022 break;
2023 case ixgbe_media_type_copper:
2024 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2025 break;
2026 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2027 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2028 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2029 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2030 mac->ops.check_link =
2031 ixgbe_check_mac_link_generic;
2032 } else {
2033 mac->ops.setup_link =
2034 ixgbe_setup_mac_link_t_X550em;
2035 }
2036 } else {
2037 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2038 mac->ops.check_link = ixgbe_check_link_t_X550em;
2039 }
2040 break;
2041 case ixgbe_media_type_backplane:
2042 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2043 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2044 mac->ops.setup_link = ixgbe_setup_sgmii;
2045 break;
2046 default:
2047 break;
2048 }
2049 }
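
/*
 * The routines selected above are only ever reached through the generic
 * mac->ops function pointers; callers do not reference the X550EM-specific
 * names directly.  A hedged usage sketch (illustrative only, error handling
 * omitted):
 *
 *	ixgbe_init_mac_link_ops_X550em(hw);
 *	if (hw->mac.ops.setup_link != NULL)
 *		hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL, FALSE);
 */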
2050
2051 /**
2052 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2053 * @hw: pointer to hardware structure
2054 * @speed: pointer to link speed
2055 * @autoneg: TRUE when autoneg or autotry is enabled
2056 */
2057 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2058 ixgbe_link_speed *speed,
2059 bool *autoneg)
2060 {
2061 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2062
2064 if (hw->phy.type == ixgbe_phy_fw) {
2065 *autoneg = TRUE;
2066 *speed = hw->phy.speeds_supported;
2067 return 0;
2068 }
2069
2070 /* SFP */
2071 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2072
2073 /* CS4227 SFP must not enable auto-negotiation */
2074 *autoneg = FALSE;
2075
2076 /* Check if 1G SFP module. */
2077 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2078 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2079 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2080 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2081 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2082 return IXGBE_SUCCESS;
2083 }
2084
2085 /* Link capabilities are based on SFP */
2086 if (hw->phy.multispeed_fiber)
2087 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2088 IXGBE_LINK_SPEED_1GB_FULL;
2089 else
2090 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2091 } else {
2092 *autoneg = TRUE;
2093
2094 switch (hw->phy.type) {
2095 case ixgbe_phy_x550em_xfi:
2096 *speed = IXGBE_LINK_SPEED_1GB_FULL |
2097 IXGBE_LINK_SPEED_10GB_FULL;
2098 *autoneg = FALSE;
2099 break;
2100 case ixgbe_phy_ext_1g_t:
2101 case ixgbe_phy_sgmii:
2102 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2103 break;
2104 case ixgbe_phy_x550em_kr:
2105 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2106 /* check different backplane modes */
2107 if (hw->phy.nw_mng_if_sel &
2108 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2109 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2110 break;
2111 } else if (hw->device_id ==
2112 IXGBE_DEV_ID_X550EM_A_KR_L) {
2113 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2114 break;
2115 }
2116 }
2117 /* fall through */
2118 default:
2119 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2120 IXGBE_LINK_SPEED_1GB_FULL;
2121 break;
2122 }
2123 }
2124
2125 return IXGBE_SUCCESS;
2126 }
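
/*
 * Typical use of the capability query (illustrative sketch, not part of the
 * driver): callers intersect the returned mask with the speeds they intend
 * to advertise before programming the link.
 *
 *	ixgbe_link_speed speed;
 *	bool autoneg;
 *
 *	if (ixgbe_get_link_capabilities_X550em(hw, &speed, &autoneg) ==
 *	    IXGBE_SUCCESS) {
 *		if (hw->phy.autoneg_advertised)
 *			speed &= hw->phy.autoneg_advertised;
 *		hw->mac.ops.setup_link(hw, speed, TRUE);
 *	}
 */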
2127
2128 /**
2129 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2130 * @hw: pointer to hardware structure
2131 * @lsc: pointer to boolean flag which indicates whether external Base T
2132 * PHY interrupt is lsc
2133 *
2134 * Determine if external Base T PHY interrupt cause is high temperature
2135 * failure alarm or link status change.
2136 *
2137 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2138 * failure alarm, else return PHY access status.
2139 */
2140 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2141 {
2142 u32 status;
2143 u16 reg;
2144
2145 *lsc = FALSE;
2146
2147 /* Vendor alarm triggered */
2148 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2149 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2150 &reg);
2151
2152 if (status != IXGBE_SUCCESS ||
2153 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2154 return status;
2155
2156 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2157 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2158 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2159 &reg);
2160
2161 if (status != IXGBE_SUCCESS ||
2162 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2163 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2164 return status;
2165
2166 /* Global alarm triggered */
2167 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2168 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2169 &reg);
2170
2171 if (status != IXGBE_SUCCESS)
2172 return status;
2173
2174 /* If high temperature failure, then return over temp error and exit */
2175 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2176 /* power down the PHY in case the PHY FW didn't already */
2177 ixgbe_set_copper_phy_power(hw, FALSE);
2178 return IXGBE_ERR_OVERTEMP;
2179 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2180 /* device fault alarm triggered */
2181 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2182 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2183 &reg);
2184
2185 if (status != IXGBE_SUCCESS)
2186 return status;
2187
2188 /* if device fault was due to high temp alarm handle and exit */
2189 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2190 /* power down the PHY in case the PHY FW didn't */
2191 ixgbe_set_copper_phy_power(hw, FALSE);
2192 return IXGBE_ERR_OVERTEMP;
2193 }
2194 }
2195
2196 /* Vendor alarm 2 triggered */
2197 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2198 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2199
2200 if (status != IXGBE_SUCCESS ||
2201 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2202 return status;
2203
2204 /* link connect/disconnect event occurred */
2205 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2206 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2207
2208 if (status != IXGBE_SUCCESS)
2209 return status;
2210
2211 /* Indicate LSC */
2212 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2213 *lsc = TRUE;
2214
2215 return IXGBE_SUCCESS;
2216 }
2217
2218 /**
2219 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2220 * @hw: pointer to hardware structure
2221 *
2222 * Enable link status change and temperature failure alarm for the external
2223 * Base T PHY
2224 *
2225 * Returns PHY access status
2226 */
2227 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2228 {
2229 u32 status;
2230 u16 reg;
2231 bool lsc;
2232
2233 /* Clear interrupt flags */
2234 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2235
2236 /* Enable link status change alarm */
2237
2238 /* Enable the LASI interrupts on X552 devices to receive notifications
2239 * of the link configurations of the external PHY and correspondingly
2240 * support the configuration of the internal iXFI link, since iXFI does
2241 * not support auto-negotiation. This is not required for X553 devices
2242 * having KR support, which performs auto-negotiation and is used as the
2243 * internal link to the external PHY. Hence the check here avoids
2244 * enabling LASI interrupts for X553 devices.
2245 */
2246 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2247 status = hw->phy.ops.read_reg(hw,
2248 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2249 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2250
2251 if (status != IXGBE_SUCCESS)
2252 return status;
2253
2254 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2255
2256 status = hw->phy.ops.write_reg(hw,
2257 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2258 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2259
2260 if (status != IXGBE_SUCCESS)
2261 return status;
2262 }
2263
2264 /* Enable high temperature failure and global fault alarms */
2265 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2266 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2267 &reg);
2268
2269 if (status != IXGBE_SUCCESS)
2270 return status;
2271
2272 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2273 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2274
2275 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2276 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2277 reg);
2278
2279 if (status != IXGBE_SUCCESS)
2280 return status;
2281
2282 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2283 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2284 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2285 &reg);
2286
2287 if (status != IXGBE_SUCCESS)
2288 return status;
2289
2290 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2291 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2292
2293 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2294 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2295 reg);
2296
2297 if (status != IXGBE_SUCCESS)
2298 return status;
2299
2300 /* Enable chip-wide vendor alarm */
2301 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2302 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2303 &reg);
2304
2305 if (status != IXGBE_SUCCESS)
2306 return status;
2307
2308 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2309
2310 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2311 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2312 reg);
2313
2314 return status;
2315 }
2316
2317 /**
2318 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2319 * @hw: pointer to hardware structure
2320 * @speed: link speed
2321 *
2322 * Configures the integrated KR PHY.
2323 **/
2324 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2325 ixgbe_link_speed speed)
2326 {
2327 s32 status;
2328 u32 reg_val;
2329
2330 status = hw->mac.ops.read_iosf_sb_reg(hw,
2331 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2332 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2333 if (status)
2334 return status;
2335
2336 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2337 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2338 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2339
2340 /* Advertise 10G support. */
2341 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2342 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2343
2344 /* Advertise 1G support. */
2345 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2346 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2347
2348 status = hw->mac.ops.write_iosf_sb_reg(hw,
2349 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2350 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2351
2352 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2353 /* Set lane mode to KR auto negotiation */
2354 status = hw->mac.ops.read_iosf_sb_reg(hw,
2355 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2356 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2357
2358 if (status)
2359 return status;
2360
2361 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2362 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2363 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2364 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2365 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2366
2367 status = hw->mac.ops.write_iosf_sb_reg(hw,
2368 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2369 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2370 }
2371
2372 return ixgbe_restart_an_internal_phy_x550em(hw);
2373 }
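
/*
 * The speed argument above is a bit mask, so advertising both backplane
 * rates is a simple OR of the IXGBE_LINK_SPEED_* flags.  Hedged usage
 * sketch (illustrative only):
 *
 *	ixgbe_link_speed adv = IXGBE_LINK_SPEED_10GB_FULL |
 *	    IXGBE_LINK_SPEED_1GB_FULL;
 *
 *	(void)ixgbe_setup_kr_speed_x550em(hw, adv);
 *
 * which sets both IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR and _CAP_KX before
 * restarting auto-negotiation.
 */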
2374
2375 /**
2376 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2377 * @hw: pointer to hardware structure
2378 */
2379 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2380 {
2381 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2382 s32 rc;
2383
2384 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2385 return IXGBE_SUCCESS;
2386
2387 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2388 if (rc)
2389 return rc;
2390 memset(store, 0, sizeof(store));
2391
2392 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2393 if (rc)
2394 return rc;
2395
2396 return ixgbe_setup_fw_link(hw);
2397 }
2398
2399 /**
2400 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2401 * @hw: pointer to hardware structure
2402 */
2403 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2404 {
2405 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2406 s32 rc;
2407
2408 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2409 if (rc)
2410 return rc;
2411
2412 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2413 ixgbe_shutdown_fw_phy(hw);
2414 return IXGBE_ERR_OVERTEMP;
2415 }
2416 return IXGBE_SUCCESS;
2417 }
2418
2419 /**
2420 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2421 * @hw: pointer to hardware structure
2422 *
2423 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2424 * values.
2425 **/
2426 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2427 {
2428 /* Save NW management interface connected on board. This is used
2429 * to determine internal PHY mode.
2430 */
2431 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2432
2433 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2434 * PHY address. This register field has only been used for X552.
2435 */
2436 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2437 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2438 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2439 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2440 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2441 }
2442
2443 return IXGBE_SUCCESS;
2444 }
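
/*
 * The PHY address above is a plain mask-and-shift extraction from
 * NW_MNG_IF_SEL.  Worked example (the field placement is illustrative, not
 * the real register layout): if the field occupied bits 7:3 and the
 * register read back as 0x58, then
 *
 *	addr = (0x58 & (0x1f << 3)) >> 3 = 0x0b
 *
 * so hw->phy.addr would be 11.  The real mask and shift come from
 * IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD and its _SHIFT in ixgbe_type.h.
 */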
2445
2446 /**
2447 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2448 * @hw: pointer to hardware structure
2449 *
2450 * Initialize any function pointers that were not able to be
2451 * set during init_shared_code because the PHY/SFP type was
2452 * not known. Perform the SFP init if necessary.
2453 */
2454 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2455 {
2456 struct ixgbe_phy_info *phy = &hw->phy;
2457 s32 ret_val;
2458
2459 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2460
2461 hw->mac.ops.set_lan_id(hw);
2462 ixgbe_read_mng_if_sel_x550em(hw);
2463
2464 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2465 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2466 ixgbe_setup_mux_ctl(hw);
2467 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2468 }
2469
2470 switch (hw->device_id) {
2471 case IXGBE_DEV_ID_X550EM_A_1G_T:
2472 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2473 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2474 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2475 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2476 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2477 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2478 if (hw->bus.lan_id)
2479 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2480 else
2481 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2482
2483 break;
2484 case IXGBE_DEV_ID_X550EM_A_10G_T:
2485 case IXGBE_DEV_ID_X550EM_A_SFP:
2486 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2487 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2488 if (hw->bus.lan_id)
2489 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2490 else
2491 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2492 break;
2493 case IXGBE_DEV_ID_X550EM_X_SFP:
2494 /* set up for CS4227 usage */
2495 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2496 break;
2497 case IXGBE_DEV_ID_X550EM_X_1G_T:
2498 phy->ops.read_reg_mdi = NULL;
2499 phy->ops.write_reg_mdi = NULL;
2500 break;
2501 default:
2502 break;
2503 }
2504
2505 /* Identify the PHY or SFP module */
2506 ret_val = phy->ops.identify(hw);
2507 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2508 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2509 return ret_val;
2510
2511 /* Setup function pointers based on detected hardware */
2512 ixgbe_init_mac_link_ops_X550em(hw);
2513 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2514 phy->ops.reset = NULL;
2515
2516 /* Set functions pointers based on phy type */
2517 switch (hw->phy.type) {
2518 case ixgbe_phy_x550em_kx4:
2519 phy->ops.setup_link = NULL;
2520 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2521 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2522 break;
2523 case ixgbe_phy_x550em_kr:
2524 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2525 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2526 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2527 break;
2528 case ixgbe_phy_ext_1g_t:
2529 /* link is managed by FW */
2530 phy->ops.setup_link = NULL;
2531 phy->ops.reset = NULL;
2532 break;
2533 case ixgbe_phy_x550em_xfi:
2534 /* link is managed by HW */
2535 phy->ops.setup_link = NULL;
2536 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2537 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2538 break;
2539 case ixgbe_phy_x550em_ext_t:
2540 /* If internal link mode is XFI, then setup iXFI internal link,
2541 * else setup KR now.
2542 */
2543 phy->ops.setup_internal_link =
2544 ixgbe_setup_internal_phy_t_x550em;
2545
2546 /* setup SW LPLU only for first revision of X550EM_x */
2547 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2548 !(IXGBE_FUSES0_REV_MASK &
2549 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2550 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2551
2552 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2553 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2554 break;
2555 case ixgbe_phy_sgmii:
2556 phy->ops.setup_link = NULL;
2557 break;
2558 case ixgbe_phy_fw:
2559 phy->ops.setup_link = ixgbe_setup_fw_link;
2560 phy->ops.reset = ixgbe_reset_phy_fw;
2561 break;
2562 default:
2563 break;
2564 }
2565 return ret_val;
2566 }
2567
2568 /**
2569 * ixgbe_set_mdio_speed - Set MDIO clock speed
2570 * @hw: pointer to hardware structure
2571 */
2572 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2573 {
2574 u32 hlreg0;
2575
2576 switch (hw->device_id) {
2577 case IXGBE_DEV_ID_X550EM_X_10G_T:
2578 case IXGBE_DEV_ID_X550EM_A_SGMII:
2579 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2580 case IXGBE_DEV_ID_X550EM_A_10G_T:
2581 case IXGBE_DEV_ID_X550EM_A_SFP:
2582 case IXGBE_DEV_ID_X550EM_A_QSFP:
2583 /* Config MDIO clock speed before the first MDIO PHY access */
2584 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2585 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2586 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2587 break;
2588 case IXGBE_DEV_ID_X550EM_A_1G_T:
2589 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2590 /* Select fast MDIO clock speed for these devices */
2591 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2592 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2593 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2594 break;
2595 default:
2596 break;
2597 }
2598 }
2599
2600 /**
2601 * ixgbe_reset_hw_X550em - Perform hardware reset
2602 * @hw: pointer to hardware structure
2603 *
2604 * Resets the hardware by resetting the transmit and receive units, masks
2605 * and clears all interrupts, performs a PHY reset, and performs a link
2606 * (MAC) reset.
2607 */
2608 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2609 {
2610 ixgbe_link_speed link_speed;
2611 s32 status;
2612 s32 phy_status = IXGBE_SUCCESS;
2613 u32 ctrl = 0;
2614 u32 i;
2615 bool link_up = FALSE;
2616 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2617
2618 DEBUGFUNC("ixgbe_reset_hw_X550em");
2619
2620 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2621 status = hw->mac.ops.stop_adapter(hw);
2622 if (status != IXGBE_SUCCESS) {
2623 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2624 return status;
2625 }
2626 /* flush pending Tx transactions */
2627 ixgbe_clear_tx_pending(hw);
2628
2629 ixgbe_set_mdio_speed(hw);
2630
2631 /* PHY ops must be identified and initialized prior to reset */
2632 phy_status = hw->phy.ops.init(hw);
2633
2634 if (phy_status)
2635 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2636 phy_status);
2637
2638 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2639 phy_status == IXGBE_ERR_PHY_ADDR_INVALID) {
2640 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2641 goto mac_reset_top;
2642 }
2643
2644 /* start the external PHY */
2645 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2646 status = ixgbe_init_ext_t_x550em(hw);
2647 if (status) {
2648 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2649 status);
2650 return status;
2651 }
2652 }
2653
2654 /* Setup SFP module if there is one present. */
2655 if (hw->phy.sfp_setup_needed) {
2656 phy_status = hw->mac.ops.setup_sfp(hw);
2657 hw->phy.sfp_setup_needed = FALSE;
2658 }
2659
2660 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2661 goto mac_reset_top;
2662
2663 /* Reset PHY */
2664 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2665 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2666 return IXGBE_ERR_OVERTEMP;
2667 }
2668
2669 mac_reset_top:
2670 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2671 * If link reset is used when link is up, it might reset the PHY when
2672 * mng is using it. If link is down or the flag to force full link
2673 * reset is set, then perform link reset.
2674 */
2675 ctrl = IXGBE_CTRL_LNK_RST;
2676 if (!hw->force_full_reset) {
2677 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2678 if (link_up)
2679 ctrl = IXGBE_CTRL_RST;
2680 }
2681
2682 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2683 if (status != IXGBE_SUCCESS) {
2684 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2685 "semaphore failed with %d", status);
2686 return IXGBE_ERR_SWFW_SYNC;
2687 }
2688 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2689 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2690 IXGBE_WRITE_FLUSH(hw);
2691 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2692
2693 /* Poll for reset bit to self-clear meaning reset is complete */
2694 for (i = 0; i < 10; i++) {
2695 usec_delay(1);
2696 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2697 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2698 break;
2699 }
2700
2701 if (ctrl & IXGBE_CTRL_RST_MASK) {
2702 status = IXGBE_ERR_RESET_FAILED;
2703 DEBUGOUT("Reset polling failed to complete.\n");
2704 }
2705
2706 msec_delay(50);
2707
2708 /* Double resets are required for recovery from certain error
2709 * conditions. Between resets, it is necessary to stall to
2710 * allow time for any pending HW events to complete.
2711 */
2712 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2713 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2714 goto mac_reset_top;
2715 }
2716
2717 /* Store the permanent mac address */
2718 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2719
2720 /* Store MAC address from RAR0, clear receive address registers, and
2721 * clear the multicast table. Also reset num_rar_entries to 128,
2722 * since we modify this value when programming the SAN MAC address.
2723 */
2724 hw->mac.num_rar_entries = 128;
2725 hw->mac.ops.init_rx_addrs(hw);
2726
2727 ixgbe_set_mdio_speed(hw);
2728
2729 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2730 ixgbe_setup_mux_ctl(hw);
2731
2732 if (status != IXGBE_SUCCESS)
2733 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2734
2735 if (phy_status != IXGBE_SUCCESS)
2736 status = phy_status;
2737
2738 return status;
2739 }
2740
2741 /**
2742 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2743 * @hw: pointer to hardware structure
2744 */
2745 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2746 {
2747 u32 status;
2748 u16 reg;
2749
2750 status = hw->phy.ops.read_reg(hw,
2751 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2752 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2753 &reg);
2754
2755 if (status != IXGBE_SUCCESS)
2756 return status;
2757
2758 /* If PHY FW reset completed bit is set then this is the first
2759 * SW instance after a power on so the PHY FW must be un-stalled.
2760 */
2761 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2762 status = hw->phy.ops.read_reg(hw,
2763 IXGBE_MDIO_GLOBAL_RES_PR_10,
2764 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2765 &reg);
2766
2767 if (status != IXGBE_SUCCESS)
2768 return status;
2769
2770 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2771
2772 status = hw->phy.ops.write_reg(hw,
2773 IXGBE_MDIO_GLOBAL_RES_PR_10,
2774 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2775 reg);
2776
2777 if (status != IXGBE_SUCCESS)
2778 return status;
2779 }
2780
2781 return status;
2782 }
2783
2784 /**
2785 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2786 * @hw: pointer to hardware structure
2787 **/
2788 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2789 {
2790 /* leave link alone for 2.5G */
2791 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2792 return IXGBE_SUCCESS;
2793
2794 if (ixgbe_check_reset_blocked(hw))
2795 return 0;
2796
2797 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2798 }
2799
2800 /**
2801 * ixgbe_setup_mac_link_sfp_x550em - Setup the internal/external PHY for SFP
2802 * @hw: pointer to hardware structure
2803 * @speed: new link speed
2804 * @autoneg_wait_to_complete: unused
2805 *
2806 * Configure the external PHY and the integrated KR PHY for SFP support.
2807 **/
2808 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2809 ixgbe_link_speed speed,
2810 bool autoneg_wait_to_complete)
2811 {
2812 s32 ret_val;
2813 u16 reg_slice, reg_val;
2814 bool setup_linear = FALSE;
2815 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2816
2817 /* Check if SFP module is supported and linear */
2818 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2819
2820 /* If no SFP module is present, return success: there is no reason to
2821 * configure the CS4227, and an SFP-not-present error is not an accepted
2822 * error in the setup MAC link flow.
2823 */
2824 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2825 return IXGBE_SUCCESS;
2826
2827 if (ret_val != IXGBE_SUCCESS)
2828 return ret_val;
2829
2830 /* Configure internal PHY for KR/KX. */
2831 ixgbe_setup_kr_speed_x550em(hw, speed);
2832
2833 /* Configure CS4227 LINE side to proper mode. */
2834 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2835 (hw->bus.lan_id << 12);
2836 if (setup_linear)
2837 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2838 else
2839 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2840 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2841 reg_val);
2842 return ret_val;
2843 }
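
/*
 * Worked example of the CS4227 slice addressing used above (numbers are
 * illustrative): per-port register slices are 0x1000 apart, so for
 * lan_id == 1 the LINE-side register written is
 *
 *	IXGBE_CS4227_LINE_SPARE24_LSB + (1 << 12)
 *
 * The quad-port CS4223 case handled in ixgbe_setup_mac_link_sfp_x550a()
 * below also folds in the MAC instance, e.g. lan_id == 1 on instance 1
 * gives a slice offset of (1 + (1 << 1)) << 12 == 0x3000.
 */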
2844
2845 /**
2846 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2847 * @hw: pointer to hardware structure
2848 * @speed: the link speed to force
2849 *
2850 * Configures the integrated PHY for native SFI mode. Used to connect the
2851 * internal PHY directly to an SFP cage, without autonegotiation.
2852 **/
2853 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2854 {
2855 struct ixgbe_mac_info *mac = &hw->mac;
2856 s32 status;
2857 u32 reg_val;
2858
2859 /* Disable all AN and force speed to 10G Serial. */
2860 status = mac->ops.read_iosf_sb_reg(hw,
2861 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2862 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2863 if (status != IXGBE_SUCCESS)
2864 return status;
2865
2866 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2867 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2868 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2869 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2870
2871 /* Select forced link speed for internal PHY. */
2872 switch (*speed) {
2873 case IXGBE_LINK_SPEED_10GB_FULL:
2874 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2875 break;
2876 case IXGBE_LINK_SPEED_1GB_FULL:
2877 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2878 break;
2879 case 0:
2880 /* media none (linkdown) */
2881 break;
2882 default:
2883 /* Other link speeds are not supported by internal PHY. */
2884 return IXGBE_ERR_LINK_SETUP;
2885 }
2886
2887 status = mac->ops.write_iosf_sb_reg(hw,
2888 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2889 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2890
2891 /* Toggle port SW reset by AN reset. */
2892 status = ixgbe_restart_an_internal_phy_x550em(hw);
2893
2894 return status;
2895 }
2896
2897 /**
2898 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2899 * @hw: pointer to hardware structure
2900 * @speed: new link speed
2901 * @autoneg_wait_to_complete: unused
2902 *
2903 * Configure the integrated PHY for SFP support.
2904 **/
2905 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2906 ixgbe_link_speed speed,
2907 bool autoneg_wait_to_complete)
2908 {
2909 s32 ret_val;
2910 u16 reg_phy_ext;
2911 bool setup_linear = FALSE;
2912 u32 reg_slice, reg_phy_int, slice_offset;
2913
2914 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2915
2916 /* Check if SFP module is supported and linear */
2917 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2918
2919 /* If no SFP module is present, return success, since an SFP-not-present
2920 * error is not an accepted error in the setup MAC link flow.
2921 */
2922 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2923 return IXGBE_SUCCESS;
2924
2925 if (ret_val != IXGBE_SUCCESS)
2926 return ret_val;
2927
2928 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2929 /* Configure internal PHY for native SFI based on module type */
2930 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2931 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2932 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2933
2934 if (ret_val != IXGBE_SUCCESS)
2935 return ret_val;
2936
2937 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2938 if (!setup_linear)
2939 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2940
2941 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2942 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2943 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2944
2945 if (ret_val != IXGBE_SUCCESS)
2946 return ret_val;
2947
2948 /* Setup SFI internal link. */
2949 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2950 } else {
2951 /* Configure internal PHY for KR/KX. */
2952 ixgbe_setup_kr_speed_x550em(hw, speed);
2953
2954 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2955 /* Find Address */
2956 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2957 return IXGBE_ERR_PHY_ADDR_INVALID;
2958 }
2959
2960 /* Get external PHY SKU id */
2961 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2962 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2963
2964 if (ret_val != IXGBE_SUCCESS)
2965 return ret_val;
2966
2967 /* When configuring quad port CS4223, the MAC instance is part
2968 * of the slice offset.
2969 */
2970 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2971 slice_offset = (hw->bus.lan_id +
2972 (hw->bus.instance_id << 1)) << 12;
2973 else
2974 slice_offset = hw->bus.lan_id << 12;
2975
2976 /* Configure CS4227/CS4223 LINE side to proper mode. */
2977 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2978
2979 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2980 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2981
2982 if (ret_val != IXGBE_SUCCESS)
2983 return ret_val;
2984
2985 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2986 (IXGBE_CS4227_EDC_MODE_SR << 1));
2987
2988 if (setup_linear)
2989 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2990 else
2991 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2992 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2993 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2994
2995 /* Flush previous write with a read */
2996 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2997 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2998 }
2999 return ret_val;
3000 }
3001
3002 /**
3003 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
3004 * @hw: pointer to hardware structure
3005 *
3006 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
3007 **/
3008 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
3009 {
3010 struct ixgbe_mac_info *mac = &hw->mac;
3011 s32 status;
3012 u32 reg_val;
3013
3014 /* Disable training protocol FSM. */
3015 status = mac->ops.read_iosf_sb_reg(hw,
3016 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3017 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3018 if (status != IXGBE_SUCCESS)
3019 return status;
3020 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3021 status = mac->ops.write_iosf_sb_reg(hw,
3022 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3023 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3024 if (status != IXGBE_SUCCESS)
3025 return status;
3026
3027 /* Disable Flex from training TXFFE. */
3028 status = mac->ops.read_iosf_sb_reg(hw,
3029 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3030 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3031 if (status != IXGBE_SUCCESS)
3032 return status;
3033 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3034 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3035 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3036 status = mac->ops.write_iosf_sb_reg(hw,
3037 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3038 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3039 if (status != IXGBE_SUCCESS)
3040 return status;
3041 status = mac->ops.read_iosf_sb_reg(hw,
3042 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3043 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3044 if (status != IXGBE_SUCCESS)
3045 return status;
3046 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3047 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3048 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3049 status = mac->ops.write_iosf_sb_reg(hw,
3050 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3051 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3052 if (status != IXGBE_SUCCESS)
3053 return status;
3054
3055 /* Enable override for coefficients. */
3056 status = mac->ops.read_iosf_sb_reg(hw,
3057 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3058 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3059 if (status != IXGBE_SUCCESS)
3060 return status;
3061 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3062 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3063 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3064 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3065 status = mac->ops.write_iosf_sb_reg(hw,
3066 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3067 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3068 return status;
3069 }
3070
3071 /**
3072 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3073 * @hw: pointer to hardware structure
3074 * @speed: the link speed to force
3075 *
3076 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3077 * internal and external PHY at a specific speed, without autonegotiation.
3078 **/
3079 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3080 {
3081 struct ixgbe_mac_info *mac = &hw->mac;
3082 s32 status;
3083 u32 reg_val;
3084
3085 /* iXFI is only supported with X552 */
3086 if (mac->type != ixgbe_mac_X550EM_x)
3087 return IXGBE_ERR_LINK_SETUP;
3088
3089 /* Disable AN and force speed to 10G Serial. */
3090 status = mac->ops.read_iosf_sb_reg(hw,
3091 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3092 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3093 if (status != IXGBE_SUCCESS)
3094 return status;
3095
3096 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3097 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3098
3099 /* Select forced link speed for internal PHY. */
3100 switch (*speed) {
3101 case IXGBE_LINK_SPEED_10GB_FULL:
3102 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3103 break;
3104 case IXGBE_LINK_SPEED_1GB_FULL:
3105 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3106 break;
3107 default:
3108 /* Other link speeds are not supported by internal KR PHY. */
3109 return IXGBE_ERR_LINK_SETUP;
3110 }
3111
3112 status = mac->ops.write_iosf_sb_reg(hw,
3113 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3114 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3115 if (status != IXGBE_SUCCESS)
3116 return status;
3117
3118 /* Additional configuration needed for x550em_x */
3119 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3120 status = ixgbe_setup_ixfi_x550em_x(hw);
3121 if (status != IXGBE_SUCCESS)
3122 return status;
3123 }
3124
3125 /* Toggle port SW reset by AN reset. */
3126 status = ixgbe_restart_an_internal_phy_x550em(hw);
3127
3128 return status;
3129 }
3130
3131 /**
3132 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3133 * @hw: address of hardware structure
3134 * @link_up: address of boolean to indicate link status
3135 *
3136 * Returns error code if unable to get link status.
3137 */
3138 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3139 {
3140 u32 ret;
3141 u16 autoneg_status;
3142
3143 *link_up = FALSE;
3144
3145 /* read this twice back to back to indicate current status */
3146 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3147 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3148 &autoneg_status);
3149 if (ret != IXGBE_SUCCESS)
3150 return ret;
3151
3152 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3153 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3154 &autoneg_status);
3155 if (ret != IXGBE_SUCCESS)
3156 return ret;
3157
3158 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3159
3160 return IXGBE_SUCCESS;
3161 }
3162
3163 /**
3164 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3165 * @hw: pointer to hardware structure
3166 *
3167 * Configures the link between the integrated KR PHY and the external X557 PHY
3168 * The driver will call this function when it gets a link status change
3169 * interrupt from the X557 PHY. This function configures the link speed
3170 * between the PHYs to match the link speed of the BASE-T link.
3171 *
3172 * A return of a non-zero value indicates an error, and the base driver should
3173 * not report link up.
3174 */
3175 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3176 {
3177 ixgbe_link_speed force_speed;
3178 bool link_up;
3179 u32 status;
3180 u16 speed;
3181
3182 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3183 return IXGBE_ERR_CONFIG;
3184
3185 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3186 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3187 /* If link is down, there is no setup necessary so return */
3188 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3189 if (status != IXGBE_SUCCESS)
3190 return status;
3191
3192 if (!link_up)
3193 return IXGBE_SUCCESS;
3194
3195 status = hw->phy.ops.read_reg(hw,
3196 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3197 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3198 &speed);
3199 if (status != IXGBE_SUCCESS)
3200 return status;
3201
3202 /* If link is still down - no setup is required so return */
3203 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3204 if (status != IXGBE_SUCCESS)
3205 return status;
3206 if (!link_up)
3207 return IXGBE_SUCCESS;
3208
3209 /* clear everything but the speed and duplex bits */
3210 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3211
3212 switch (speed) {
3213 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3214 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3215 break;
3216 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3217 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3218 break;
3219 default:
3220 /* Internal PHY does not support anything else */
3221 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3222 }
3223
3224 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3225 } else {
3226 speed = IXGBE_LINK_SPEED_10GB_FULL |
3227 IXGBE_LINK_SPEED_1GB_FULL;
3228 return ixgbe_setup_kr_speed_x550em(hw, speed);
3229 }
3230 }
3231
3232 /**
3233 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3234 * @hw: pointer to hardware structure
3235 *
3236 * Configures the integrated KR PHY to use internal loopback mode.
3237 **/
3238 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3239 {
3240 s32 status;
3241 u32 reg_val;
3242
3243 /* Disable AN and force speed to 10G Serial. */
3244 status = hw->mac.ops.read_iosf_sb_reg(hw,
3245 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3246 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3247 if (status != IXGBE_SUCCESS)
3248 return status;
3249 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3250 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3251 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3252 status = hw->mac.ops.write_iosf_sb_reg(hw,
3253 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3254 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3255 if (status != IXGBE_SUCCESS)
3256 return status;
3257
3258 /* Set near-end loopback clocks. */
3259 status = hw->mac.ops.read_iosf_sb_reg(hw,
3260 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3261 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3262 if (status != IXGBE_SUCCESS)
3263 return status;
3264 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3265 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3266 status = hw->mac.ops.write_iosf_sb_reg(hw,
3267 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3268 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3269 if (status != IXGBE_SUCCESS)
3270 return status;
3271
3272 /* Set loopback enable. */
3273 status = hw->mac.ops.read_iosf_sb_reg(hw,
3274 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3275 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3276 if (status != IXGBE_SUCCESS)
3277 return status;
3278 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3279 status = hw->mac.ops.write_iosf_sb_reg(hw,
3280 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3281 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3282 if (status != IXGBE_SUCCESS)
3283 return status;
3284
3285 /* Training bypass. */
3286 status = hw->mac.ops.read_iosf_sb_reg(hw,
3287 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3288 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3289 if (status != IXGBE_SUCCESS)
3290 return status;
3291 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3292 status = hw->mac.ops.write_iosf_sb_reg(hw,
3293 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3294 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3295
3296 return status;
3297 }
3298
3299 /**
3300 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
3301 * command. The EEPROM semaphore is acquired internally.
3302 * @hw: pointer to hardware structure
3303 * @offset: offset of word in the EEPROM to read
3304 * @data: word read from the EEPROM
3305 *
3306 * Reads a 16 bit word from the EEPROM using the hostif.
3307 **/
3308 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3309 {
3310 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3311 struct ixgbe_hic_read_shadow_ram buffer;
3312 s32 status;
3313
3314 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3315 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3316 buffer.hdr.req.buf_lenh = 0;
3317 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3318 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3319
3320 /* convert offset from words to bytes */
3321 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3322 /* one word */
3323 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3324 buffer.pad2 = 0;
3325 buffer.pad3 = 0;
3326
3327 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3328 if (status)
3329 return status;
3330
3331 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3332 IXGBE_HI_COMMAND_TIMEOUT);
3333 if (!status) {
3334 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3335 FW_NVM_DATA_OFFSET);
3336 }
3337
3338 hw->mac.ops.release_swfw_sync(hw, mask);
3339 return status;
3340 }
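
/*
 * Minimal usage sketch (illustrative only): reading a single shadow RAM
 * word, here the EEPROM checksum word, through the host interface.
 *
 *	u16 word;
 *
 *	if (ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, &word) ==
 *	    IXGBE_SUCCESS)
 *		DEBUGOUT1("EEPROM checksum word = 0x%04x\n", word);
 *
 * Offsets are word offsets; the conversion to a byte address happens
 * inside the helper.
 */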
3341
3342 /**
3343 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3344 * @hw: pointer to hardware structure
3345 * @offset: offset of word in the EEPROM to read
3346 * @words: number of words
3347 * @data: word(s) read from the EEPROM
3348 *
3349 * Reads one or more 16 bit words from the EEPROM using the hostif.
3350 **/
3351 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3352 u16 offset, u16 words, u16 *data)
3353 {
3354 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3355 struct ixgbe_hic_read_shadow_ram buffer;
3356 u32 current_word = 0;
3357 u16 words_to_read;
3358 s32 status;
3359 u32 i;
3360
3361 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3362
3363 /* Take semaphore for the entire operation. */
3364 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3365 if (status) {
3366 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3367 return status;
3368 }
3369
3370 while (words) {
3371 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3372 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3373 else
3374 words_to_read = words;
3375
3376 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3377 buffer.hdr.req.buf_lenh = 0;
3378 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3379 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3380
3381 /* convert offset from words to bytes */
3382 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3383 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3384 buffer.pad2 = 0;
3385 buffer.pad3 = 0;
3386
3387 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3388 IXGBE_HI_COMMAND_TIMEOUT);
3389
3390 if (status) {
3391 DEBUGOUT("Host interface command failed\n");
3392 goto out;
3393 }
3394
3395 for (i = 0; i < words_to_read; i++) {
3396 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3397 2 * i;
3398 u32 value = IXGBE_READ_REG(hw, reg);
3399
3400 data[current_word] = (u16)(value & 0xffff);
3401 current_word++;
3402 i++;
3403 if (i < words_to_read) {
3404 value >>= 16;
3405 data[current_word] = (u16)(value & 0xffff);
3406 current_word++;
3407 }
3408 }
3409 words -= words_to_read;
3410 }
3411
3412 out:
3413 hw->mac.ops.release_swfw_sync(hw, mask);
3414 return status;
3415 }
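
/*
 * The response buffer is drained 32 bits at a time, low half first, which
 * is why the copy loop above advances i twice per register read.  Worked
 * example (the value is made up): if a FLEX_MNG slot reads back 0xBEEFCAFE,
 * the loop stores
 *
 *	data[n]     = 0xCAFE;
 *	data[n + 1] = 0xBEEF;
 *
 * before moving on to the next 32-bit slot.
 */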
3416
3417 /**
3418 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3419 * @hw: pointer to hardware structure
3420 * @offset: offset of word in the EEPROM to write
3421 * @data: word to write to the EEPROM
3422 *
3423 * Writes a 16 bit word to the EEPROM using the hostif.
3424 **/
3425 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3426 u16 data)
3427 {
3428 s32 status;
3429 struct ixgbe_hic_write_shadow_ram buffer;
3430
3431 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3432
3433 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3434 buffer.hdr.req.buf_lenh = 0;
3435 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3436 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3437
3438 /* one word */
3439 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3440 buffer.data = data;
3441 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3442
3443 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3444 sizeof(buffer),
3445 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3446
3447 return status;
3448 }
3449
3450 /**
3451 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3452 * @hw: pointer to hardware structure
3453 * @offset: offset of word in the EEPROM to write
3454 * @data: word to write to the EEPROM
3455 *
3456 * Write a 16 bit word to the EEPROM using the hostif.
3457 **/
3458 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3459 u16 data)
3460 {
3461 s32 status = IXGBE_SUCCESS;
3462
3463 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3464
3465 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3466 IXGBE_SUCCESS) {
3467 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3468 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3469 } else {
3470 DEBUGOUT("write ee hostif failed to get semaphore\n");
3471 status = IXGBE_ERR_SWFW_SYNC;
3472 }
3473
3474 return status;
3475 }
3476
3477 /**
3478 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3479 * @hw: pointer to hardware structure
3480 * @offset: offset of word in the EEPROM to write
3481 * @words: number of words
3482 * @data: word(s) to write to the EEPROM
3483 *
3484 * Writes one or more 16 bit words to the EEPROM using the hostif.
3485 **/
3486 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3487 u16 offset, u16 words, u16 *data)
3488 {
3489 s32 status = IXGBE_SUCCESS;
3490 u32 i = 0;
3491
3492 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3493
3494 /* Take semaphore for the entire operation. */
3495 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3496 if (status != IXGBE_SUCCESS) {
3497 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3498 goto out;
3499 }
3500
3501 for (i = 0; i < words; i++) {
3502 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3503 data[i]);
3504
3505 if (status != IXGBE_SUCCESS) {
3506 DEBUGOUT("Eeprom buffered write failed\n");
3507 break;
3508 }
3509 }
3510
3511 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3512 out:
3513
3514 return status;
3515 }
3516
3517 /**
3518 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3519 * @hw: pointer to hardware structure
3520 * @ptr: pointer offset in eeprom
3521 * @size: size of section pointed to by ptr; if 0, the first word is used as the size
3522 * @csum: address of checksum to update
3523 * @buffer: pointer to a buffered copy of the EEPROM image, or NULL to read from hardware
3524 * @buffer_size: size of buffer
3525 *
3526 * Returns error status for any failure
3527 */
3528 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3529 u16 size, u16 *csum, u16 *buffer,
3530 u32 buffer_size)
3531 {
3532 u16 buf[256];
3533 s32 status;
3534 u16 length, bufsz, i, start;
3535 u16 *local_buffer;
3536
3537 bufsz = sizeof(buf) / sizeof(buf[0]);
3538
3539 /* Read a chunk at the pointer location */
3540 if (!buffer) {
3541 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3542 if (status) {
3543 DEBUGOUT("Failed to read EEPROM image\n");
3544 return status;
3545 }
3546 local_buffer = buf;
3547 } else {
3548 if (buffer_size < ptr)
3549 return IXGBE_ERR_PARAM;
3550 local_buffer = &buffer[ptr];
3551 }
3552
3553 if (size) {
3554 start = 0;
3555 length = size;
3556 } else {
3557 start = 1;
3558 length = local_buffer[0];
3559
3560 /* Skip pointer section if length is invalid. */
3561 if (length == 0xFFFF || length == 0 ||
3562 (ptr + length) >= hw->eeprom.word_size)
3563 return IXGBE_SUCCESS;
3564 }
3565
3566 if (buffer && ((u32)start + (u32)length > buffer_size))
3567 return IXGBE_ERR_PARAM;
3568
3569 for (i = start; length; i++, length--) {
3570 if (i == bufsz && !buffer) {
3571 ptr += bufsz;
3572 i = 0;
3573 if (length < bufsz)
3574 bufsz = length;
3575
3576 /* Read a chunk at the pointer location */
3577 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3578 bufsz, buf);
3579 if (status) {
3580 DEBUGOUT("Failed to read EEPROM image\n");
3581 return status;
3582 }
3583 }
3584 *csum += local_buffer[i];
3585 }
3586 return IXGBE_SUCCESS;
3587 }
3588
3589 /**
3590 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3591 * @hw: pointer to hardware structure
3592 * @buffer: pointer to a buffered copy of the EEPROM image, or NULL to read from hardware
3593 * @buffer_size: size of buffer
3594 *
3595 * Returns a negative error code on error, or the 16-bit checksum
3596 **/
3597 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3598 {
3599 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3600 u16 *local_buffer;
3601 s32 status;
3602 u16 checksum = 0;
3603 u16 pointer, i, size;
3604
3605 DEBUGFUNC("ixgbe_calc_checksum_X550");
3606
3607 hw->eeprom.ops.init_params(hw);
3608
3609 if (!buffer) {
3610 /* Read pointer area */
3611 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3612 IXGBE_EEPROM_LAST_WORD + 1,
3613 eeprom_ptrs);
3614 if (status) {
3615 DEBUGOUT("Failed to read EEPROM image\n");
3616 return status;
3617 }
3618 local_buffer = eeprom_ptrs;
3619 } else {
3620 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3621 return IXGBE_ERR_PARAM;
3622 local_buffer = buffer;
3623 }
3624
3625 /*
3626 	 * For X550 hardware, include words 0x0-0x41 in the checksum and skip
3627 	 * the checksum word itself.
3628 */
3629 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3630 if (i != IXGBE_EEPROM_CHECKSUM)
3631 checksum += local_buffer[i];
3632
3633 /*
3634 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3635 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3636 */
3637 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3638 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3639 continue;
3640
3641 pointer = local_buffer[i];
3642
3643 /* Skip pointer section if the pointer is invalid. */
3644 if (pointer == 0xFFFF || pointer == 0 ||
3645 pointer >= hw->eeprom.word_size)
3646 continue;
3647
3648 switch (i) {
3649 case IXGBE_PCIE_GENERAL_PTR:
3650 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3651 break;
3652 case IXGBE_PCIE_CONFIG0_PTR:
3653 case IXGBE_PCIE_CONFIG1_PTR:
3654 size = IXGBE_PCIE_CONFIG_SIZE;
3655 break;
3656 default:
3657 size = 0;
3658 break;
3659 }
3660
3661 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3662 buffer, buffer_size);
3663 if (status)
3664 return status;
3665 }
3666
3667 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3668
3669 return (s32)checksum;
3670 }
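
/*
 * Illustrative sketch (not part of the driver, compiled out): the value
 * returned above is chosen so that the 16-bit sum of all checksummed words
 * plus the stored checksum word equals IXGBE_EEPROM_SUM.  The helper name
 * and arguments are placeholders.
 */
#if 0
static bool
example_checksum_invariant_holds(u16 sum_of_words, u16 stored_checksum)
{
	/* 16-bit modular arithmetic; wrap-around is intentional. */
	return (u16)(sum_of_words + stored_checksum) == (u16)IXGBE_EEPROM_SUM;
}
#endif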
3671
3672 /**
3673 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3674 * @hw: pointer to hardware structure
3675 *
3676 * Returns a negative error code on error, or the 16-bit checksum
3677 **/
3678 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3679 {
3680 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3681 }
3682
3683 /**
3684 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3685 * @hw: pointer to hardware structure
3686 * @checksum_val: calculated checksum
3687 *
3688 * Performs checksum calculation and validates the EEPROM checksum. If the
3689 * caller does not need checksum_val, the value can be NULL.
3690 **/
3691 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3692 {
3693 s32 status;
3694 u16 checksum;
3695 u16 read_checksum = 0;
3696
3697 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3698
3699 /* Read the first word from the EEPROM. If this times out or fails, do
3700 * not continue or we could be in for a very long wait while every
3701 * EEPROM read fails
3702 */
3703 status = hw->eeprom.ops.read(hw, 0, &checksum);
3704 if (status) {
3705 DEBUGOUT("EEPROM read failed\n");
3706 return status;
3707 }
3708
3709 status = hw->eeprom.ops.calc_checksum(hw);
3710 if (status < 0)
3711 return status;
3712
3713 checksum = (u16)(status & 0xffff);
3714
3715 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3716 &read_checksum);
3717 if (status)
3718 return status;
3719
3720 /* Verify read checksum from EEPROM is the same as
3721 * calculated checksum
3722 */
3723 if (read_checksum != checksum) {
3724 status = IXGBE_ERR_EEPROM_CHECKSUM;
3725 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3726 "Invalid EEPROM checksum");
3727 }
3728
3729 /* If the user cares, return the calculated checksum */
3730 if (checksum_val)
3731 *checksum_val = checksum;
3732
3733 return status;
3734 }
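
/*
 * Illustrative sketch (not part of the driver, compiled out): typical use
 * of the validation routine above from init-style code.  The helper name
 * and log text are placeholders.
 */
#if 0
static s32
example_check_nvm(struct ixgbe_hw *hw)
{
	u16 checksum = 0;
	s32 status;

	status = ixgbe_validate_eeprom_checksum_X550(hw, &checksum);
	if (status == IXGBE_ERR_EEPROM_CHECKSUM)
		DEBUGOUT1("NVM checksum mismatch, calculated 0x%04x\n",
		    checksum);
	return status;
}
#endif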
3735
3736 /**
3737 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3738 * @hw: pointer to hardware structure
3739 *
3740  * After the EEPROM has been written to shadow RAM using the EEWR register,
3741  * software calculates the checksum, writes it to the EEPROM, and instructs
3742  * the hardware to update the flash.
3743 **/
3744 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3745 {
3746 s32 status;
3747 u16 checksum = 0;
3748
3749 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3750
3751 /* Read the first word from the EEPROM. If this times out or fails, do
3752 * not continue or we could be in for a very long wait while every
3753 * EEPROM read fails
3754 */
3755 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3756 if (status) {
3757 DEBUGOUT("EEPROM read failed\n");
3758 return status;
3759 }
3760
3761 status = ixgbe_calc_eeprom_checksum_X550(hw);
3762 if (status < 0)
3763 return status;
3764
3765 checksum = (u16)(status & 0xffff);
3766
3767 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3768 checksum);
3769 if (status)
3770 return status;
3771
3772 status = ixgbe_update_flash_X550(hw);
3773
3774 return status;
3775 }
3776
3777 /**
3778 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3779 * @hw: pointer to hardware structure
3780 *
3781 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3782 **/
3783 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3784 {
3785 s32 status = IXGBE_SUCCESS;
3786 union ixgbe_hic_hdr2 buffer;
3787
3788 DEBUGFUNC("ixgbe_update_flash_X550");
3789
3790 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3791 buffer.req.buf_lenh = 0;
3792 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3793 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3794
3795 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3796 sizeof(buffer),
3797 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3798
3799 return status;
3800 }
3801
3802 /**
3803 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3804 * @hw: pointer to hardware structure
3805 *
3806 * Determines physical layer capabilities of the current configuration.
3807 **/
3808 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3809 {
3810 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3811 u16 ext_ability = 0;
3812
3813 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3814
3815 hw->phy.ops.identify(hw);
3816
3817 switch (hw->phy.type) {
3818 case ixgbe_phy_x550em_kr:
3819 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3820 if (hw->phy.nw_mng_if_sel &
3821 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3822 physical_layer =
3823 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3824 break;
3825 } else if (hw->device_id ==
3826 IXGBE_DEV_ID_X550EM_A_KR_L) {
3827 physical_layer =
3828 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3829 break;
3830 }
3831 }
3832 /* fall through */
3833 case ixgbe_phy_x550em_xfi:
3834 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3835 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3836 break;
3837 case ixgbe_phy_x550em_kx4:
3838 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3839 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3840 break;
3841 case ixgbe_phy_x550em_ext_t:
3842 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3843 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3844 &ext_ability);
3845 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3846 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3847 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3848 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3849 break;
3850 case ixgbe_phy_fw:
3851 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3852 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3853 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3854 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3855 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3856 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3857 break;
3858 case ixgbe_phy_sgmii:
3859 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3860 break;
3861 case ixgbe_phy_ext_1g_t:
3862 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3863 break;
3864 default:
3865 break;
3866 }
3867
3868 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3869 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3870
3871 return physical_layer;
3872 }
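
/*
 * Illustrative sketch (not part of the driver, compiled out): the return
 * value above is a bit mask, so callers test individual
 * IXGBE_PHYSICAL_LAYER_* bits rather than comparing for equality.  The
 * helper name is a placeholder.
 */
#if 0
static void
example_report_physical_layer(struct ixgbe_hw *hw)
{
	u64 layer = ixgbe_get_supported_physical_layer_X550em(hw);

	if (layer == IXGBE_PHYSICAL_LAYER_UNKNOWN) {
		DEBUGOUT("physical layer unknown\n");
		return;
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		DEBUGOUT("10GBASE-T supported\n");
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		DEBUGOUT("1000BASE-T supported\n");
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		DEBUGOUT("1000BASE-KX supported\n");
}
#endif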
3873
3874 /**
3875 * ixgbe_get_bus_info_x550em - Set PCI bus info
3876 * @hw: pointer to hardware structure
3877 *
3878 * Sets bus link width and speed to unknown because X550em is
3879 * not a PCI device.
3880 **/
3881 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3882 {
3883
3884 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3885
3886 hw->bus.width = ixgbe_bus_width_unknown;
3887 hw->bus.speed = ixgbe_bus_speed_unknown;
3888
3889 hw->mac.ops.set_lan_id(hw);
3890
3891 return IXGBE_SUCCESS;
3892 }
3893
3894 /**
3895 * ixgbe_disable_rx_x550 - Disable RX unit
3896 * @hw: pointer to hardware structure
3897 *
3898  * Disables the Rx unit for x550.
3899 **/
3900 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3901 {
3902 u32 rxctrl, pfdtxgswc;
3903 s32 status;
3904 struct ixgbe_hic_disable_rxen fw_cmd;
3905
3906 DEBUGFUNC("ixgbe_disable_rx_dma_x550");
3907
3908 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3909 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3910 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3911 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3912 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3913 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3914 hw->mac.set_lben = TRUE;
3915 } else {
3916 hw->mac.set_lben = FALSE;
3917 }
3918
3919 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3920 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3921 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3922 fw_cmd.port_number = (u8)hw->bus.lan_id;
3923
3924 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3925 sizeof(struct ixgbe_hic_disable_rxen),
3926 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3927
3928 /* If we fail - disable RX using register write */
3929 if (status) {
3930 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3931 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3932 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3933 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3934 }
3935 }
3936 }
3937 }
3938
3939 /**
3940 * ixgbe_enter_lplu_x550em - Transition to low power states
3941 * @hw: pointer to hardware structure
3942 *
3943 * Configures Low Power Link Up on transition to low power states
3944 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3945 * X557 PHY immediately prior to entering LPLU.
3946 **/
3947 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3948 {
3949 u16 an_10g_cntl_reg, autoneg_reg, speed;
3950 s32 status;
3951 ixgbe_link_speed lcd_speed;
3952 u32 save_autoneg;
3953 bool link_up;
3954
3955 /* SW LPLU not required on later HW revisions. */
3956 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3957 (IXGBE_FUSES0_REV_MASK &
3958 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3959 return IXGBE_SUCCESS;
3960
3961 /* If blocked by MNG FW, then don't restart AN */
3962 if (ixgbe_check_reset_blocked(hw))
3963 return IXGBE_SUCCESS;
3964
3965 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3966 if (status != IXGBE_SUCCESS)
3967 return status;
3968
3969 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3970
3971 if (status != IXGBE_SUCCESS)
3972 return status;
3973
3974 /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability
3975 * disabled, then force link down by entering low power mode.
3976 */
3977 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3978 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3979 return ixgbe_set_copper_phy_power(hw, FALSE);
3980
3981 /* Determine LCD */
3982 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3983
3984 if (status != IXGBE_SUCCESS)
3985 return status;
3986
3987 /* If no valid LCD link speed, then force link down and exit. */
3988 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3989 return ixgbe_set_copper_phy_power(hw, FALSE);
3990
3991 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3992 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3993 &speed);
3994
3995 if (status != IXGBE_SUCCESS)
3996 return status;
3997
3998 /* If no link now, speed is invalid so take link down */
3999 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
4000 if (status != IXGBE_SUCCESS)
4001 return ixgbe_set_copper_phy_power(hw, FALSE);
4002
4003 /* clear everything but the speed bits */
4004 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
4005
4006 /* If current speed is already LCD, then exit. */
4007 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
4008 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
4009 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
4010 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
4011 return status;
4012
4013 /* Clear AN completed indication */
4014 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
4015 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4016 &autoneg_reg);
4017
4018 if (status != IXGBE_SUCCESS)
4019 return status;
4020
4021 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4022 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4023 &an_10g_cntl_reg);
4024
4025 if (status != IXGBE_SUCCESS)
4026 return status;
4027
4028 status = hw->phy.ops.read_reg(hw,
4029 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4030 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4031 &autoneg_reg);
4032
4033 if (status != IXGBE_SUCCESS)
4034 return status;
4035
4036 save_autoneg = hw->phy.autoneg_advertised;
4037
4038 	/* Set up the link at the lowest common link speed (LCD) */
4039 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4040
4041 /* restore autoneg from before setting lplu speed */
4042 hw->phy.autoneg_advertised = save_autoneg;
4043
4044 return status;
4045 }
4046
4047 /**
4048  * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4049 * @hw: pointer to hardware structure
4050 * @lcd_speed: pointer to lowest common link speed
4051 *
4052 * Determine lowest common link speed with link partner.
4053 **/
4054 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4055 {
4056 u16 an_lp_status;
4057 s32 status;
4058 u16 word = hw->eeprom.ctrl_word_3;
4059
4060 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4061
4062 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4063 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4064 &an_lp_status);
4065
4066 if (status != IXGBE_SUCCESS)
4067 return status;
4068
4069 /* If link partner advertised 1G, return 1G */
4070 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4071 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4072 return status;
4073 }
4074
4075 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4076 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4077 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4078 return status;
4079
4080 /* Link partner not capable of lower speeds, return 10G */
4081 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4082 return status;
4083 }
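
/*
 * Illustrative sketch (not part of the driver, compiled out): how the LCD
 * lookup above feeds the decision made in ixgbe_enter_lplu_t_x550em; an
 * unknown LCD means the copper PHY is powered down instead of re-linked.
 * The helper name is a placeholder.
 */
#if 0
static s32
example_link_at_lcd(struct ixgbe_hw *hw)
{
	ixgbe_link_speed lcd_speed;
	s32 status;

	status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
	if (status != IXGBE_SUCCESS)
		return status;

	if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
		return ixgbe_set_copper_phy_power(hw, FALSE);

	return hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
}
#endif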
4084
4085 /**
4086 * ixgbe_setup_fc_X550em - Set up flow control
4087 * @hw: pointer to hardware structure
4088 *
4089 * Called at init time to set up flow control.
4090 **/
4091 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4092 {
4093 s32 ret_val = IXGBE_SUCCESS;
4094 u32 pause, asm_dir, reg_val;
4095
4096 DEBUGFUNC("ixgbe_setup_fc_X550em");
4097
4098 /* Validate the requested mode */
4099 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4100 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4101 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4102 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4103 goto out;
4104 }
4105
4106 /* 10gig parts do not have a word in the EEPROM to determine the
4107 * default flow control setting, so we explicitly set it to full.
4108 */
4109 if (hw->fc.requested_mode == ixgbe_fc_default)
4110 hw->fc.requested_mode = ixgbe_fc_full;
4111
4112 /* Determine PAUSE and ASM_DIR bits. */
4113 switch (hw->fc.requested_mode) {
4114 case ixgbe_fc_none:
4115 pause = 0;
4116 asm_dir = 0;
4117 break;
4118 case ixgbe_fc_tx_pause:
4119 pause = 0;
4120 asm_dir = 1;
4121 break;
4122 case ixgbe_fc_rx_pause:
4123 /* Rx Flow control is enabled and Tx Flow control is
4124 * disabled by software override. Since there really
4125 * isn't a way to advertise that we are capable of RX
4126 * Pause ONLY, we will advertise that we support both
4127 * symmetric and asymmetric Rx PAUSE, as such we fall
4128 * through to the fc_full statement. Later, we will
4129 * disable the adapter's ability to send PAUSE frames.
4130 */
4131 case ixgbe_fc_full:
4132 pause = 1;
4133 asm_dir = 1;
4134 break;
4135 default:
4136 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4137 "Flow control param set incorrectly\n");
4138 ret_val = IXGBE_ERR_CONFIG;
4139 goto out;
4140 }
4141
4142 switch (hw->device_id) {
4143 case IXGBE_DEV_ID_X550EM_X_KR:
4144 case IXGBE_DEV_ID_X550EM_A_KR:
4145 case IXGBE_DEV_ID_X550EM_A_KR_L:
4146 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4147 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4148 					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4149 if (ret_val != IXGBE_SUCCESS)
4150 goto out;
4151 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4152 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4153 if (pause)
4154 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4155 if (asm_dir)
4156 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4157 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4158 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4159 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4160
4161 /* This device does not fully support AN. */
4162 hw->fc.disable_fc_autoneg = TRUE;
4163 break;
4164 case IXGBE_DEV_ID_X550EM_X_XFI:
4165 hw->fc.disable_fc_autoneg = TRUE;
4166 break;
4167 default:
4168 break;
4169 }
4170
4171 out:
4172 return ret_val;
4173 }
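
/*
 * Illustrative sketch (not part of the driver, compiled out): the
 * PAUSE/ASM_DIR advertisement encoding used above, written out as a small
 * table.  rx_pause advertises the same bits as full because "RX pause
 * only" cannot be advertised directly.  The helper name is a placeholder.
 */
#if 0
static void
example_fc_advertisement_bits(enum ixgbe_fc_mode mode, u32 *pause,
    u32 *asm_dir)
{
	switch (mode) {
	case ixgbe_fc_tx_pause:
		*pause = 0;
		*asm_dir = 1;
		break;
	case ixgbe_fc_rx_pause:	/* advertised the same as full */
	case ixgbe_fc_full:
		*pause = 1;
		*asm_dir = 1;
		break;
	case ixgbe_fc_none:
	default:
		*pause = 0;
		*asm_dir = 0;
		break;
	}
}
#endif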
4174
4175 /**
4176 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4177 * @hw: pointer to hardware structure
4178 *
4179 * Enable flow control according to IEEE clause 37.
4180 **/
4181 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4182 {
4183 u32 link_s1, lp_an_page_low, an_cntl_1;
4184 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4185 ixgbe_link_speed speed;
4186 bool link_up;
4187
4188 /* AN should have completed when the cable was plugged in.
4189 * Look for reasons to bail out. Bail out if:
4190 * - FC autoneg is disabled, or if
4191 * - link is not up.
4192 */
4193 if (hw->fc.disable_fc_autoneg) {
4194 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4195 "Flow control autoneg is disabled");
4196 goto out;
4197 }
4198
4199 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4200 if (!link_up) {
4201 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4202 goto out;
4203 }
4204
4205 	/* Check if auto-negotiation has completed */
4206 status = hw->mac.ops.read_iosf_sb_reg(hw,
4207 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4208 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4209
4210 if (status != IXGBE_SUCCESS ||
4211 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4212 DEBUGOUT("Auto-Negotiation did not complete\n");
4213 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4214 goto out;
4215 }
4216
4217 /* Read the 10g AN autoc and LP ability registers and resolve
4218 * local flow control settings accordingly
4219 */
4220 status = hw->mac.ops.read_iosf_sb_reg(hw,
4221 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4222 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4223
4224 if (status != IXGBE_SUCCESS) {
4225 DEBUGOUT("Auto-Negotiation did not complete\n");
4226 goto out;
4227 }
4228
4229 status = hw->mac.ops.read_iosf_sb_reg(hw,
4230 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4231 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4232
4233 if (status != IXGBE_SUCCESS) {
4234 DEBUGOUT("Auto-Negotiation did not complete\n");
4235 goto out;
4236 }
4237
4238 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4239 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4240 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4241 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4242 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4243
4244 out:
4245 if (status == IXGBE_SUCCESS) {
4246 hw->fc.fc_was_autonegged = TRUE;
4247 } else {
4248 hw->fc.fc_was_autonegged = FALSE;
4249 hw->fc.current_mode = hw->fc.requested_mode;
4250 }
4251 }
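
/*
 * Illustrative sketch (not part of the driver, compiled out): roughly how
 * the local and link partner symmetric (SYM) and asymmetric (ASM) pause
 * bits resolve to a flow control mode, in the spirit of what
 * ixgbe_negotiate_fc() does above.  Simplified: the real helper also
 * consults hw->fc.requested_mode when both sides advertise symmetric
 * pause.  The helper name is a placeholder.
 */
#if 0
static enum ixgbe_fc_mode
example_resolve_fc(bool local_sym, bool local_asm, bool lp_sym, bool lp_asm)
{
	if (local_sym && lp_sym)
		return ixgbe_fc_full;
	if (!local_sym && local_asm && lp_sym && lp_asm)
		return ixgbe_fc_tx_pause;
	if (local_sym && local_asm && !lp_sym && lp_asm)
		return ixgbe_fc_rx_pause;
	return ixgbe_fc_none;
}
#endif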
4252
4253 /**
4254 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4255 * @hw: pointer to hardware structure
4256  * Passes the requested flow control settings through without negotiation.
4257 **/
4258 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4259 {
4260 hw->fc.fc_was_autonegged = FALSE;
4261 hw->fc.current_mode = hw->fc.requested_mode;
4262 }
4263
4264 /**
4265 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4266 * @hw: pointer to hardware structure
4267 *
4268 * Enable flow control according to IEEE clause 37.
4269 **/
4270 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4271 {
4272 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4273 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4274 ixgbe_link_speed speed;
4275 bool link_up;
4276
4277 /* AN should have completed when the cable was plugged in.
4278 * Look for reasons to bail out. Bail out if:
4279 * - FC autoneg is disabled, or if
4280 * - link is not up.
4281 */
4282 if (hw->fc.disable_fc_autoneg) {
4283 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4284 "Flow control autoneg is disabled");
4285 goto out;
4286 }
4287
4288 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4289 if (!link_up) {
4290 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4291 goto out;
4292 }
4293
4294 /* Check if auto-negotiation has completed */
4295 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4296 if (status != IXGBE_SUCCESS ||
4297 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4298 DEBUGOUT("Auto-Negotiation did not complete\n");
4299 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4300 goto out;
4301 }
4302
4303 /* Negotiate the flow control */
4304 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4305 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4306 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4307 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4308 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4309
4310 out:
4311 if (status == IXGBE_SUCCESS) {
4312 hw->fc.fc_was_autonegged = TRUE;
4313 } else {
4314 hw->fc.fc_was_autonegged = FALSE;
4315 hw->fc.current_mode = hw->fc.requested_mode;
4316 }
4317 }
4318
4319 /**
4320 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4321 * @hw: pointer to hardware structure
4322 *
4323 * Called at init time to set up flow control.
4324 **/
4325 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4326 {
4327 s32 status = IXGBE_SUCCESS;
4328 u32 an_cntl = 0;
4329
4330 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4331
4332 /* Validate the requested mode */
4333 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4334 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4335 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4336 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4337 }
4338
4339 if (hw->fc.requested_mode == ixgbe_fc_default)
4340 hw->fc.requested_mode = ixgbe_fc_full;
4341
4342 /* Set up the 1G and 10G flow control advertisement registers so the
4343 * HW will be able to do FC autoneg once the cable is plugged in. If
4344 * we link at 10G, the 1G advertisement is harmless and vice versa.
4345 */
4346 status = hw->mac.ops.read_iosf_sb_reg(hw,
4347 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4348 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4349
4350 if (status != IXGBE_SUCCESS) {
4351 DEBUGOUT("Auto-Negotiation did not complete\n");
4352 return status;
4353 }
4354
4355 /* The possible values of fc.requested_mode are:
4356 * 0: Flow control is completely disabled
4357 * 1: Rx flow control is enabled (we can receive pause frames,
4358 * but not send pause frames).
4359 * 2: Tx flow control is enabled (we can send pause frames but
4360 * we do not support receiving pause frames).
4361 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4362 * other: Invalid.
4363 */
4364 switch (hw->fc.requested_mode) {
4365 case ixgbe_fc_none:
4366 /* Flow control completely disabled by software override. */
4367 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4368 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4369 break;
4370 case ixgbe_fc_tx_pause:
4371 /* Tx Flow control is enabled, and Rx Flow control is
4372 * disabled by software override.
4373 */
4374 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4375 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4376 break;
4377 case ixgbe_fc_rx_pause:
4378 /* Rx Flow control is enabled and Tx Flow control is
4379 * disabled by software override. Since there really
4380 * isn't a way to advertise that we are capable of RX
4381 * Pause ONLY, we will advertise that we support both
4382 * symmetric and asymmetric Rx PAUSE, as such we fall
4383 * through to the fc_full statement. Later, we will
4384 * disable the adapter's ability to send PAUSE frames.
4385 */
4386 case ixgbe_fc_full:
4387 /* Flow control (both Rx and Tx) is enabled by SW override. */
4388 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4389 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4390 break;
4391 default:
4392 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4393 "Flow control param set incorrectly\n");
4394 return IXGBE_ERR_CONFIG;
4395 }
4396
4397 status = hw->mac.ops.write_iosf_sb_reg(hw,
4398 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4399 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4400
4401 /* Restart auto-negotiation. */
4402 status = ixgbe_restart_an_internal_phy_x550em(hw);
4403
4404 return status;
4405 }
4406
4407 /**
4408 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4409 * @hw: pointer to hardware structure
4410 * @state: set mux if 1, clear if 0
4411 */
4412 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4413 {
4414 u32 esdp;
4415
4416 if (!hw->bus.lan_id)
4417 return;
4418 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4419 if (state)
4420 esdp |= IXGBE_ESDP_SDP1;
4421 else
4422 esdp &= ~IXGBE_ESDP_SDP1;
4423 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4424 IXGBE_WRITE_FLUSH(hw);
4425 }
4426
4427 /**
4428 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4429 * @hw: pointer to hardware structure
4430 * @mask: Mask to specify which semaphore to acquire
4431 *
4432 * Acquires the SWFW semaphore and sets the I2C MUX
4433 **/
4434 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4435 {
4436 s32 status;
4437
4438 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4439
4440 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4441 if (status)
4442 return status;
4443
4444 if (mask & IXGBE_GSSR_I2C_MASK)
4445 ixgbe_set_mux(hw, 1);
4446
4447 return IXGBE_SUCCESS;
4448 }
4449
4450 /**
4451 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4452 * @hw: pointer to hardware structure
4453 * @mask: Mask to specify which semaphore to release
4454 *
4455 * Releases the SWFW semaphore and sets the I2C MUX
4456 **/
4457 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4458 {
4459 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4460
4461 if (mask & IXGBE_GSSR_I2C_MASK)
4462 ixgbe_set_mux(hw, 0);
4463
4464 ixgbe_release_swfw_sync_X540(hw, mask);
4465 }
4466
4467 /**
4468 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4469 * @hw: pointer to hardware structure
4470 * @mask: Mask to specify which semaphore to acquire
4471 *
4472  * Acquires the SWFW semaphore and gets the shared PHY token as needed
4473 */
4474 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4475 {
4476 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4477 int retries = FW_PHY_TOKEN_RETRIES;
4478 s32 status = IXGBE_SUCCESS;
4479
4480 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4481
4482 while (--retries) {
4483 status = IXGBE_SUCCESS;
4484 if (hmask)
4485 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4486 if (status) {
4487 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4488 status);
4489 return status;
4490 }
4491 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4492 return IXGBE_SUCCESS;
4493
4494 status = ixgbe_get_phy_token(hw);
4495 if (status == IXGBE_ERR_TOKEN_RETRY)
4496 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4497 status);
4498
4499 if (status == IXGBE_SUCCESS)
4500 return IXGBE_SUCCESS;
4501
4502 if (hmask)
4503 ixgbe_release_swfw_sync_X540(hw, hmask);
4504
4505 if (status != IXGBE_ERR_TOKEN_RETRY) {
4506 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4507 status);
4508 return status;
4509 }
4510 }
4511
4512 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4513 hw->phy.id);
4514 return status;
4515 }
4516
4517 /**
4518 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4519 * @hw: pointer to hardware structure
4520 * @mask: Mask to specify which semaphore to release
4521 *
4522 * Releases the SWFW semaphore and puts the shared phy token as needed
4523 */
4524 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4525 {
4526 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4527
4528 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4529
4530 if (mask & IXGBE_GSSR_TOKEN_SM)
4531 ixgbe_put_phy_token(hw);
4532
4533 if (hmask)
4534 ixgbe_release_swfw_sync_X540(hw, hmask);
4535 }
4536
4537 /**
4538 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4539 * @hw: pointer to hardware structure
4540 * @reg_addr: 32 bit address of PHY register to read
4541 * @device_type: 5 bit device type
4542 * @phy_data: Pointer to read data from PHY register
4543 *
4544 * Reads a value from a specified PHY register using the SWFW lock and PHY
4545  * Token. The PHY Token is needed since the MDIO is shared between two MAC
4546 * instances.
4547 **/
4548 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4549 u32 device_type, u16 *phy_data)
4550 {
4551 s32 status;
4552 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4553
4554 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4555
4556 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4557 return IXGBE_ERR_SWFW_SYNC;
4558
4559 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4560
4561 hw->mac.ops.release_swfw_sync(hw, mask);
4562
4563 return status;
4564 }
4565
4566 /**
4567 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4568 * @hw: pointer to hardware structure
4569 * @reg_addr: 32 bit PHY register to write
4570 * @device_type: 5 bit device type
4571 * @phy_data: Data to write to the PHY register
4572 *
4573 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4574  * The PHY Token is needed since the MDIO is shared between two MAC instances.
4575 **/
4576 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4577 u32 device_type, u16 phy_data)
4578 {
4579 s32 status;
4580 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4581
4582 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4583
4584 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4585 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4586 phy_data);
4587 hw->mac.ops.release_swfw_sync(hw, mask);
4588 } else {
4589 status = IXGBE_ERR_SWFW_SYNC;
4590 }
4591
4592 return status;
4593 }
4594
4595 /**
4596 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4597 * @hw: pointer to hardware structure
4598 *
4599  * Handle an external Base T PHY interrupt. If it is a high temperature
4600  * failure alarm, return an error; if it is a link status change, set up
4601  * the internal/external PHY link.
4602  *
4603  * Returns IXGBE_ERR_OVERTEMP if the interrupt is a high temperature
4604  * failure alarm, else returns the PHY access status.
4605 */
4606 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4607 {
4608 bool lsc;
4609 u32 status;
4610
4611 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4612
4613 if (status != IXGBE_SUCCESS)
4614 return status;
4615
4616 if (lsc)
4617 return ixgbe_setup_internal_phy(hw);
4618
4619 return IXGBE_SUCCESS;
4620 }
4621
4622 /**
4623 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4624 * @hw: pointer to hardware structure
4625 * @speed: new link speed
4626 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4627 *
4628 * Setup internal/external PHY link speed based on link speed, then set
4629 * external PHY auto advertised link speed.
4630 *
4631 * Returns error status for any failure
4632 **/
4633 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4634 ixgbe_link_speed speed,
4635 bool autoneg_wait_to_complete)
4636 {
4637 s32 status;
4638 ixgbe_link_speed force_speed;
4639
4640 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4641
4642 	/* Set up the internal/external PHY link speed to iXFI (10G), unless
4643 	 * only 1G is auto advertised, in which case set up a KX link.
4644 */
4645 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4646 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4647 else
4648 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4649
4650 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4651 */
4652 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4653 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4654 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4655
4656 if (status != IXGBE_SUCCESS)
4657 return status;
4658 }
4659
4660 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4661 }
4662
4663 /**
4664 * ixgbe_check_link_t_X550em - Determine link and speed status
4665 * @hw: pointer to hardware structure
4666 * @speed: pointer to link speed
4667 * @link_up: TRUE when link is up
4668 * @link_up_wait_to_complete: bool used to wait for link up or not
4669 *
4670 * Check that both the MAC and X557 external PHY have link.
4671 **/
4672 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4673 bool *link_up, bool link_up_wait_to_complete)
4674 {
4675 u32 status;
4676 u16 i, autoneg_status = 0;
4677
4678 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4679 return IXGBE_ERR_CONFIG;
4680
4681 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4682 link_up_wait_to_complete);
4683
4684 /* If check link fails or MAC link is not up, then return */
4685 if (status != IXGBE_SUCCESS || !(*link_up))
4686 return status;
4687
4688 	/* MAC link is up, so check the external X557 PHY link.
4689 	 * The PHY link status register is latching low, so a single read can
4690 	 * only detect a link drop; determining the current link state requires
4691 	 * back-to-back reads.
4692 */
4693 for (i = 0; i < 2; i++) {
4694 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4695 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4696 &autoneg_status);
4697
4698 if (status != IXGBE_SUCCESS)
4699 return status;
4700 }
4701
4702 /* If external PHY link is not up, then indicate link not up */
4703 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4704 *link_up = FALSE;
4705
4706 return IXGBE_SUCCESS;
4707 }
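
/*
 * Illustrative sketch (not part of the driver, compiled out): a caller
 * polling the combined MAC + external PHY link check above.  The retry
 * count and delay are arbitrary; msec_delay() is the osdep delay helper
 * used elsewhere in the shared ixgbe code.
 */
#if 0
static bool
example_wait_for_link_t_x550em(struct ixgbe_hw *hw)
{
	ixgbe_link_speed speed;
	bool link_up = FALSE;
	int i;

	for (i = 0; i < 10; i++) {
		if (ixgbe_check_link_t_X550em(hw, &speed, &link_up,
		    FALSE) != IXGBE_SUCCESS)
			break;
		if (link_up)
			break;
		msec_delay(100);
	}

	return link_up;
}
#endif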
4708
4709 /**
4710 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4711 * @hw: pointer to hardware structure
4712 **/
4713 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4714 {
4715 s32 status;
4716
4717 status = ixgbe_reset_phy_generic(hw);
4718
4719 if (status != IXGBE_SUCCESS)
4720 return status;
4721
4722 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4723 return ixgbe_enable_lasi_ext_t_x550em(hw);
4724 }
4725
4726 /**
4727 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4728 * @hw: pointer to hardware structure
4729 * @led_idx: led number to turn on
4730 **/
4731 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4732 {
4733 u16 phy_data;
4734
4735 DEBUGFUNC("ixgbe_led_on_t_X550em");
4736
4737 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4738 return IXGBE_ERR_PARAM;
4739
4740 /* To turn on the LED, set mode to ON. */
4741 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4742 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4743 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4744 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4745 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4746
4747 /* Some designs have the LEDs wired to the MAC */
4748 return ixgbe_led_on_generic(hw, led_idx);
4749 }
4750
4751 /**
4752 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4753 * @hw: pointer to hardware structure
4754 * @led_idx: led number to turn off
4755 **/
4756 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4757 {
4758 u16 phy_data;
4759
4760 DEBUGFUNC("ixgbe_led_off_t_X550em");
4761
4762 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4763 return IXGBE_ERR_PARAM;
4764
4765 	/* To turn off the LED, set mode to OFF. */
4766 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4767 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4768 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4769 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4770 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4771
4772 /* Some designs have the LEDs wired to the MAC */
4773 return ixgbe_led_off_generic(hw, led_idx);
4774 }
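
/*
 * Illustrative sketch (not part of the driver, compiled out): identifying
 * a port by blinking one LED with the helpers above.  The repeat count and
 * delay are arbitrary, and msec_delay() is the osdep delay helper.
 */
#if 0
static void
example_blink_led_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
	int i;

	for (i = 0; i < 5; i++) {
		(void)ixgbe_led_on_t_X550em(hw, led_idx);
		msec_delay(250);
		(void)ixgbe_led_off_t_X550em(hw, led_idx);
		msec_delay(250);
	}
}
#endif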
4775
4776 /**
4777 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4778 * @hw: pointer to the HW structure
4779 * @maj: driver version major number
4780 * @min: driver version minor number
4781 * @build: driver version build number
4782 * @sub: driver version sub build number
4783 * @len: length of driver_ver string
4784 * @driver_ver: driver string
4785 *
4786 * Sends driver version number to firmware through the manageability
4787 * block. On success return IXGBE_SUCCESS
4788 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4789 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4790 **/
4791 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4792 u8 build, u8 sub, u16 len, const char *driver_ver)
4793 {
4794 struct ixgbe_hic_drv_info2 fw_cmd;
4795 s32 ret_val = IXGBE_SUCCESS;
4796 int i;
4797
4798 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4799
4800 if ((len == 0) || (driver_ver == NULL) ||
4801 (len > sizeof(fw_cmd.driver_string)))
4802 return IXGBE_ERR_INVALID_ARGUMENT;
4803
4804 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4805 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4806 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4807 fw_cmd.port_num = (u8)hw->bus.func;
4808 fw_cmd.ver_maj = maj;
4809 fw_cmd.ver_min = min;
4810 fw_cmd.ver_build = build;
4811 fw_cmd.ver_sub = sub;
4812 fw_cmd.hdr.checksum = 0;
4813 memcpy(fw_cmd.driver_string, driver_ver, len);
4814 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4815 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4816
4817 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4818 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4819 sizeof(fw_cmd),
4820 IXGBE_HI_COMMAND_TIMEOUT,
4821 TRUE);
4822 if (ret_val != IXGBE_SUCCESS)
4823 continue;
4824
4825 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4826 FW_CEM_RESP_STATUS_SUCCESS)
4827 ret_val = IXGBE_SUCCESS;
4828 else
4829 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4830
4831 break;
4832 }
4833
4834 return ret_val;
4835 }
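
/*
 * Illustrative sketch (not part of the driver, compiled out): how a driver
 * might report its version through the helper above.  The version numbers
 * and string are placeholders.
 */
#if 0
static s32
example_report_driver_version(struct ixgbe_hw *hw)
{
	static const char ver_str[] = "1.0.0-example";

	return ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
	    (u16)sizeof(ver_str), ver_str);
}
#endif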
4836
4837 /**
4838 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4839  * @hw: pointer to hardware structure
4840 *
4841 * Returns TRUE if in FW NVM recovery mode.
4842 **/
4843 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4844 {
4845 u32 fwsm;
4846
4847 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4848
4849 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4850 }
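
/*
 * Illustrative sketch (not part of the driver, compiled out): NVM updates
 * are typically refused while the firmware is in recovery mode, so a
 * caller would check the predicate above first.  The helper name and the
 * choice of error code are placeholders.
 */
#if 0
static s32
example_guarded_checksum_update(struct ixgbe_hw *hw)
{
	if (ixgbe_fw_recovery_mode_X550(hw))
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;

	return ixgbe_update_eeprom_checksum_X550(hw);
}
#endif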
4851