1 /*	$NetBSD: ixgbe_x550.c,v 1.22 2021/12/10 11:16:54 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: ixgbe_x550.c,v 1.22 2021/12/10 11:16:54 msaitoh Exp $");
39
40 #include "ixgbe_x550.h"
41 #include "ixgbe_x540.h"
42 #include "ixgbe_type.h"
43 #include "ixgbe_api.h"
44 #include "ixgbe_common.h"
45 #include "ixgbe_phy.h"
46 #include <dev/mii/mii.h>
47
48 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
49 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
50 ixgbe_link_speed speed,
51 bool autoneg_wait_to_complete);
52 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
53 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
54 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
55
56 /**
57 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
58 * @hw: pointer to hardware structure
59 *
60 * Initialize the function pointers and assign the MAC type for X550.
61 * Does not touch the hardware.
62 **/
63 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
64 {
65 struct ixgbe_mac_info *mac = &hw->mac;
66 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
67 s32 ret_val;
68
69 DEBUGFUNC("ixgbe_init_ops_X550");
70
71 ret_val = ixgbe_init_ops_X540(hw);
72 mac->ops.dmac_config = ixgbe_dmac_config_X550;
73 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
74 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
75 mac->ops.setup_eee = NULL;
76 mac->ops.set_source_address_pruning =
77 ixgbe_set_source_address_pruning_X550;
78 mac->ops.set_ethertype_anti_spoofing =
79 ixgbe_set_ethertype_anti_spoofing_X550;
80
81 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
82 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
83 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
84 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
85 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
86 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
87 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
88 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
89 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
90
91 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
92 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
93 mac->ops.mdd_event = ixgbe_mdd_event_X550;
94 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
95 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
96 mac->ops.disable_rx = ixgbe_disable_rx_x550;
97 /* Manageability interface */
98 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
99 switch (hw->device_id) {
100 case IXGBE_DEV_ID_X550EM_X_1G_T:
101 hw->mac.ops.led_on = NULL;
102 hw->mac.ops.led_off = NULL;
103 break;
104 case IXGBE_DEV_ID_X550EM_X_10G_T:
105 case IXGBE_DEV_ID_X550EM_A_10G_T:
106 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
107 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
108 break;
109 default:
110 break;
111 }
112 return ret_val;
113 }
114
115 /**
116 * ixgbe_read_cs4227 - Read CS4227 register
117 * @hw: pointer to hardware structure
118  * @reg: register number to read
119 * @value: pointer to receive value read
120 *
121 * Returns status code
122 **/
123 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
124 {
125 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
126 }
127
128 /**
129 * ixgbe_write_cs4227 - Write CS4227 register
130 * @hw: pointer to hardware structure
131 * @reg: register number to write
132 * @value: value to write to register
133 *
134 * Returns status code
135 **/
136 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
137 {
138 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
139 }
140
141 /**
142 * ixgbe_read_pe - Read register from port expander
143 * @hw: pointer to hardware structure
144 * @reg: register number to read
145 * @value: pointer to receive read value
146 *
147 * Returns status code
148 **/
149 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
150 {
151 s32 status;
152
153 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
154 if (status != IXGBE_SUCCESS)
155 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
156 "port expander access failed with %d\n", status);
157 return status;
158 }
159
160 /**
161 * ixgbe_write_pe - Write register to port expander
162 * @hw: pointer to hardware structure
163 * @reg: register number to write
164 * @value: value to write
165 *
166 * Returns status code
167 **/
168 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
169 {
170 s32 status;
171
172 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
173 if (status != IXGBE_SUCCESS)
174 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
175 "port expander access failed with %d\n", status);
176 return status;
177 }
178
179 /**
180 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
181 * @hw: pointer to hardware structure
182 *
183 * This function assumes that the caller has acquired the proper semaphore.
184 * Returns error code
185 **/
186 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
187 {
188 s32 status;
189 u32 retry;
190 u16 value;
191 u8 reg;
192
193 /* Trigger hard reset. */
194 	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
195 if (status != IXGBE_SUCCESS)
196 return status;
197 reg |= IXGBE_PE_BIT1;
198 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
199 if (status != IXGBE_SUCCESS)
200 return status;
201
202 	status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
203 if (status != IXGBE_SUCCESS)
204 return status;
205 reg &= ~IXGBE_PE_BIT1;
206 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
207 if (status != IXGBE_SUCCESS)
208 return status;
209
210 	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
211 if (status != IXGBE_SUCCESS)
212 return status;
213 reg &= ~IXGBE_PE_BIT1;
214 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
215 if (status != IXGBE_SUCCESS)
216 return status;
217
218 usec_delay(IXGBE_CS4227_RESET_HOLD);
219
220 	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
221 if (status != IXGBE_SUCCESS)
222 return status;
223 reg |= IXGBE_PE_BIT1;
224 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
225 if (status != IXGBE_SUCCESS)
226 return status;
227
228 /* Wait for the reset to complete. */
229 msec_delay(IXGBE_CS4227_RESET_DELAY);
230 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
231 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
232 &value);
233 if (status == IXGBE_SUCCESS &&
234 value == IXGBE_CS4227_EEPROM_LOAD_OK)
235 break;
236 msec_delay(IXGBE_CS4227_CHECK_DELAY);
237 }
238 if (retry == IXGBE_CS4227_RETRIES) {
239 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
240 "CS4227 reset did not complete.");
241 return IXGBE_ERR_PHY;
242 }
243
244 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
245 if (status != IXGBE_SUCCESS ||
246 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
247 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
248 "CS4227 EEPROM did not load successfully.");
249 return IXGBE_ERR_PHY;
250 }
251
252 return IXGBE_SUCCESS;
253 }
254
255 /**
256 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
257 * @hw: pointer to hardware structure
258 **/
259 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
260 {
261 s32 status = IXGBE_SUCCESS;
262 u32 swfw_mask = hw->phy.phy_semaphore_mask;
263 u16 value = 0;
264 u8 retry;
265
266 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
267 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
268 if (status != IXGBE_SUCCESS) {
269 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
270 "semaphore failed with %d", status);
271 msec_delay(IXGBE_CS4227_CHECK_DELAY);
272 continue;
273 }
274
275 /* Get status of reset flow. */
276 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
277
278 if (status == IXGBE_SUCCESS &&
279 value == IXGBE_CS4227_RESET_COMPLETE)
280 goto out;
281
282 if (status != IXGBE_SUCCESS ||
283 value != IXGBE_CS4227_RESET_PENDING)
284 break;
285
286 /* Reset is pending. Wait and check again. */
287 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
288 msec_delay(IXGBE_CS4227_CHECK_DELAY);
289 }
290
291 /* If still pending, assume other instance failed. */
292 if (retry == IXGBE_CS4227_RETRIES) {
293 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
294 if (status != IXGBE_SUCCESS) {
295 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
296 "semaphore failed with %d", status);
297 return;
298 }
299 }
300
301 /* Reset the CS4227. */
302 status = ixgbe_reset_cs4227(hw);
303 if (status != IXGBE_SUCCESS) {
304 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
305 "CS4227 reset failed: %d", status);
306 goto out;
307 }
308
309 /* Reset takes so long, temporarily release semaphore in case the
310 * other driver instance is waiting for the reset indication.
311 */
312 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
313 IXGBE_CS4227_RESET_PENDING);
314 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
315 msec_delay(10);
316 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
317 if (status != IXGBE_SUCCESS) {
318 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
319 "semaphore failed with %d", status);
320 return;
321 }
322
323 /* Record completion for next time. */
324 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
325 IXGBE_CS4227_RESET_COMPLETE);
326
327 out:
328 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
329 msec_delay(hw->eeprom.semaphore_delay);
330 }
331
332 /**
333 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
334 * @hw: pointer to hardware structure
335 **/
336 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
337 {
338 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
339
340 if (hw->bus.lan_id) {
341 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
342 esdp |= IXGBE_ESDP_SDP1_DIR;
343 }
344 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
345 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
346 IXGBE_WRITE_FLUSH(hw);
347 }
348
349 /**
350 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
351 * @hw: pointer to hardware structure
352 * @reg_addr: 32 bit address of PHY register to read
353 * @dev_type: always unused
354 * @phy_data: Pointer to read data from PHY register
355 */
356 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
357 u32 dev_type, u16 *phy_data)
358 {
359 u32 i, data, command;
360 UNREFERENCED_1PARAMETER(dev_type);
361
362 /* Setup and write the read command */
363 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
364 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
365 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
366 IXGBE_MSCA_MDI_COMMAND;
367
368 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
369
370 /* Check every 10 usec to see if the access completed.
371 * The MDI Command bit will clear when the operation is
372 * complete
373 */
374 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
375 usec_delay(10);
376
377 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
378 if (!(command & IXGBE_MSCA_MDI_COMMAND))
379 break;
380 }
381
382 if (command & IXGBE_MSCA_MDI_COMMAND) {
383 ERROR_REPORT1(IXGBE_ERROR_POLLING,
384 "PHY read command did not complete.\n");
385 return IXGBE_ERR_PHY;
386 }
387
388 /* Read operation is complete. Get the data from MSRWD */
389 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
390 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
391 *phy_data = (u16)data;
392
393 return IXGBE_SUCCESS;
394 }
395
396 /**
397 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
398 * @hw: pointer to hardware structure
399 * @reg_addr: 32 bit PHY register to write
400 * @dev_type: always unused
401 * @phy_data: Data to write to the PHY register
402 */
403 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
404 u32 dev_type, u16 phy_data)
405 {
406 u32 i, command;
407 UNREFERENCED_1PARAMETER(dev_type);
408
409 	/* Put the data in the MDI single read and write data register */
410 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
411
412 /* Setup and write the write command */
413 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
414 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
415 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
416 IXGBE_MSCA_MDI_COMMAND;
417
418 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
419
420 /* Check every 10 usec to see if the access completed.
421 * The MDI Command bit will clear when the operation is
422 * complete
423 */
424 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
425 usec_delay(10);
426
427 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
428 if (!(command & IXGBE_MSCA_MDI_COMMAND))
429 break;
430 }
431
432 if (command & IXGBE_MSCA_MDI_COMMAND) {
433 ERROR_REPORT1(IXGBE_ERROR_POLLING,
434 "PHY write cmd didn't complete\n");
435 return IXGBE_ERR_PHY;
436 }
437
438 return IXGBE_SUCCESS;
439 }
440
441 /**
442 * ixgbe_identify_phy_x550em - Get PHY type based on device id
443 * @hw: pointer to hardware structure
444 *
445 * Returns error code
446 */
447 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
448 {
449 hw->mac.ops.set_lan_id(hw);
450
451 ixgbe_read_mng_if_sel_x550em(hw);
452
453 switch (hw->device_id) {
454 case IXGBE_DEV_ID_X550EM_A_SFP:
455 return ixgbe_identify_sfp_module_X550em(hw);
456 case IXGBE_DEV_ID_X550EM_X_SFP:
457 /* set up for CS4227 usage */
458 ixgbe_setup_mux_ctl(hw);
459 ixgbe_check_cs4227(hw);
460 /* Fallthrough */
461
462 case IXGBE_DEV_ID_X550EM_A_SFP_N:
463 return ixgbe_identify_sfp_module_X550em(hw);
464 break;
465 case IXGBE_DEV_ID_X550EM_X_KX4:
466 hw->phy.type = ixgbe_phy_x550em_kx4;
467 break;
468 case IXGBE_DEV_ID_X550EM_X_XFI:
469 hw->phy.type = ixgbe_phy_x550em_xfi;
470 break;
471 case IXGBE_DEV_ID_X550EM_X_KR:
472 case IXGBE_DEV_ID_X550EM_A_KR:
473 case IXGBE_DEV_ID_X550EM_A_KR_L:
474 hw->phy.type = ixgbe_phy_x550em_kr;
475 break;
476 case IXGBE_DEV_ID_X550EM_A_10G_T:
477 case IXGBE_DEV_ID_X550EM_X_10G_T:
478 return ixgbe_identify_phy_generic(hw);
479 case IXGBE_DEV_ID_X550EM_X_1G_T:
480 hw->phy.type = ixgbe_phy_ext_1g_t;
481 break;
482 case IXGBE_DEV_ID_X550EM_A_1G_T:
483 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
484 hw->phy.type = ixgbe_phy_fw;
485 if (hw->bus.lan_id)
486 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
487 else
488 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
489 break;
490 default:
491 break;
492 }
493 return IXGBE_SUCCESS;
494 }
495
496 /**
497 * ixgbe_fw_phy_activity - Perform an activity on a PHY
498 * @hw: pointer to hardware structure
499 * @activity: activity to perform
500 * @data: Pointer to 4 32-bit words of data
501 */
502 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
503 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
504 {
505 union {
506 struct ixgbe_hic_phy_activity_req cmd;
507 struct ixgbe_hic_phy_activity_resp rsp;
508 } hic;
509 u16 retries = FW_PHY_ACT_RETRIES;
510 s32 rc;
511 u16 i;
512
513 do {
514 memset(&hic, 0, sizeof(hic));
515 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
516 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
517 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
518 hic.cmd.port_number = hw->bus.lan_id;
519 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
520 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
521 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
522
523 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
524 sizeof(hic.cmd),
525 IXGBE_HI_COMMAND_TIMEOUT,
526 TRUE);
527 if (rc != IXGBE_SUCCESS)
528 return rc;
529 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
530 FW_CEM_RESP_STATUS_SUCCESS) {
531 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
532 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
533 return IXGBE_SUCCESS;
534 }
535 usec_delay(20);
536 --retries;
537 } while (retries > 0);
538
539 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
540 }
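
/*
 * Minimal usage sketch: the caller passes the activity ID and a pointer to
 * a four-word buffer that carries the request data in and the response data
 * out (ixgbe_get_phy_id_fw() below is a real caller).  With a hypothetical
 * local 'supported':
 *
 *	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
 *
 *	if (ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info) ==
 *	    IXGBE_SUCCESS)
 *		supported = info[0] & FW_PHY_INFO_SPEED_MASK;
 */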
541
542 static const struct {
543 u16 fw_speed;
544 ixgbe_link_speed phy_speed;
545 } ixgbe_fw_map[] = {
546 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
547 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
548 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
549 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
550 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
551 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
552 };
553
554 /**
555 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
556 * @hw: pointer to hardware structure
557 *
558 * Returns error code
559 */
560 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
561 {
562 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
563 u16 phy_speeds;
564 u16 phy_id_lo;
565 s32 rc;
566 u16 i;
567
568 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
569 if (rc)
570 return rc;
571
572 hw->phy.speeds_supported = 0;
573 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
574 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
575 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
576 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
577 }
578
579 #if 0
580 /*
581 	 * Don't set autoneg_advertised here, so as not to be inconsistent
582 	 * with the if_media value.
583 */
584 if (!hw->phy.autoneg_advertised)
585 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
586 #endif
587
588 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
589 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
590 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
591 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
592 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
593 return IXGBE_ERR_PHY_ADDR_INVALID;
594 return IXGBE_SUCCESS;
595 }
596
597 /**
598 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
599 * @hw: pointer to hardware structure
600 *
601 * Returns error code
602 */
603 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
604 {
605 if (hw->bus.lan_id)
606 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
607 else
608 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
609
610 hw->phy.type = ixgbe_phy_fw;
611 hw->phy.ops.read_reg = NULL;
612 hw->phy.ops.write_reg = NULL;
613 return ixgbe_get_phy_id_fw(hw);
614 }
615
616 /**
617 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
618 * @hw: pointer to hardware structure
619 *
620 * Returns error code
621 */
622 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
623 {
624 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
625
626 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
627 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
628 }
629
630 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
631 u32 device_type, u16 *phy_data)
632 {
633 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
634 return IXGBE_NOT_IMPLEMENTED;
635 }
636
637 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
638 u32 device_type, u16 phy_data)
639 {
640 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
641 return IXGBE_NOT_IMPLEMENTED;
642 }
643
644 /**
645 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
646 * @hw: pointer to the hardware structure
647 * @addr: I2C bus address to read from
648 * @reg: I2C device register to read from
649 * @val: pointer to location to receive read value
650 *
651 * Returns an error code on error.
652 **/
653 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
654 u16 reg, u16 *val)
655 {
656 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
657 }
658
659 /**
660 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
661 * @hw: pointer to the hardware structure
662 * @addr: I2C bus address to read from
663 * @reg: I2C device register to read from
664 * @val: pointer to location to receive read value
665 *
666 * Returns an error code on error.
667 **/
668 static s32
669 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
670 u16 reg, u16 *val)
671 {
672 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
673 }
674
675 /**
676 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
677 * @hw: pointer to the hardware structure
678 * @addr: I2C bus address to write to
679 * @reg: I2C device register to write to
680 * @val: value to write
681 *
682 * Returns an error code on error.
683 **/
684 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
685 u8 addr, u16 reg, u16 val)
686 {
687 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
688 }
689
690 /**
691 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
692 * @hw: pointer to the hardware structure
693 * @addr: I2C bus address to write to
694 * @reg: I2C device register to write to
695 * @val: value to write
696 *
697 * Returns an error code on error.
698 **/
699 static s32
700 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
701 u8 addr, u16 reg, u16 val)
702 {
703 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
704 }
705
706 /**
707 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
708 * @hw: pointer to hardware structure
709 *
710  * Initialize the function pointers for MAC type X550EM.
711 * Does not touch the hardware.
712 **/
713 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
714 {
715 struct ixgbe_mac_info *mac = &hw->mac;
716 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
717 struct ixgbe_phy_info *phy = &hw->phy;
718 s32 ret_val;
719
720 DEBUGFUNC("ixgbe_init_ops_X550EM");
721
722 /* Similar to X550 so start there. */
723 ret_val = ixgbe_init_ops_X550(hw);
724
725 /* Since this function eventually calls
726 	 * ixgbe_init_ops_X540 by design, we are setting
727 * the pointers to NULL explicitly here to overwrite
728 * the values being set in the x540 function.
729 */
730
731 /* Bypass not supported in x550EM */
732 mac->ops.bypass_rw = NULL;
733 mac->ops.bypass_valid_rd = NULL;
734 mac->ops.bypass_set = NULL;
735 mac->ops.bypass_rd_eep = NULL;
736
737 /* FCOE not supported in x550EM */
738 mac->ops.get_san_mac_addr = NULL;
739 mac->ops.set_san_mac_addr = NULL;
740 mac->ops.get_wwn_prefix = NULL;
741 mac->ops.get_fcoe_boot_status = NULL;
742
743 /* IPsec not supported in x550EM */
744 mac->ops.disable_sec_rx_path = NULL;
745 mac->ops.enable_sec_rx_path = NULL;
746
747 /* AUTOC register is not present in x550EM. */
748 mac->ops.prot_autoc_read = NULL;
749 mac->ops.prot_autoc_write = NULL;
750
751 	/* X550EM bus type is internal */
752 hw->bus.type = ixgbe_bus_type_internal;
753 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
754
755
756 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
757 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
758 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
759 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
760 mac->ops.get_supported_physical_layer =
761 ixgbe_get_supported_physical_layer_X550em;
762
763 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
764 mac->ops.setup_fc = ixgbe_setup_fc_generic;
765 else
766 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
767
768 /* PHY */
769 phy->ops.init = ixgbe_init_phy_ops_X550em;
770 switch (hw->device_id) {
771 case IXGBE_DEV_ID_X550EM_A_1G_T:
772 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
773 mac->ops.setup_fc = NULL;
774 phy->ops.identify = ixgbe_identify_phy_fw;
775 phy->ops.set_phy_power = NULL;
776 phy->ops.get_firmware_version = NULL;
777 break;
778 case IXGBE_DEV_ID_X550EM_X_1G_T:
779 mac->ops.setup_fc = NULL;
780 phy->ops.identify = ixgbe_identify_phy_x550em;
781 phy->ops.set_phy_power = NULL;
782 break;
783 default:
784 phy->ops.identify = ixgbe_identify_phy_x550em;
785 }
786
787 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
788 phy->ops.set_phy_power = NULL;
789
790
791 /* EEPROM */
792 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
793 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
794 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
795 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
796 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
797 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
798 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
799 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
800
801 return ret_val;
802 }
803
804 #define IXGBE_DENVERTON_WA 1
805
806 /**
807 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
808 * @hw: pointer to hardware structure
809 */
810 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
811 {
812 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
813 s32 rc;
814 #ifdef IXGBE_DENVERTON_WA
815 s32 ret_val;
816 u16 phydata;
817 #endif
818 u16 i;
819
820 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
821 return 0;
822
823 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
824 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
825 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
826 return IXGBE_ERR_INVALID_LINK_SETTINGS;
827 }
828
829 switch (hw->fc.requested_mode) {
830 case ixgbe_fc_full:
831 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
832 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
833 break;
834 case ixgbe_fc_rx_pause:
835 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
836 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
837 break;
838 case ixgbe_fc_tx_pause:
839 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
840 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
841 break;
842 default:
843 break;
844 }
845
846 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
847 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
848 setup[0] |= (u32)(ixgbe_fw_map[i].fw_speed);
849 }
850 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
851
852 if (hw->phy.eee_speeds_advertised)
853 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
854
855 #ifdef IXGBE_DENVERTON_WA
856 if ((hw->phy.force_10_100_autonego == false)
857 && ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
858 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL))) {
859 /* Don't use auto-nego for 10/100Mbps */
860 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
861 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
862 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
863 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
864 }
865 #endif
866
867 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
868 if (rc)
869 return rc;
870
871 #ifdef IXGBE_DENVERTON_WA
872 if (hw->phy.force_10_100_autonego == true)
873 goto out;
874
875 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
876 if (ret_val != 0)
877 goto out;
878
879 /*
880 * Broken firmware sets BMCR register incorrectly if
881 * FW_PHY_ACT_SETUP_LINK_AN isn't set.
882 * a) FDX may not be set.
883 * b) BMCR_SPEED1 (bit 6) is always cleared.
884 	 * +---------+-------+-----------+------+---------------------------+
885 	 * | request | BMCR  | BMCR spd  | BMCR |                           |
886 	 * |         | (hex) | (in bits) | FDX  |                           |
887 	 * +---------+-------+-----------+------+---------------------------+
888 	 * | 10M     | 0000  | 10M (00)  |  0   |                           |
889 	 * | 10M     | 2000  | 100M (01) |  0   | (I've never observed this)|
890 	 * | 10M     | 2100  | 100M (01) |  1   |                           |
891 	 * | 100M    | 0000  | 10M (00)  |  0   |                           |
892 	 * | 100M    | 0100  | 10M (00)  |  1   |                           |
893 	 * +---------+-------+-----------+------+---------------------------+
894 */
895 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
896 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
897 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
898 && (((phydata & BMCR_FDX) == 0)
899 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
900 phydata = BMCR_FDX;
901 switch (hw->phy.autoneg_advertised) {
902 case IXGBE_LINK_SPEED_10_FULL:
903 phydata |= BMCR_S10;
904 break;
905 case IXGBE_LINK_SPEED_100_FULL:
906 phydata |= BMCR_S100;
907 break;
908 case IXGBE_LINK_SPEED_1GB_FULL:
909 panic("%s: 1GB_FULL is set", __func__);
910 break;
911 default:
912 break;
913 }
914 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
915 if (ret_val != 0)
916 return ret_val;
917 }
918 out:
919 #endif
920 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
921 return IXGBE_ERR_OVERTEMP;
922 return IXGBE_SUCCESS;
923 }
924
925 /**
926  * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
927 * @hw: pointer to hardware structure
928 *
929 * Called at init time to set up flow control.
930 */
931 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
932 {
933 if (hw->fc.requested_mode == ixgbe_fc_default)
934 hw->fc.requested_mode = ixgbe_fc_full;
935
936 return ixgbe_setup_fw_link(hw);
937 }
938
939 /**
940 * ixgbe_setup_eee_fw - Enable/disable EEE support
941 * @hw: pointer to the HW structure
942 * @enable_eee: boolean flag to enable EEE
943 *
944 * Enable/disable EEE based on enable_eee flag.
945 * This function controls EEE for firmware-based PHY implementations.
946 */
947 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
948 {
949 if (!!hw->phy.eee_speeds_advertised == enable_eee)
950 return IXGBE_SUCCESS;
951 if (enable_eee)
952 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
953 else
954 hw->phy.eee_speeds_advertised = 0;
955 return hw->phy.ops.setup_link(hw);
956 }
957
958 /**
959 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
960 * @hw: pointer to hardware structure
961 *
962  * Initialize the function pointers for MAC type X550EM_a.
963 * Does not touch the hardware.
964 **/
965 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
966 {
967 struct ixgbe_mac_info *mac = &hw->mac;
968 s32 ret_val;
969
970 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
971
972 /* Start with generic X550EM init */
973 ret_val = ixgbe_init_ops_X550EM(hw);
974
975 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
976 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
977 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
978 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
979 } else {
980 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
981 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
982 }
983 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
984 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
985
986 switch (mac->ops.get_media_type(hw)) {
987 case ixgbe_media_type_fiber:
988 mac->ops.setup_fc = NULL;
989 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
990 break;
991 case ixgbe_media_type_backplane:
992 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
993 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
994 break;
995 default:
996 break;
997 }
998
999 switch (hw->device_id) {
1000 case IXGBE_DEV_ID_X550EM_A_1G_T:
1001 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1002 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
1003 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
1004 mac->ops.setup_eee = ixgbe_setup_eee_fw;
1005 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
1006 IXGBE_LINK_SPEED_1GB_FULL;
1007 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1008 break;
1009 default:
1010 break;
1011 }
1012
1013 return ret_val;
1014 }
1015
1016 /**
1017 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1018 * @hw: pointer to hardware structure
1019 *
1020  * Initialize the function pointers for MAC type X550EM_x.
1021 * Does not touch the hardware.
1022 **/
1023 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1024 {
1025 struct ixgbe_mac_info *mac = &hw->mac;
1026 struct ixgbe_link_info *link = &hw->link;
1027 s32 ret_val;
1028
1029 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1030
1031 /* Start with generic X550EM init */
1032 ret_val = ixgbe_init_ops_X550EM(hw);
1033
1034 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1035 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1036 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1037 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1038 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1039 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1040 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1041 link->ops.write_link_unlocked =
1042 ixgbe_write_i2c_combined_generic_unlocked;
1043 link->addr = IXGBE_CS4227;
1044
1045 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1046 mac->ops.setup_fc = NULL;
1047 mac->ops.setup_eee = NULL;
1048 mac->ops.init_led_link_act = NULL;
1049 }
1050
1051 return ret_val;
1052 }
1053
1054 /**
1055 * ixgbe_dmac_config_X550
1056 * @hw: pointer to hardware structure
1057 *
1058 * Configure DMA coalescing. If enabling dmac, dmac is activated.
1059  * When disabling dmac, the dmac enable bit is cleared.
1060 **/
1061 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1062 {
1063 u32 reg, high_pri_tc;
1064
1065 DEBUGFUNC("ixgbe_dmac_config_X550");
1066
1067 /* Disable DMA coalescing before configuring */
1068 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1069 reg &= ~IXGBE_DMACR_DMAC_EN;
1070 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1071
1072 /* Disable DMA Coalescing if the watchdog timer is 0 */
1073 if (!hw->mac.dmac_config.watchdog_timer)
1074 goto out;
1075
1076 ixgbe_dmac_config_tcs_X550(hw);
1077
1078 /* Configure DMA Coalescing Control Register */
1079 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1080
1081 /* Set the watchdog timer in units of 40.96 usec */
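	/*
	 * (watchdog_timer * 100) / 4096 is watchdog_timer / 40.96, so a
	 * watchdog_timer of 1000 (usec) programs 24 units of 40.96 usec.
	 */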
1082 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1083 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1084
1085 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1086 /* If fcoe is enabled, set high priority traffic class */
1087 if (hw->mac.dmac_config.fcoe_en) {
1088 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1089 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1090 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1091 }
1092 reg |= IXGBE_DMACR_EN_MNG_IND;
1093
1094 /* Enable DMA coalescing after configuration */
1095 reg |= IXGBE_DMACR_DMAC_EN;
1096 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1097
1098 out:
1099 return IXGBE_SUCCESS;
1100 }
1101
1102 /**
1103 * ixgbe_dmac_config_tcs_X550
1104 * @hw: pointer to hardware structure
1105 *
1106 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1107 * be cleared before configuring.
1108 **/
1109 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1110 {
1111 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1112
1113 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1114
1115 	/* Select the Rx packet buffer headroom based on link speed */
1116 switch (hw->mac.dmac_config.link_speed) {
1117 case IXGBE_LINK_SPEED_10_FULL:
1118 case IXGBE_LINK_SPEED_100_FULL:
1119 pb_headroom = IXGBE_DMACRXT_100M;
1120 break;
1121 case IXGBE_LINK_SPEED_1GB_FULL:
1122 pb_headroom = IXGBE_DMACRXT_1G;
1123 break;
1124 default:
1125 pb_headroom = IXGBE_DMACRXT_10G;
1126 break;
1127 }
1128
1129 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1130 IXGBE_MHADD_MFS_SHIFT) / 1024);
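	/*
	 * MAXFRS carries the max frame size in bytes in its MFS field, so
	 * e.g. a 9728-byte jumbo MFS gives a 9 KB floor for the threshold.
	 */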
1131
1132 /* Set the per Rx packet buffer receive threshold */
1133 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1134 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1135 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1136
1137 if (tc < hw->mac.dmac_config.num_tcs) {
1138 /* Get Rx PB size */
1139 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1140 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1141 IXGBE_RXPBSIZE_SHIFT;
1142
1143 /* Calculate receive buffer threshold in kilobytes */
1144 if (rx_pb_size > pb_headroom)
1145 rx_pb_size = rx_pb_size - pb_headroom;
1146 else
1147 rx_pb_size = 0;
1148
1149 /* Minimum of MFS shall be set for DMCTH */
1150 reg |= (rx_pb_size > maxframe_size_kb) ?
1151 rx_pb_size : maxframe_size_kb;
1152 }
1153 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1154 }
1155 return IXGBE_SUCCESS;
1156 }
1157
1158 /**
1159 * ixgbe_dmac_update_tcs_X550
1160 * @hw: pointer to hardware structure
1161 *
1162 * Disables dmac, updates per TC settings, and then enables dmac.
1163 **/
1164 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1165 {
1166 u32 reg;
1167
1168 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1169
1170 /* Disable DMA coalescing before configuring */
1171 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1172 reg &= ~IXGBE_DMACR_DMAC_EN;
1173 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1174
1175 ixgbe_dmac_config_tcs_X550(hw);
1176
1177 /* Enable DMA coalescing after configuration */
1178 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1179 reg |= IXGBE_DMACR_DMAC_EN;
1180 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1181
1182 return IXGBE_SUCCESS;
1183 }
1184
1185 /**
1186 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1187 * @hw: pointer to hardware structure
1188 *
1189 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1190 * ixgbe_hw struct in order to set up EEPROM access.
1191 **/
1192 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1193 {
1194 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1195 u32 eec;
1196 u16 eeprom_size;
1197
1198 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1199
1200 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1201 eeprom->semaphore_delay = 10;
1202 eeprom->type = ixgbe_flash;
1203
1204 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1205 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1206 IXGBE_EEC_SIZE_SHIFT);
1207 eeprom->word_size = 1 << (eeprom_size +
1208 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1209
1210 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1211 eeprom->type, eeprom->word_size);
1212 }
1213
1214 return IXGBE_SUCCESS;
1215 }
1216
1217 /**
1218  * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1219 * @hw: pointer to hardware structure
1220 * @enable: enable or disable source address pruning
1221 * @pool: Rx pool to set source address pruning for
1222 **/
1223 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1224 unsigned int pool)
1225 {
1226 u64 pfflp;
1227
1228 /* max rx pool is 63 */
1229 if (pool > 63)
1230 return;
1231
1232 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1233 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
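	/*
	 * PFFLPL/PFFLPH form a single 64-bit pool bitmap: pools 0-31 live
	 * in PFFLPL and pools 32-63 in PFFLPH, so e.g. pool 40 is bit 8 of
	 * PFFLPH.
	 */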
1234
1235 if (enable)
1236 pfflp |= (1ULL << pool);
1237 else
1238 pfflp &= ~(1ULL << pool);
1239
1240 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1241 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1242 }
1243
1244 /**
1245 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1246 * @hw: pointer to hardware structure
1247 * @enable: enable or disable switch for Ethertype anti-spoofing
1248 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1249 *
1250 **/
1251 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1252 bool enable, int vf)
1253 {
1254 int vf_target_reg = vf >> 3;
1255 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
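	/*
	 * Each PFVFSPOOF register covers 8 VFs: e.g. VF 10 uses register
	 * index 1 (10 >> 3) and bit (10 % 8) + IXGBE_SPOOF_ETHERTYPEAS_SHIFT.
	 */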
1256 u32 pfvfspoof;
1257
1258 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1259
1260 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1261 if (enable)
1262 pfvfspoof |= (1 << vf_target_shift);
1263 else
1264 pfvfspoof &= ~(1 << vf_target_shift);
1265
1266 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1267 }
1268
1269 /**
1270 * ixgbe_iosf_wait - Wait for IOSF command completion
1271 * @hw: pointer to hardware structure
1272 * @ctrl: pointer to location to receive final IOSF control value
1273 *
1274 * Returns failing status on timeout
1275 *
1276 * Note: ctrl can be NULL if the IOSF control register value is not needed
1277 **/
1278 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1279 {
1280 u32 i, command = 0;
1281
1282 /* Check every 10 usec to see if the address cycle completed.
1283 * The SB IOSF BUSY bit will clear when the operation is
1284 * complete
1285 */
1286 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1287 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1288 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1289 break;
1290 usec_delay(10);
1291 }
1292 if (ctrl)
1293 *ctrl = command;
1294 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1295 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1296 return IXGBE_ERR_PHY;
1297 }
1298
1299 return IXGBE_SUCCESS;
1300 }
1301
1302 /**
1303 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1304 * of the IOSF device
1305 * @hw: pointer to hardware structure
1306 * @reg_addr: 32 bit PHY register to write
1307 * @device_type: 3 bit device type
1308 * @data: Data to write to the register
1309 **/
1310 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1311 u32 device_type, u32 data)
1312 {
1313 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1314 u32 command, error __unused;
1315 s32 ret;
1316
1317 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1318 if (ret != IXGBE_SUCCESS)
1319 return ret;
1320
1321 ret = ixgbe_iosf_wait(hw, NULL);
1322 if (ret != IXGBE_SUCCESS)
1323 goto out;
1324
1325 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1326 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1327
1328 /* Write IOSF control register */
1329 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1330
1331 /* Write IOSF data register */
1332 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1333
1334 ret = ixgbe_iosf_wait(hw, &command);
1335
1336 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1337 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1338 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1339 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1340 "Failed to write, error %x\n", error);
1341 ret = IXGBE_ERR_PHY;
1342 }
1343
1344 out:
1345 ixgbe_release_swfw_semaphore(hw, gssr);
1346 return ret;
1347 }
1348
1349 /**
1350 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1351 * @hw: pointer to hardware structure
1352  * @reg_addr: 32 bit PHY register to read
1353 * @device_type: 3 bit device type
1354 * @data: Pointer to read data from the register
1355 **/
1356 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1357 u32 device_type, u32 *data)
1358 {
1359 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1360 u32 command, error __unused;
1361 s32 ret;
1362
1363 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1364 if (ret != IXGBE_SUCCESS)
1365 return ret;
1366
1367 ret = ixgbe_iosf_wait(hw, NULL);
1368 if (ret != IXGBE_SUCCESS)
1369 goto out;
1370
1371 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1372 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1373
1374 /* Write IOSF control register */
1375 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1376
1377 ret = ixgbe_iosf_wait(hw, &command);
1378
1379 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1380 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1381 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1382 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1383 "Failed to read, error %x\n", error);
1384 ret = IXGBE_ERR_PHY;
1385 }
1386
1387 if (ret == IXGBE_SUCCESS)
1388 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1389
1390 out:
1391 ixgbe_release_swfw_semaphore(hw, gssr);
1392 return ret;
1393 }
1394
1395 /**
1396 * ixgbe_get_phy_token - Get the token for shared phy access
1397 * @hw: Pointer to hardware structure
1398 */
1399
1400 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1401 {
1402 struct ixgbe_hic_phy_token_req token_cmd;
1403 s32 status;
1404
1405 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1406 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1407 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1408 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1409 token_cmd.port_number = hw->bus.lan_id;
1410 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1411 token_cmd.pad = 0;
1412 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1413 sizeof(token_cmd),
1414 IXGBE_HI_COMMAND_TIMEOUT,
1415 TRUE);
1416 if (status) {
1417 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1418 status);
1419 return status;
1420 }
1421 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1422 return IXGBE_SUCCESS;
1423 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1424 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1425 token_cmd.hdr.cmd_or_resp.ret_status);
1426 return IXGBE_ERR_FW_RESP_INVALID;
1427 }
1428
1429 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1430 return IXGBE_ERR_TOKEN_RETRY;
1431 }
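
/*
 * A successful ixgbe_get_phy_token() is expected to be balanced by a later
 * ixgbe_put_phy_token(); IXGBE_ERR_TOKEN_RETRY typically just means the
 * token is currently held elsewhere and the request may be retried.
 */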
1432
1433 /**
1434 * ixgbe_put_phy_token - Put the token for shared phy access
1435 * @hw: Pointer to hardware structure
1436 */
1437
1438 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1439 {
1440 struct ixgbe_hic_phy_token_req token_cmd;
1441 s32 status;
1442
1443 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1444 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1445 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1446 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1447 token_cmd.port_number = hw->bus.lan_id;
1448 token_cmd.command_type = FW_PHY_TOKEN_REL;
1449 token_cmd.pad = 0;
1450 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1451 sizeof(token_cmd),
1452 IXGBE_HI_COMMAND_TIMEOUT,
1453 TRUE);
1454 if (status)
1455 return status;
1456 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1457 return IXGBE_SUCCESS;
1458
1459 DEBUGOUT("Put PHY Token host interface command failed");
1460 return IXGBE_ERR_FW_RESP_INVALID;
1461 }
1462
1463 /**
1464 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1465 * of the IOSF device
1466 * @hw: pointer to hardware structure
1467 * @reg_addr: 32 bit PHY register to write
1468 * @device_type: 3 bit device type
1469 * @data: Data to write to the register
1470 **/
1471 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1472 u32 device_type, u32 data)
1473 {
1474 struct ixgbe_hic_internal_phy_req write_cmd;
1475 s32 status;
1476 UNREFERENCED_1PARAMETER(device_type);
1477
1478 memset(&write_cmd, 0, sizeof(write_cmd));
1479 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1480 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1481 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1482 write_cmd.port_number = hw->bus.lan_id;
1483 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1484 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1485 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1486
1487 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1488 sizeof(write_cmd),
1489 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1490
1491 return status;
1492 }
1493
1494 /**
1495 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1496 * @hw: pointer to hardware structure
1497  * @reg_addr: 32 bit PHY register to read
1498 * @device_type: 3 bit device type
1499 * @data: Pointer to read data from the register
1500 **/
1501 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1502 u32 device_type, u32 *data)
1503 {
1504 union {
1505 struct ixgbe_hic_internal_phy_req cmd;
1506 struct ixgbe_hic_internal_phy_resp rsp;
1507 } hic;
1508 s32 status;
1509 UNREFERENCED_1PARAMETER(device_type);
1510
1511 memset(&hic, 0, sizeof(hic));
1512 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1513 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1514 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1515 hic.cmd.port_number = hw->bus.lan_id;
1516 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1517 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1518
1519 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1520 sizeof(hic.cmd),
1521 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1522
1523 /* Extract the register value from the response. */
1524 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1525
1526 return status;
1527 }
1528
1529 /**
1530 * ixgbe_disable_mdd_X550
1531 * @hw: pointer to hardware structure
1532 *
1533 * Disable malicious driver detection
1534 **/
1535 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1536 {
1537 u32 reg;
1538
1539 DEBUGFUNC("ixgbe_disable_mdd_X550");
1540
1541 /* Disable MDD for TX DMA and interrupt */
1542 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1543 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1544 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1545
1546 /* Disable MDD for RX and interrupt */
1547 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1548 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1549 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1550 }
1551
1552 /**
1553 * ixgbe_enable_mdd_X550
1554 * @hw: pointer to hardware structure
1555 *
1556 * Enable malicious driver detection
1557 **/
1558 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1559 {
1560 u32 reg;
1561
1562 DEBUGFUNC("ixgbe_enable_mdd_X550");
1563
1564 /* Enable MDD for TX DMA and interrupt */
1565 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1566 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1567 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1568
1569 /* Enable MDD for RX and interrupt */
1570 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1571 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1572 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1573 }
1574
1575 /**
1576 * ixgbe_restore_mdd_vf_X550
1577 * @hw: pointer to hardware structure
1578 * @vf: vf index
1579 *
1580 * Restore VF that was disabled during malicious driver detection event
1581 **/
1582 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1583 {
1584 u32 idx, reg, num_qs, start_q, bitmask;
1585
1586 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1587
1588 /* Map VF to queues */
1589 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1590 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1591 case IXGBE_MRQC_VMDQRT8TCEN:
1592 num_qs = 8; /* 16 VFs / pools */
1593 bitmask = 0x000000FF;
1594 break;
1595 case IXGBE_MRQC_VMDQRSS32EN:
1596 case IXGBE_MRQC_VMDQRT4TCEN:
1597 num_qs = 4; /* 32 VFs / pools */
1598 bitmask = 0x0000000F;
1599 break;
1600 default: /* 64 VFs / pools */
1601 num_qs = 2;
1602 bitmask = 0x00000003;
1603 break;
1604 }
1605 start_q = vf * num_qs;
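
	/*
	 * Example: in 32-pool mode (num_qs = 4), VF 10 owns queues 40-43,
	 * i.e. bits 8-11 of WQBR_TX(1)/WQBR_RX(1).
	 */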
1606
1607 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1608 idx = start_q / 32;
1609 reg = 0;
1610 reg |= (bitmask << (start_q % 32));
1611 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1612 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1613 }
1614
1615 /**
1616 * ixgbe_mdd_event_X550
1617 * @hw: pointer to hardware structure
1618 * @vf_bitmap: vf bitmap of malicious vfs
1619 *
1620 * Handle malicious driver detection event.
1621 **/
1622 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1623 {
1624 u32 wqbr;
1625 u32 i, j, reg, q, shift, vf, idx;
1626
1627 DEBUGFUNC("ixgbe_mdd_event_X550");
1628
1629 /* figure out pool size for mapping to vf's */
1630 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1631 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1632 case IXGBE_MRQC_VMDQRT8TCEN:
1633 shift = 3; /* 16 VFs / pools */
1634 break;
1635 case IXGBE_MRQC_VMDQRSS32EN:
1636 case IXGBE_MRQC_VMDQRT4TCEN:
1637 shift = 2; /* 32 VFs / pools */
1638 break;
1639 default:
1640 shift = 1; /* 64 VFs / pools */
1641 break;
1642 }
1643
1644 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1645 for (i = 0; i < 4; i++) {
1646 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1647 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1648
1649 if (!wqbr)
1650 continue;
1651
1652 /* Get malicious queue */
1653 for (j = 0; j < 32 && wqbr; j++) {
1654
1655 if (!(wqbr & (1 << j)))
1656 continue;
1657
1658 /* Get queue from bitmask */
1659 q = j + (i * 32);
1660
1661 /* Map queue to vf */
1662 vf = (q >> shift);
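			/* e.g. with shift == 2 (32 pools), queue 40 -> VF 10 */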
1663
1664 /* Set vf bit in vf_bitmap */
1665 idx = vf / 32;
1666 vf_bitmap[idx] |= (1 << (vf % 32));
1667 wqbr &= ~(1 << j);
1668 }
1669 }
1670 }
1671
1672 /**
1673 * ixgbe_get_media_type_X550em - Get media type
1674 * @hw: pointer to hardware structure
1675 *
1676 * Returns the media type (fiber, copper, backplane)
1677 */
1678 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1679 {
1680 enum ixgbe_media_type media_type;
1681
1682 DEBUGFUNC("ixgbe_get_media_type_X550em");
1683
1684 /* Detect if there is a copper PHY attached. */
1685 switch (hw->device_id) {
1686 case IXGBE_DEV_ID_X550EM_X_KR:
1687 case IXGBE_DEV_ID_X550EM_X_KX4:
1688 case IXGBE_DEV_ID_X550EM_X_XFI:
1689 case IXGBE_DEV_ID_X550EM_A_KR:
1690 case IXGBE_DEV_ID_X550EM_A_KR_L:
1691 media_type = ixgbe_media_type_backplane;
1692 break;
1693 case IXGBE_DEV_ID_X550EM_X_SFP:
1694 case IXGBE_DEV_ID_X550EM_A_SFP:
1695 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1696 case IXGBE_DEV_ID_X550EM_A_QSFP:
1697 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1698 media_type = ixgbe_media_type_fiber;
1699 break;
1700 case IXGBE_DEV_ID_X550EM_X_1G_T:
1701 case IXGBE_DEV_ID_X550EM_X_10G_T:
1702 case IXGBE_DEV_ID_X550EM_A_10G_T:
1703 media_type = ixgbe_media_type_copper;
1704 break;
1705 case IXGBE_DEV_ID_X550EM_A_SGMII:
1706 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1707 media_type = ixgbe_media_type_backplane;
1708 hw->phy.type = ixgbe_phy_sgmii;
1709 break;
1710 case IXGBE_DEV_ID_X550EM_A_1G_T:
1711 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1712 media_type = ixgbe_media_type_copper;
1713 break;
1714 default:
1715 media_type = ixgbe_media_type_unknown;
1716 break;
1717 }
1718 return media_type;
1719 }
1720
1721 /**
1722 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1723 * @hw: pointer to hardware structure
1724 * @linear: TRUE if SFP module is linear
1725 */
1726 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1727 {
1728 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1729
1730 switch (hw->phy.sfp_type) {
1731 case ixgbe_sfp_type_not_present:
1732 return IXGBE_ERR_SFP_NOT_PRESENT;
1733 case ixgbe_sfp_type_da_cu_core0:
1734 case ixgbe_sfp_type_da_cu_core1:
1735 *linear = TRUE;
1736 break;
1737 case ixgbe_sfp_type_srlr_core0:
1738 case ixgbe_sfp_type_srlr_core1:
1739 case ixgbe_sfp_type_da_act_lmt_core0:
1740 case ixgbe_sfp_type_da_act_lmt_core1:
1741 case ixgbe_sfp_type_1g_sx_core0:
1742 case ixgbe_sfp_type_1g_sx_core1:
1743 case ixgbe_sfp_type_1g_lx_core0:
1744 case ixgbe_sfp_type_1g_lx_core1:
1745 *linear = FALSE;
1746 break;
1747 case ixgbe_sfp_type_unknown:
1748 case ixgbe_sfp_type_1g_cu_core0:
1749 case ixgbe_sfp_type_1g_cu_core1:
1750 default:
1751 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1752 }
1753
1754 return IXGBE_SUCCESS;
1755 }
1756
1757 /**
1758 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1759 * @hw: pointer to hardware structure
1760 *
1761 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1762 **/
1763 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1764 {
1765 s32 status;
1766 bool linear;
1767
1768 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1769
1770 status = ixgbe_identify_module_generic(hw);
1771
1772 if (status != IXGBE_SUCCESS)
1773 return status;
1774
1775 /* Check if SFP module is supported */
1776 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1777
1778 return status;
1779 }
1780
1781 /**
1782 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1783 * @hw: pointer to hardware structure
1784 */
1785 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1786 {
1787 s32 status;
1788 bool linear;
1789
1790 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1791
1792 /* Check if SFP module is supported */
1793 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1794
1795 if (status != IXGBE_SUCCESS)
1796 return status;
1797
1798 ixgbe_init_mac_link_ops_X550em(hw);
1799 hw->phy.ops.reset = NULL;
1800
1801 return IXGBE_SUCCESS;
1802 }
1803
1804 /**
1805 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1806 * internal PHY
1807 * @hw: pointer to hardware structure
1808 **/
1809 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1810 {
1811 s32 status;
1812 u32 link_ctrl;
1813
1814 /* Restart auto-negotiation. */
1815 status = hw->mac.ops.read_iosf_sb_reg(hw,
1816 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1817 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1818
1819 if (status) {
1820 DEBUGOUT("Auto-negotiation did not complete\n");
1821 return status;
1822 }
1823
1824 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1825 status = hw->mac.ops.write_iosf_sb_reg(hw,
1826 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1827 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1828
1829 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1830 u32 flx_mask_st20;
1831
1832 /* Indicate to FW that AN restart has been asserted */
1833 status = hw->mac.ops.read_iosf_sb_reg(hw,
1834 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1835 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1836
1837 if (status) {
1838 DEBUGOUT("Auto-negotiation did not complete\n");
1839 return status;
1840 }
1841
1842 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1843 status = hw->mac.ops.write_iosf_sb_reg(hw,
1844 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1845 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1846 }
1847
1848 return status;
1849 }
1850
1851 /**
1852 * ixgbe_setup_sgmii - Set up link for sgmii
1853 * @hw: pointer to hardware structure
1854 * @speed: new link speed
1855 * @autoneg_wait: TRUE when waiting for completion is needed
1856 */
1857 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1858 bool autoneg_wait)
1859 {
1860 struct ixgbe_mac_info *mac = &hw->mac;
1861 u32 lval, sval, flx_val;
1862 s32 rc;
1863
1864 rc = mac->ops.read_iosf_sb_reg(hw,
1865 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1866 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1867 if (rc)
1868 return rc;
1869
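	/*
	 * Force the MAC side of the SGMII link to 1G: clause-73 backplane AN
	 * is disabled while SGMII mode and clause-37 AN are enabled, and the
	 * speed select is forced to 1G.  The PMD flex mask is then programmed
	 * to the matching fixed 1G SGMII configuration further below.
	 */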
1870 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1871 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1872 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1873 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1874 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1875 rc = mac->ops.write_iosf_sb_reg(hw,
1876 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1877 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1878 if (rc)
1879 return rc;
1880
1881 rc = mac->ops.read_iosf_sb_reg(hw,
1882 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1883 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1884 if (rc)
1885 return rc;
1886
1887 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1888 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1889 rc = mac->ops.write_iosf_sb_reg(hw,
1890 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1891 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1892 if (rc)
1893 return rc;
1894
1895 rc = mac->ops.read_iosf_sb_reg(hw,
1896 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1897 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1898 if (rc)
1899 return rc;
1900
1901 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1902 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1903 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1904 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1905 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1906
1907 rc = mac->ops.write_iosf_sb_reg(hw,
1908 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1909 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1910 if (rc)
1911 return rc;
1912
1913 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1914 if (rc)
1915 return rc;
1916
1917 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1918 }
1919
1920 /**
1921 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1922 * @hw: pointer to hardware structure
1923 * @speed: new link speed
1924 * @autoneg_wait: TRUE when waiting for completion is needed
1925 */
1926 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1927 bool autoneg_wait)
1928 {
1929 struct ixgbe_mac_info *mac = &hw->mac;
1930 u32 lval, sval, flx_val;
1931 s32 rc;
1932
1933 rc = mac->ops.read_iosf_sb_reg(hw,
1934 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1935 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1936 if (rc)
1937 return rc;
1938
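	/*
	 * Mirrors ixgbe_setup_sgmii(), but with the force-speed bits cleared
	 * instead of set: the firmware-managed external PHY resolves the link
	 * speed, so the PMD flex mask below is programmed for auto-negotiated
	 * speed rather than a fixed 1G rate.
	 */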
1939 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1940 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1941 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1942 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1943 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1944 rc = mac->ops.write_iosf_sb_reg(hw,
1945 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1946 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1947 if (rc)
1948 return rc;
1949
1950 rc = mac->ops.read_iosf_sb_reg(hw,
1951 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1952 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1953 if (rc)
1954 return rc;
1955
1956 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1957 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1958 rc = mac->ops.write_iosf_sb_reg(hw,
1959 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1960 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1961 if (rc)
1962 return rc;
1963
1964 rc = mac->ops.write_iosf_sb_reg(hw,
1965 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1966 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1967 if (rc)
1968 return rc;
1969
1970 rc = mac->ops.read_iosf_sb_reg(hw,
1971 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1972 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1973 if (rc)
1974 return rc;
1975
1976 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1977 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1978 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1979 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1980 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1981
1982 rc = mac->ops.write_iosf_sb_reg(hw,
1983 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1984 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1985 if (rc)
1986 return rc;
1987
1988 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1989
1990 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1991 }
1992
1993 /**
1994 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1995 * @hw: pointer to hardware structure
1996 */
1997 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1998 {
1999 struct ixgbe_mac_info *mac = &hw->mac;
2000
2001 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
2002
2003 switch (hw->mac.ops.get_media_type(hw)) {
2004 case ixgbe_media_type_fiber:
2005 /* CS4227 does not support autoneg, so disable the laser control
2006 * functions for SFP+ fiber
2007 */
2008 mac->ops.disable_tx_laser = NULL;
2009 mac->ops.enable_tx_laser = NULL;
2010 mac->ops.flap_tx_laser = NULL;
2011 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2012 mac->ops.set_rate_select_speed =
2013 ixgbe_set_soft_rate_select_speed;
2014
2015 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2016 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2017 mac->ops.setup_mac_link =
2018 ixgbe_setup_mac_link_sfp_x550a;
2019 else
2020 mac->ops.setup_mac_link =
2021 ixgbe_setup_mac_link_sfp_x550em;
2022 break;
2023 case ixgbe_media_type_copper:
2024 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2025 break;
2026 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2027 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2028 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2029 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2030 mac->ops.check_link =
2031 ixgbe_check_mac_link_generic;
2032 } else {
2033 mac->ops.setup_link =
2034 ixgbe_setup_mac_link_t_X550em;
2035 }
2036 } else {
2037 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2038 mac->ops.check_link = ixgbe_check_link_t_X550em;
2039 }
2040 break;
2041 case ixgbe_media_type_backplane:
2042 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2043 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2044 mac->ops.setup_link = ixgbe_setup_sgmii;
2045 break;
2046 default:
2047 break;
2048 }
2049 }
2050
2051 /**
2052  * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2053 * @hw: pointer to hardware structure
2054 * @speed: pointer to link speed
2055 * @autoneg: TRUE when autoneg or autotry is enabled
2056 */
2057 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2058 ixgbe_link_speed *speed,
2059 bool *autoneg)
2060 {
2061 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2062
2063
2064 if (hw->phy.type == ixgbe_phy_fw) {
2065 *autoneg = TRUE;
2066 *speed = hw->phy.speeds_supported;
2067 return 0;
2068 }
2069
2070 /* SFP */
2071 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2072
2073 /* CS4227 SFP must not enable auto-negotiation */
2074 *autoneg = FALSE;
2075
2076 /* Check if 1G SFP module. */
2077 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2078 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2079 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2080 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2081 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2082 return IXGBE_SUCCESS;
2083 }
2084
2085 /* Link capabilities are based on SFP */
2086 if (hw->phy.multispeed_fiber)
2087 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2088 IXGBE_LINK_SPEED_1GB_FULL;
2089 else
2090 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2091 } else {
2092 *autoneg = TRUE;
2093
2094 switch (hw->phy.type) {
2095 case ixgbe_phy_x550em_xfi:
2096 *speed = IXGBE_LINK_SPEED_1GB_FULL |
2097 IXGBE_LINK_SPEED_10GB_FULL;
2098 *autoneg = FALSE;
2099 break;
2100 case ixgbe_phy_ext_1g_t:
2101 case ixgbe_phy_sgmii:
2102 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2103 break;
2104 case ixgbe_phy_x550em_kr:
2105 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2106 /* check different backplane modes */
2107 if (hw->phy.nw_mng_if_sel &
2108 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2109 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2110 break;
2111 } else if (hw->device_id ==
2112 IXGBE_DEV_ID_X550EM_A_KR_L) {
2113 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2114 break;
2115 }
2116 }
2117 /* fall through */
2118 default:
2119 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2120 IXGBE_LINK_SPEED_1GB_FULL;
2121 break;
2122 }
2123 }
2124
2125 return IXGBE_SUCCESS;
2126 }
2127
2128 /**
2129  * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2130 * @hw: pointer to hardware structure
2131 * @lsc: pointer to boolean flag which indicates whether external Base T
2132 * PHY interrupt is lsc
2133 *
2134  * Determine if external Base T PHY interrupt cause is high temperature
2135 * failure alarm or link status change.
2136 *
2137 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2138 * failure alarm, else return PHY access status.
2139 */
2140 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2141 {
2142 u32 status;
2143 u16 reg;
2144
2145 *lsc = FALSE;
2146
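	/*
	 * Walk the PHY alarm hierarchy: the chip-wide standard flag leads to
	 * the vendor alarm, the vendor flag leads to global alarm 1 (high
	 * temperature / device fault), and standard alarm 2 leads to the
	 * vendor TX alarm 2 register, which reports link status changes.
	 */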
2147 /* Vendor alarm triggered */
2148 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2149 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2150 				      &reg);
2151
2152 if (status != IXGBE_SUCCESS ||
2153 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2154 return status;
2155
2156 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2157 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2158 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2159 				      &reg);
2160
2161 if (status != IXGBE_SUCCESS ||
2162 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2163 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2164 return status;
2165
2166 /* Global alarm triggered */
2167 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2168 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2169 				      &reg);
2170
2171 if (status != IXGBE_SUCCESS)
2172 return status;
2173
2174 /* If high temperature failure, then return over temp error and exit */
2175 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2176 /* power down the PHY in case the PHY FW didn't already */
2177 ixgbe_set_copper_phy_power(hw, FALSE);
2178 return IXGBE_ERR_OVERTEMP;
2179 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2180 /* device fault alarm triggered */
2181 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2182 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2183 					      &reg);
2184
2185 if (status != IXGBE_SUCCESS)
2186 return status;
2187
2188 /* if device fault was due to high temp alarm handle and exit */
2189 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2190 /* power down the PHY in case the PHY FW didn't */
2191 ixgbe_set_copper_phy_power(hw, FALSE);
2192 return IXGBE_ERR_OVERTEMP;
2193 }
2194 }
2195
2196 /* Vendor alarm 2 triggered */
2197 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2198 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2199
2200 if (status != IXGBE_SUCCESS ||
2201 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2202 return status;
2203
2204 /* link connect/disconnect event occurred */
2205 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2206 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2207
2208 if (status != IXGBE_SUCCESS)
2209 return status;
2210
2211 /* Indicate LSC */
2212 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2213 *lsc = TRUE;
2214
2215 return IXGBE_SUCCESS;
2216 }
2217
2218 /**
2219 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2220 * @hw: pointer to hardware structure
2221 *
2222 * Enable link status change and temperature failure alarm for the external
2223 * Base T PHY
2224 *
2225 * Returns PHY access status
2226 */
2227 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2228 {
2229 u32 status;
2230 u16 reg;
2231 bool lsc;
2232
2233 /* Clear interrupt flags */
2234 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2235
2236 /* Enable link status change alarm */
2237
2238 /* Enable the LASI interrupts on X552 devices to receive notifications
2239 * of the link configurations of the external PHY and correspondingly
2240 * support the configuration of the internal iXFI link, since iXFI does
2241 	 * not support auto-negotiation. This is not required for X553 devices,
2242 	 * whose KR interface performs auto-negotiation and is used as the
2243 	 * internal link to the external PHY, so the check below avoids enabling
2244 	 * LASI interrupts for X553 devices.
2245 */
2246 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2247 status = hw->phy.ops.read_reg(hw,
2248 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2249 		    IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2250
2251 if (status != IXGBE_SUCCESS)
2252 return status;
2253
2254 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2255
2256 status = hw->phy.ops.write_reg(hw,
2257 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2258 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2259
2260 if (status != IXGBE_SUCCESS)
2261 return status;
2262 }
2263
2264 /* Enable high temperature failure and global fault alarms */
2265 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2266 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2267 				      &reg);
2268
2269 if (status != IXGBE_SUCCESS)
2270 return status;
2271
2272 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2273 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2274
2275 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2276 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2277 reg);
2278
2279 if (status != IXGBE_SUCCESS)
2280 return status;
2281
2282 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2283 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2284 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2285 				      &reg);
2286
2287 if (status != IXGBE_SUCCESS)
2288 return status;
2289
2290 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2291 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2292
2293 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2294 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2295 reg);
2296
2297 if (status != IXGBE_SUCCESS)
2298 return status;
2299
2300 /* Enable chip-wide vendor alarm */
2301 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2302 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2303 				      &reg);
2304
2305 if (status != IXGBE_SUCCESS)
2306 return status;
2307
2308 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2309
2310 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2311 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2312 reg);
2313
2314 return status;
2315 }
2316
2317 /**
2318 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2319 * @hw: pointer to hardware structure
2320 * @speed: link speed
2321 *
2322 * Configures the integrated KR PHY.
2323 **/
2324 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2325 ixgbe_link_speed speed)
2326 {
2327 s32 status;
2328 u32 reg_val;
2329
2330 status = hw->mac.ops.read_iosf_sb_reg(hw,
2331 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2332 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2333 if (status)
2334 return status;
2335
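	/*
	 * Rebuild the clause-73 advertisement: enable AN, clear both the KR
	 * (10G) and KX (1G) capability bits, then re-set them below according
	 * to the requested speed mask.
	 */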
2336 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2337 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2338 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2339
2340 /* Advertise 10G support. */
2341 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2342 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2343
2344 /* Advertise 1G support. */
2345 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2346 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2347
2348 status = hw->mac.ops.write_iosf_sb_reg(hw,
2349 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2350 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2351
2352 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2353 /* Set lane mode to KR auto negotiation */
2354 status = hw->mac.ops.read_iosf_sb_reg(hw,
2355 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2356 		    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2357
2358 if (status)
2359 return status;
2360
2361 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2362 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2363 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2364 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2365 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2366
2367 status = hw->mac.ops.write_iosf_sb_reg(hw,
2368 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2369 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2370 }
2371
2372 return ixgbe_restart_an_internal_phy_x550em(hw);
2373 }
2374
2375 /**
2376 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2377 * @hw: pointer to hardware structure
2378 */
2379 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2380 {
2381 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2382 s32 rc;
2383
2384 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2385 return IXGBE_SUCCESS;
2386
2387 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2388 if (rc)
2389 return rc;
2390 memset(store, 0, sizeof(store));
2391
2392 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2393 if (rc)
2394 return rc;
2395
2396 return ixgbe_setup_fw_link(hw);
2397 }
2398
2399 /**
2400 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2401 * @hw: pointer to hardware structure
2402 */
2403 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2404 {
2405 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2406 s32 rc;
2407
2408 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2409 if (rc)
2410 return rc;
2411
2412 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2413 ixgbe_shutdown_fw_phy(hw);
2414 return IXGBE_ERR_OVERTEMP;
2415 }
2416 return IXGBE_SUCCESS;
2417 }
2418
2419 /**
2420 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2421 * @hw: pointer to hardware structure
2422 *
2423  * Read the NW_MNG_IF_SEL register and save the field values used to determine
2424  * the internal PHY mode and the external PHY address.
2425 **/
2426 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2427 {
2428 /* Save NW management interface connected on board. This is used
2429 * to determine internal PHY mode.
2430 */
2431 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2432
2433 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2434 	 * PHY address. This register field has only been used for X552.
2435 */
2436 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2437 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2438 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2439 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2440 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2441 }
2442
2443 return IXGBE_SUCCESS;
2444 }
2445
2446 /**
2447 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2448 * @hw: pointer to hardware structure
2449 *
2450 * Initialize any function pointers that were not able to be
2451 * set during init_shared_code because the PHY/SFP type was
2452 * not known. Perform the SFP init if necessary.
2453 */
2454 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2455 {
2456 struct ixgbe_phy_info *phy = &hw->phy;
2457 s32 ret_val;
2458
2459 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2460
2461 hw->mac.ops.set_lan_id(hw);
2462 ixgbe_read_mng_if_sel_x550em(hw);
2463
2464 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2465 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2466 ixgbe_setup_mux_ctl(hw);
2467 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2468 }
2469
2470 switch (hw->device_id) {
2471 case IXGBE_DEV_ID_X550EM_A_1G_T:
2472 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2473 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2474 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2475 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2476 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2477 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2478 if (hw->bus.lan_id)
2479 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2480 else
2481 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2482
2483 break;
2484 case IXGBE_DEV_ID_X550EM_A_10G_T:
2485 case IXGBE_DEV_ID_X550EM_A_SFP:
2486 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2487 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2488 if (hw->bus.lan_id)
2489 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2490 else
2491 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2492 break;
2493 case IXGBE_DEV_ID_X550EM_X_SFP:
2494 /* set up for CS4227 usage */
2495 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2496 break;
2497 case IXGBE_DEV_ID_X550EM_X_1G_T:
2498 phy->ops.read_reg_mdi = NULL;
2499 phy->ops.write_reg_mdi = NULL;
2500 break;
2501 default:
2502 break;
2503 }
2504
2505 /* Identify the PHY or SFP module */
2506 ret_val = phy->ops.identify(hw);
2507 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2508 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2509 return ret_val;
2510
2511 /* Setup function pointers based on detected hardware */
2512 ixgbe_init_mac_link_ops_X550em(hw);
2513 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2514 phy->ops.reset = NULL;
2515
2516 /* Set functions pointers based on phy type */
2517 switch (hw->phy.type) {
2518 case ixgbe_phy_x550em_kx4:
2519 phy->ops.setup_link = NULL;
2520 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2521 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2522 break;
2523 case ixgbe_phy_x550em_kr:
2524 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2525 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2526 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2527 break;
2528 case ixgbe_phy_ext_1g_t:
2529 /* link is managed by FW */
2530 phy->ops.setup_link = NULL;
2531 phy->ops.reset = NULL;
2532 break;
2533 case ixgbe_phy_x550em_xfi:
2534 /* link is managed by HW */
2535 phy->ops.setup_link = NULL;
2536 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2537 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2538 break;
2539 case ixgbe_phy_x550em_ext_t:
2540 /* If internal link mode is XFI, then setup iXFI internal link,
2541 * else setup KR now.
2542 */
2543 phy->ops.setup_internal_link =
2544 ixgbe_setup_internal_phy_t_x550em;
2545
2546 /* setup SW LPLU only for first revision of X550EM_x */
2547 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2548 !(IXGBE_FUSES0_REV_MASK &
2549 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2550 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2551
2552 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2553 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2554 break;
2555 case ixgbe_phy_sgmii:
2556 phy->ops.setup_link = NULL;
2557 break;
2558 case ixgbe_phy_fw:
2559 phy->ops.setup_link = ixgbe_setup_fw_link;
2560 phy->ops.reset = ixgbe_reset_phy_fw;
2561 break;
2562 default:
2563 break;
2564 }
2565 return ret_val;
2566 }
2567
2568 /**
2569 * ixgbe_set_mdio_speed - Set MDIO clock speed
2570 * @hw: pointer to hardware structure
2571 */
2572 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2573 {
2574 u32 hlreg0;
2575
2576 switch (hw->device_id) {
2577 case IXGBE_DEV_ID_X550EM_X_10G_T:
2578 case IXGBE_DEV_ID_X550EM_A_SGMII:
2579 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2580 case IXGBE_DEV_ID_X550EM_A_10G_T:
2581 case IXGBE_DEV_ID_X550EM_A_SFP:
2582 case IXGBE_DEV_ID_X550EM_A_QSFP:
2583 /* Config MDIO clock speed before the first MDIO PHY access */
2584 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2585 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2586 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2587 break;
2588 case IXGBE_DEV_ID_X550EM_A_1G_T:
2589 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2590 /* Select fast MDIO clock speed for these devices */
2591 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2592 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2593 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2594 break;
2595 default:
2596 break;
2597 }
2598 }
2599
2600 /**
2601 * ixgbe_reset_hw_X550em - Perform hardware reset
2602 * @hw: pointer to hardware structure
2603 *
2604 * Resets the hardware by resetting the transmit and receive units, masks
2605  * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2606 * reset.
2607 */
2608 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2609 {
2610 ixgbe_link_speed link_speed;
2611 s32 status;
2612 s32 phy_status = IXGBE_SUCCESS;
2613 u32 ctrl = 0;
2614 u32 i;
2615 bool link_up = FALSE;
2616 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2617
2618 DEBUGFUNC("ixgbe_reset_hw_X550em");
2619
2620 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2621 status = hw->mac.ops.stop_adapter(hw);
2622 if (status != IXGBE_SUCCESS) {
2623 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2624 return status;
2625 }
2626 /* flush pending Tx transactions */
2627 ixgbe_clear_tx_pending(hw);
2628
2629 ixgbe_set_mdio_speed(hw);
2630
2631 /* PHY ops must be identified and initialized prior to reset */
2632 phy_status = hw->phy.ops.init(hw);
2633
2634 if (phy_status)
2635 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2636 			  phy_status);
2637
2638 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2639 phy_status == IXGBE_ERR_PHY_ADDR_INVALID) {
2640 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2641 goto mac_reset_top;
2642 }
2643
2644 /* start the external PHY */
2645 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2646 status = ixgbe_init_ext_t_x550em(hw);
2647 if (status) {
2648 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2649 status);
2650 return status;
2651 }
2652 }
2653
2654 /* Setup SFP module if there is one present. */
2655 if (hw->phy.sfp_setup_needed) {
2656 phy_status = hw->mac.ops.setup_sfp(hw);
2657 hw->phy.sfp_setup_needed = FALSE;
2658 }
2659
2660 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2661 goto mac_reset_top;
2662
2663 /* Reset PHY */
2664 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2665 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2666 return IXGBE_ERR_OVERTEMP;
2667 }
2668
2669 mac_reset_top:
2670 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2671 * If link reset is used when link is up, it might reset the PHY when
2672 * mng is using it. If link is down or the flag to force full link
2673 * reset is set, then perform link reset.
2674 */
2675 ctrl = IXGBE_CTRL_LNK_RST;
2676 if (!hw->force_full_reset) {
2677 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2678 if (link_up)
2679 ctrl = IXGBE_CTRL_RST;
2680 }
2681
2682 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2683 if (status != IXGBE_SUCCESS) {
2684 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2685 "semaphore failed with %d", status);
2686 return IXGBE_ERR_SWFW_SYNC;
2687 }
2688 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2689 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2690 IXGBE_WRITE_FLUSH(hw);
2691 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2692
2693 /* Poll for reset bit to self-clear meaning reset is complete */
2694 for (i = 0; i < 10; i++) {
2695 usec_delay(1);
2696 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2697 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2698 break;
2699 }
2700
2701 if (ctrl & IXGBE_CTRL_RST_MASK) {
2702 status = IXGBE_ERR_RESET_FAILED;
2703 DEBUGOUT("Reset polling failed to complete.\n");
2704 }
2705
2706 msec_delay(50);
2707
2708 /* Double resets are required for recovery from certain error
2709 * conditions. Between resets, it is necessary to stall to
2710 * allow time for any pending HW events to complete.
2711 */
2712 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2713 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2714 goto mac_reset_top;
2715 }
2716
2717 /* Store the permanent mac address */
2718 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2719
2720 /* Store MAC address from RAR0, clear receive address registers, and
2721 * clear the multicast table. Also reset num_rar_entries to 128,
2722 * since we modify this value when programming the SAN MAC address.
2723 */
2724 hw->mac.num_rar_entries = 128;
2725 hw->mac.ops.init_rx_addrs(hw);
2726
2727 ixgbe_set_mdio_speed(hw);
2728
2729 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2730 ixgbe_setup_mux_ctl(hw);
2731
2732 if (status != IXGBE_SUCCESS)
2733 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2734
2735 if (phy_status != IXGBE_SUCCESS)
2736 status = phy_status;
2737
2738 return status;
2739 }
2740
2741 /**
2742 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2743 * @hw: pointer to hardware structure
2744 */
2745 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2746 {
2747 u32 status;
2748 u16 reg;
2749
2750 status = hw->phy.ops.read_reg(hw,
2751 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2752 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2753 				      &reg);
2754
2755 if (status != IXGBE_SUCCESS)
2756 return status;
2757
2758 /* If PHY FW reset completed bit is set then this is the first
2759 * SW instance after a power on so the PHY FW must be un-stalled.
2760 */
2761 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2762 status = hw->phy.ops.read_reg(hw,
2763 IXGBE_MDIO_GLOBAL_RES_PR_10,
2764 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2765 					      &reg);
2766
2767 if (status != IXGBE_SUCCESS)
2768 return status;
2769
2770 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2771
2772 status = hw->phy.ops.write_reg(hw,
2773 IXGBE_MDIO_GLOBAL_RES_PR_10,
2774 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2775 reg);
2776
2777 if (status != IXGBE_SUCCESS)
2778 return status;
2779 }
2780
2781 return status;
2782 }
2783
2784 /**
2785 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2786 * @hw: pointer to hardware structure
2787 **/
2788 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2789 {
2790 /* leave link alone for 2.5G */
2791 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2792 return IXGBE_SUCCESS;
2793
2794 if (ixgbe_check_reset_blocked(hw))
2795 return 0;
2796
2797 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2798 }
2799
2800 /**
2801  * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2802 * @hw: pointer to hardware structure
2803 * @speed: new link speed
2804 * @autoneg_wait_to_complete: unused
2805 *
2806 * Configure the external PHY and the integrated KR PHY for SFP support.
2807 **/
2808 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2809 ixgbe_link_speed speed,
2810 bool autoneg_wait_to_complete)
2811 {
2812 s32 ret_val;
2813 u16 reg_slice, reg_val;
2814 bool setup_linear = FALSE;
2815 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2816
2817 /* Check if SFP module is supported and linear */
2818 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2819
2820 	/* If no SFP module is present, return success: there is no reason to
2821 	 * configure the CS4227, and an SFP-not-present error should not be
2822 	 * treated as a failure in the setup MAC link flow.
2823 	 */
2824 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2825 return IXGBE_SUCCESS;
2826
2827 if (ret_val != IXGBE_SUCCESS)
2828 return ret_val;
2829
2830 /* Configure internal PHY for KR/KX. */
2831 ixgbe_setup_kr_speed_x550em(hw, speed);
2832
2833 /* Configure CS4227 LINE side to proper mode. */
2834 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2835 (hw->bus.lan_id << 12);
2836 if (setup_linear)
2837 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2838 else
2839 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2840 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2841 reg_val);
2842 return ret_val;
2843 }
2844
2845 /**
2846 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2847 * @hw: pointer to hardware structure
2848 * @speed: the link speed to force
2849 *
2850 * Configures the integrated PHY for native SFI mode. Used to connect the
2851 * internal PHY directly to an SFP cage, without autonegotiation.
2852 **/
2853 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2854 {
2855 struct ixgbe_mac_info *mac = &hw->mac;
2856 s32 status;
2857 u32 reg_val;
2858
2859 /* Disable all AN and force speed to 10G Serial. */
2860 status = mac->ops.read_iosf_sb_reg(hw,
2861 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2862 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2863 if (status != IXGBE_SUCCESS)
2864 return status;
2865
2866 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2867 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2868 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2869 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2870
2871 /* Select forced link speed for internal PHY. */
2872 switch (*speed) {
2873 case IXGBE_LINK_SPEED_10GB_FULL:
2874 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2875 break;
2876 case IXGBE_LINK_SPEED_1GB_FULL:
2877 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2878 break;
2879 case 0:
2880 /* media none (linkdown) */
2881 break;
2882 default:
2883 /* Other link speeds are not supported by internal PHY. */
2884 return IXGBE_ERR_LINK_SETUP;
2885 }
2886
2887 status = mac->ops.write_iosf_sb_reg(hw,
2888 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2889 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2890
2891 /* Toggle port SW reset by AN reset. */
2892 status = ixgbe_restart_an_internal_phy_x550em(hw);
2893
2894 return status;
2895 }
2896
2897 /**
2898 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2899 * @hw: pointer to hardware structure
2900 * @speed: new link speed
2901 * @autoneg_wait_to_complete: unused
2902 *
2903 * Configure the integrated PHY for SFP support.
2904 **/
2905 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2906 ixgbe_link_speed speed,
2907 bool autoneg_wait_to_complete)
2908 {
2909 s32 ret_val;
2910 u16 reg_phy_ext;
2911 bool setup_linear = FALSE;
2912 u32 reg_slice, reg_phy_int, slice_offset;
2913
2914 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2915
2916 /* Check if SFP module is supported and linear */
2917 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2918
2919 	/* If no SFP module is present, return success: an SFP-not-present
2920 	 * error should not be treated as a failure in the setup MAC link flow.
2921 	 */
2922 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2923 return IXGBE_SUCCESS;
2924
2925 if (ret_val != IXGBE_SUCCESS)
2926 return ret_val;
2927
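	/*
	 * Two board layouts are handled here: SFP_N connects the SFP cage
	 * directly to the internal PHY and uses native SFI, while the other
	 * X550EM_a SFP designs go through an external CS4227/CS4223, so the
	 * internal link is configured for KR/KX and the external device's
	 * line-side EDC mode is programmed over MDIO below.
	 */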
2928 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2929 /* Configure internal PHY for native SFI based on module type */
2930 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2931 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2932 		    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2933
2934 if (ret_val != IXGBE_SUCCESS)
2935 return ret_val;
2936
2937 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2938 if (!setup_linear)
2939 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2940
2941 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2942 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2943 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2944
2945 if (ret_val != IXGBE_SUCCESS)
2946 return ret_val;
2947
2948 /* Setup SFI internal link. */
2949 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2950 } else {
2951 /* Configure internal PHY for KR/KX. */
2952 ixgbe_setup_kr_speed_x550em(hw, speed);
2953
2954 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2955 /* Find Address */
2956 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2957 return IXGBE_ERR_PHY_ADDR_INVALID;
2958 }
2959
2960 /* Get external PHY SKU id */
2961 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2962 					IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2963
2964 if (ret_val != IXGBE_SUCCESS)
2965 return ret_val;
2966
2967 /* When configuring quad port CS4223, the MAC instance is part
2968 * of the slice offset.
2969 */
2970 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2971 slice_offset = (hw->bus.lan_id +
2972 (hw->bus.instance_id << 1)) << 12;
2973 else
2974 slice_offset = hw->bus.lan_id << 12;
2975
2976 /* Configure CS4227/CS4223 LINE side to proper mode. */
2977 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2978
2979 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2980 					IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2981
2982 if (ret_val != IXGBE_SUCCESS)
2983 return ret_val;
2984
2985 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2986 (IXGBE_CS4227_EDC_MODE_SR << 1));
2987
2988 if (setup_linear)
2989 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2990 else
2991 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2992 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2993 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2994
2995 /* Flush previous write with a read */
2996 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2997 					IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2998 }
2999 return ret_val;
3000 }
3001
3002 /**
3003 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
3004 * @hw: pointer to hardware structure
3005 *
3006  * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
3007 **/
3008 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
3009 {
3010 struct ixgbe_mac_info *mac = &hw->mac;
3011 s32 status;
3012 u32 reg_val;
3013
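	/*
	 * iXFI runs the internal PHY without the usual KR link training, so
	 * the training machinery is sidestepped: the training protocol FSM is
	 * disabled, TXFFE coefficient adaptation is turned off, and manual
	 * coefficient overrides are enabled instead.
	 */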
3014 /* Disable training protocol FSM. */
3015 status = mac->ops.read_iosf_sb_reg(hw,
3016 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3017 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3018 if (status != IXGBE_SUCCESS)
3019 return status;
3020 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3021 status = mac->ops.write_iosf_sb_reg(hw,
3022 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3023 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3024 if (status != IXGBE_SUCCESS)
3025 return status;
3026
3027 /* Disable Flex from training TXFFE. */
3028 status = mac->ops.read_iosf_sb_reg(hw,
3029 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3030 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3031 if (status != IXGBE_SUCCESS)
3032 return status;
3033 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3034 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3035 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3036 status = mac->ops.write_iosf_sb_reg(hw,
3037 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3038 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3039 if (status != IXGBE_SUCCESS)
3040 return status;
3041 status = mac->ops.read_iosf_sb_reg(hw,
3042 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3043 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3044 if (status != IXGBE_SUCCESS)
3045 return status;
3046 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3047 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3048 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3049 status = mac->ops.write_iosf_sb_reg(hw,
3050 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3051 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3052 if (status != IXGBE_SUCCESS)
3053 return status;
3054
3055 /* Enable override for coefficients. */
3056 status = mac->ops.read_iosf_sb_reg(hw,
3057 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3058 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3059 if (status != IXGBE_SUCCESS)
3060 return status;
3061 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3062 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3063 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3064 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3065 status = mac->ops.write_iosf_sb_reg(hw,
3066 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3067 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3068 return status;
3069 }
3070
3071 /**
3072 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3073 * @hw: pointer to hardware structure
3074 * @speed: the link speed to force
3075 *
3076 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3077 * internal and external PHY at a specific speed, without autonegotiation.
3078 **/
3079 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3080 {
3081 struct ixgbe_mac_info *mac = &hw->mac;
3082 s32 status;
3083 u32 reg_val;
3084
3085 /* iXFI is only supported with X552 */
3086 if (mac->type != ixgbe_mac_X550EM_x)
3087 return IXGBE_ERR_LINK_SETUP;
3088
3089 /* Disable AN and force speed to 10G Serial. */
3090 status = mac->ops.read_iosf_sb_reg(hw,
3091 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3092 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3093 if (status != IXGBE_SUCCESS)
3094 return status;
3095
3096 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3097 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3098
3099 /* Select forced link speed for internal PHY. */
3100 switch (*speed) {
3101 case IXGBE_LINK_SPEED_10GB_FULL:
3102 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3103 break;
3104 case IXGBE_LINK_SPEED_1GB_FULL:
3105 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3106 break;
3107 default:
3108 /* Other link speeds are not supported by internal KR PHY. */
3109 return IXGBE_ERR_LINK_SETUP;
3110 }
3111
3112 status = mac->ops.write_iosf_sb_reg(hw,
3113 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3114 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3115 if (status != IXGBE_SUCCESS)
3116 return status;
3117
3118 /* Additional configuration needed for x550em_x */
3119 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3120 status = ixgbe_setup_ixfi_x550em_x(hw);
3121 if (status != IXGBE_SUCCESS)
3122 return status;
3123 }
3124
3125 /* Toggle port SW reset by AN reset. */
3126 status = ixgbe_restart_an_internal_phy_x550em(hw);
3127
3128 return status;
3129 }
3130
3131 /**
3132 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3133 * @hw: address of hardware structure
3134 * @link_up: address of boolean to indicate link status
3135 *
3136 * Returns error code if unable to get link status.
3137 */
3138 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3139 {
3140 u32 ret;
3141 u16 autoneg_status;
3142
3143 *link_up = FALSE;
3144
3145 	/* Read twice back to back; the first read clears the latched value so the second reflects the current status */
3146 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3147 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3148 &autoneg_status);
3149 if (ret != IXGBE_SUCCESS)
3150 return ret;
3151
3152 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3153 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3154 &autoneg_status);
3155 if (ret != IXGBE_SUCCESS)
3156 return ret;
3157
3158 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3159
3160 return IXGBE_SUCCESS;
3161 }
3162
3163 /**
3164 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3165  * @hw: pointer to hardware structure
3166 *
3167 * Configures the link between the integrated KR PHY and the external X557 PHY
3168 * The driver will call this function when it gets a link status change
3169 * interrupt from the X557 PHY. This function configures the link speed
3170 * between the PHYs to match the link speed of the BASE-T link.
3171 *
3172 * A return of a non-zero value indicates an error, and the base driver should
3173 * not report link up.
3174 */
3175 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3176 {
3177 ixgbe_link_speed force_speed;
3178 bool link_up;
3179 u32 status;
3180 u16 speed;
3181
3182 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3183 return IXGBE_ERR_CONFIG;
3184
3185 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3186 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3187 /* If link is down, there is no setup necessary so return */
3188 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3189 if (status != IXGBE_SUCCESS)
3190 return status;
3191
3192 if (!link_up)
3193 return IXGBE_SUCCESS;
3194
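		/*
		 * The external PHY link state is sampled both before and
		 * after the speed register read: if the link dropped in
		 * between, the captured speed may be stale, so simply return
		 * and wait for the next link status change interrupt.
		 */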
3195 status = hw->phy.ops.read_reg(hw,
3196 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3197 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3198 &speed);
3199 if (status != IXGBE_SUCCESS)
3200 return status;
3201
3202 /* If link is still down - no setup is required so return */
3203 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3204 if (status != IXGBE_SUCCESS)
3205 return status;
3206 if (!link_up)
3207 return IXGBE_SUCCESS;
3208
3209 /* clear everything but the speed and duplex bits */
3210 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3211
3212 switch (speed) {
3213 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3214 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3215 break;
3216 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3217 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3218 break;
3219 default:
3220 /* Internal PHY does not support anything else */
3221 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3222 }
3223
3224 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3225 } else {
3226 speed = IXGBE_LINK_SPEED_10GB_FULL |
3227 IXGBE_LINK_SPEED_1GB_FULL;
3228 return ixgbe_setup_kr_speed_x550em(hw, speed);
3229 }
3230 }
3231
3232 /**
3233 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3234 * @hw: pointer to hardware structure
3235 *
3236 * Configures the integrated KR PHY to use internal loopback mode.
3237 **/
3238 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3239 {
3240 s32 status;
3241 u32 reg_val;
3242
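	/*
	 * Loopback sequence: force the internal PHY to 10G with AN disabled,
	 * select the near-end loopback clocks, set the PMD loopback enable
	 * bits, and finally bypass the link training protocol.
	 */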
3243 /* Disable AN and force speed to 10G Serial. */
3244 status = hw->mac.ops.read_iosf_sb_reg(hw,
3245 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3246 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3247 if (status != IXGBE_SUCCESS)
3248 return status;
3249 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3250 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3251 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3252 status = hw->mac.ops.write_iosf_sb_reg(hw,
3253 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3254 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3255 if (status != IXGBE_SUCCESS)
3256 return status;
3257
3258 /* Set near-end loopback clocks. */
3259 status = hw->mac.ops.read_iosf_sb_reg(hw,
3260 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3261 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3262 if (status != IXGBE_SUCCESS)
3263 return status;
3264 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3265 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3266 status = hw->mac.ops.write_iosf_sb_reg(hw,
3267 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3268 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3269 if (status != IXGBE_SUCCESS)
3270 return status;
3271
3272 /* Set loopback enable. */
3273 status = hw->mac.ops.read_iosf_sb_reg(hw,
3274 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3275 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3276 if (status != IXGBE_SUCCESS)
3277 return status;
3278 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3279 status = hw->mac.ops.write_iosf_sb_reg(hw,
3280 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3281 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3282 if (status != IXGBE_SUCCESS)
3283 return status;
3284
3285 /* Training bypass. */
3286 status = hw->mac.ops.read_iosf_sb_reg(hw,
3287 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3288 	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3289 if (status != IXGBE_SUCCESS)
3290 return status;
3291 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3292 status = hw->mac.ops.write_iosf_sb_reg(hw,
3293 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3294 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3295
3296 return status;
3297 }
3298
3299 /**
3300  * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface
3301  * command. The function acquires and releases the needed semaphores itself.
3302 * @hw: pointer to hardware structure
3303 * @offset: offset of word in the EEPROM to read
3304 * @data: word read from the EEPROM
3305 *
3306 * Reads a 16 bit word from the EEPROM using the hostif.
3307 **/
3308 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3309 {
3310 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3311 struct ixgbe_hic_read_shadow_ram buffer;
3312 s32 status;
3313
3314 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3315 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3316 buffer.hdr.req.buf_lenh = 0;
3317 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3318 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3319
3320 /* convert offset from words to bytes */
3321 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3322 /* one word */
3323 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3324 buffer.pad2 = 0;
3325 buffer.data = 0;
3326 buffer.pad3 = 0;
3327
3328 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3329 if (status)
3330 return status;
3331
3332 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3333 IXGBE_HI_COMMAND_TIMEOUT);
3334 if (!status) {
3335 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3336 FW_NVM_DATA_OFFSET);
3337 }
3338
3339 hw->mac.ops.release_swfw_sync(hw, mask);
3340 return status;
3341 }
3342
3343 /**
3344  * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3345 * @hw: pointer to hardware structure
3346 * @offset: offset of word in the EEPROM to read
3347 * @words: number of words
3348 * @data: word(s) read from the EEPROM
3349 *
3350  * Reads 16 bit word(s) from the EEPROM using the hostif.
3351 **/
3352 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3353 u16 offset, u16 words, u16 *data)
3354 {
3355 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3356 struct ixgbe_hic_read_shadow_ram buffer;
3357 u32 current_word = 0;
3358 u16 words_to_read;
3359 s32 status;
3360 u32 i;
3361
3362 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3363
3364 /* Take semaphore for the entire operation. */
3365 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3366 if (status) {
3367 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3368 return status;
3369 }
3370
3371 while (words) {
3372 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3373 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3374 else
3375 words_to_read = words;
3376
3377 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3378 buffer.hdr.req.buf_lenh = 0;
3379 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3380 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3381
3382 /* convert offset from words to bytes */
3383 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3384 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3385 buffer.pad2 = 0;
3386 buffer.data = 0;
3387 buffer.pad3 = 0;
3388
3389 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3390 IXGBE_HI_COMMAND_TIMEOUT);
3391
3392 if (status) {
3393 DEBUGOUT("Host interface command failed\n");
3394 goto out;
3395 }
3396
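		/*
		 * The response is returned through the FLEX_MNG area as
		 * 32-bit registers, each packing two 16-bit EEPROM words (low
		 * word first), so the loop consumes two output words per
		 * register read.
		 */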
3397 for (i = 0; i < words_to_read; i++) {
3398 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3399 2 * i;
3400 u32 value = IXGBE_READ_REG(hw, reg);
3401
3402 data[current_word] = (u16)(value & 0xffff);
3403 current_word++;
3404 i++;
3405 if (i < words_to_read) {
3406 value >>= 16;
3407 data[current_word] = (u16)(value & 0xffff);
3408 current_word++;
3409 }
3410 }
3411 words -= words_to_read;
3412 }
3413
3414 out:
3415 hw->mac.ops.release_swfw_sync(hw, mask);
3416 return status;
3417 }
3418
3419 /**
3420  * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3421  * @hw: pointer to hardware structure
3422  * @offset: offset of word in the EEPROM to write
3423  * @data: word to write to the EEPROM
3424 *
3425  * Write a 16 bit word to the EEPROM using the hostif; the caller must already hold the EEPROM semaphore.
3426 **/
3427 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3428 u16 data)
3429 {
3430 s32 status;
3431 struct ixgbe_hic_write_shadow_ram buffer;
3432
3433 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3434
3435 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3436 buffer.hdr.req.buf_lenh = 0;
3437 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3438 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3439
3440 /* one word */
3441 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3442 buffer.data = data;
3443 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3444
3445 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3446 sizeof(buffer),
3447 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3448
3449 return status;
3450 }
3451
3452 /**
3453 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3454 * @hw: pointer to hardware structure
3455 * @offset: offset of word in the EEPROM to write
3456  * @data: word to write to the EEPROM
3457 *
3458 * Write a 16 bit word to the EEPROM using the hostif.
3459 **/
3460 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3461 u16 data)
3462 {
3463 s32 status = IXGBE_SUCCESS;
3464
3465 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3466
3467 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3468 IXGBE_SUCCESS) {
3469 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3470 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3471 } else {
3472 DEBUGOUT("write ee hostif failed to get semaphore");
3473 status = IXGBE_ERR_SWFW_SYNC;
3474 }
3475
3476 return status;
3477 }
3478
3479 /**
3480 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3481 * @hw: pointer to hardware structure
3482 * @offset: offset of word in the EEPROM to write
3483 * @words: number of words
3484  * @data: word(s) to write to the EEPROM
3485  *
3486  * Write 16 bit word(s) to the EEPROM using the hostif.
3487 **/
3488 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3489 u16 offset, u16 words, u16 *data)
3490 {
3491 s32 status = IXGBE_SUCCESS;
3492 u32 i = 0;
3493
3494 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3495
3496 /* Take semaphore for the entire operation. */
3497 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3498 if (status != IXGBE_SUCCESS) {
3499 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3500 goto out;
3501 }
3502
3503 for (i = 0; i < words; i++) {
3504 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3505 data[i]);
3506
3507 if (status != IXGBE_SUCCESS) {
3508 DEBUGOUT("Eeprom buffered write failed\n");
3509 break;
3510 }
3511 }
3512
3513 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3514 out:
3515
3516 return status;
3517 }
3518
3519 /**
3520 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3521 * @hw: pointer to hardware structure
3522 * @ptr: pointer offset in eeprom
3523  * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
3524  * @csum: address of checksum to update
3525  * @buffer: pointer to buffer containing the EEPROM image, or NULL to read from the EEPROM
3526 * @buffer_size: size of buffer
3527 *
3528 * Returns error status for any failure
3529 */
3530 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3531 u16 size, u16 *csum, u16 *buffer,
3532 u32 buffer_size)
3533 {
3534 u16 buf[256];
3535 s32 status;
3536 u16 length, bufsz, i, start;
3537 u16 *local_buffer;
3538
3539 bufsz = sizeof(buf) / sizeof(buf[0]);
3540
3541 /* Read a chunk at the pointer location */
3542 if (!buffer) {
3543 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3544 if (status) {
3545 DEBUGOUT("Failed to read EEPROM image\n");
3546 return status;
3547 }
3548 local_buffer = buf;
3549 } else {
3550 if (buffer_size < ptr)
3551 return IXGBE_ERR_PARAM;
3552 local_buffer = &buffer[ptr];
3553 }
3554
3555 if (size) {
3556 start = 0;
3557 length = size;
3558 } else {
3559 start = 1;
3560 length = local_buffer[0];
3561
3562 /* Skip pointer section if length is invalid. */
3563 if (length == 0xFFFF || length == 0 ||
3564 (ptr + length) >= hw->eeprom.word_size)
3565 return IXGBE_SUCCESS;
3566 }
3567
3568 if (buffer && ((u32)start + (u32)length > buffer_size))
3569 return IXGBE_ERR_PARAM;
3570
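	/*
	 * Sum the region word by word.  When reading directly from the
	 * EEPROM (no caller-supplied buffer), the data is fetched in
	 * bufsz-word chunks and buf is refilled whenever the index wraps.
	 */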
3571 for (i = start; length; i++, length--) {
3572 if (i == bufsz && !buffer) {
3573 ptr += bufsz;
3574 i = 0;
3575 if (length < bufsz)
3576 bufsz = length;
3577
3578 /* Read a chunk at the pointer location */
3579 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3580 bufsz, buf);
3581 if (status) {
3582 DEBUGOUT("Failed to read EEPROM image\n");
3583 return status;
3584 }
3585 }
3586 *csum += local_buffer[i];
3587 }
3588 return IXGBE_SUCCESS;
3589 }
3590
3591 /**
3592 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3593 * @hw: pointer to hardware structure
3594  * @buffer: pointer to buffer containing the EEPROM image, or NULL to read it from hardware
3595 * @buffer_size: size of buffer
3596 *
3597 * Returns a negative error code on error, or the 16-bit checksum
3598 **/
3599 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3600 {
3601 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3602 u16 *local_buffer;
3603 s32 status;
3604 u16 checksum = 0;
3605 u16 pointer, i, size;
3606
3607 	DEBUGFUNC("ixgbe_calc_checksum_X550");
3608
3609 hw->eeprom.ops.init_params(hw);
3610
3611 if (!buffer) {
3612 /* Read pointer area */
3613 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3614 IXGBE_EEPROM_LAST_WORD + 1,
3615 eeprom_ptrs);
3616 if (status) {
3617 DEBUGOUT("Failed to read EEPROM image\n");
3618 return status;
3619 }
3620 local_buffer = eeprom_ptrs;
3621 } else {
3622 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3623 return IXGBE_ERR_PARAM;
3624 local_buffer = buffer;
3625 }
3626
3627 /*
3628 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3629 * checksum word itself
3630 */
3631 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3632 if (i != IXGBE_EEPROM_CHECKSUM)
3633 checksum += local_buffer[i];
3634
3635 /*
3636 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3637 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3638 */
3639 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3640 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3641 continue;
3642
3643 pointer = local_buffer[i];
3644
3645 /* Skip pointer section if the pointer is invalid. */
3646 if (pointer == 0xFFFF || pointer == 0 ||
3647 pointer >= hw->eeprom.word_size)
3648 continue;
3649
3650 switch (i) {
3651 case IXGBE_PCIE_GENERAL_PTR:
3652 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3653 break;
3654 case IXGBE_PCIE_CONFIG0_PTR:
3655 case IXGBE_PCIE_CONFIG1_PTR:
3656 size = IXGBE_PCIE_CONFIG_SIZE;
3657 break;
3658 default:
3659 size = 0;
3660 break;
3661 }
3662
3663 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3664 buffer, buffer_size);
3665 if (status)
3666 return status;
3667 }
3668
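	/*
	 * The stored checksum word is chosen so that all checksummed words
	 * plus the checksum itself sum to IXGBE_EEPROM_SUM, hence the
	 * complement computed here.
	 */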
3669 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3670
3671 return (s32)checksum;
3672 }
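
/*
 * Worked example (illustrative, made-up values): if the checksummed words
 * sum to 0x1234 modulo 2^16, the checksum word becomes
 * IXGBE_EEPROM_SUM - 0x1234 = 0xBABA - 0x1234 = 0xA886, so re-summing all
 * words including the checksum yields IXGBE_EEPROM_SUM again.
 */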
3673
3674 /**
3675 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3676 * @hw: pointer to hardware structure
3677 *
3678 * Returns a negative error code on error, or the 16-bit checksum
3679 **/
3680 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3681 {
3682 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3683 }
3684
3685 /**
3686 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3687 * @hw: pointer to hardware structure
3688 * @checksum_val: calculated checksum
3689 *
3690 * Performs checksum calculation and validates the EEPROM checksum. If the
3691 * caller does not need checksum_val, the value can be NULL.
3692 **/
3693 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3694 {
3695 s32 status;
3696 u16 checksum;
3697 u16 read_checksum = 0;
3698
3699 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3700
3701 /* Read the first word from the EEPROM. If this times out or fails, do
3702 * not continue or we could be in for a very long wait while every
3703 * EEPROM read fails
3704 */
3705 status = hw->eeprom.ops.read(hw, 0, &checksum);
3706 if (status) {
3707 DEBUGOUT("EEPROM read failed\n");
3708 return status;
3709 }
3710
3711 status = hw->eeprom.ops.calc_checksum(hw);
3712 if (status < 0)
3713 return status;
3714
3715 checksum = (u16)(status & 0xffff);
3716
3717 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3718 &read_checksum);
3719 if (status)
3720 return status;
3721
3722 /* Verify read checksum from EEPROM is the same as
3723 * calculated checksum
3724 */
3725 if (read_checksum != checksum) {
3726 status = IXGBE_ERR_EEPROM_CHECKSUM;
3727 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3728 "Invalid EEPROM checksum");
3729 }
3730
3731 /* If the user cares, return the calculated checksum */
3732 if (checksum_val)
3733 *checksum_val = checksum;
3734
3735 return status;
3736 }
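
/*
 * Illustrative sketch (not part of the driver): how an attach path might
 * reject an adapter whose EEPROM image fails validation.  The message is
 * hypothetical.
 *
 *	u16 csum;
 *
 *	if (ixgbe_validate_eeprom_checksum_X550(hw, &csum) != IXGBE_SUCCESS)
 *		DEBUGOUT("EEPROM checksum invalid, aborting attach\n");
 */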
3737
3738 /**
3739 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3740 * @hw: pointer to hardware structure
3741 *
3742 * After writing the EEPROM to shadow RAM using the EEWR register, software
3743 * calculates the checksum, writes it to the EEPROM, and instructs the
3744 * hardware to update the flash.
3745 **/
3746 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3747 {
3748 s32 status;
3749 u16 checksum = 0;
3750
3751 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3752
3753 /* Read the first word from the EEPROM. If this times out or fails, do
3754 * not continue or we could be in for a very long wait while every
3755 * EEPROM read fails
3756 */
3757 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3758 if (status) {
3759 DEBUGOUT("EEPROM read failed\n");
3760 return status;
3761 }
3762
3763 status = ixgbe_calc_eeprom_checksum_X550(hw);
3764 if (status < 0)
3765 return status;
3766
3767 checksum = (u16)(status & 0xffff);
3768
3769 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3770 checksum);
3771 if (status)
3772 return status;
3773
3774 status = ixgbe_update_flash_X550(hw);
3775
3776 return status;
3777 }
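
/*
 * Illustrative sketch (not part of the driver): updating a single shadow
 * RAM word and then committing the new checksum, which also triggers the
 * flash update.  The offset and value are hypothetical.
 *
 *	if (ixgbe_write_ee_hostif_X550(hw, 0x2F, 0xBEEF) == IXGBE_SUCCESS)
 *		(void)ixgbe_update_eeprom_checksum_X550(hw);
 */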
3778
3779 /**
3780 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3781 * @hw: pointer to hardware structure
3782 *
3783 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3784 **/
3785 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3786 {
3787 s32 status = IXGBE_SUCCESS;
3788 union ixgbe_hic_hdr2 buffer;
3789
3790 DEBUGFUNC("ixgbe_update_flash_X550");
3791
3792 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3793 buffer.req.buf_lenh = 0;
3794 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3795 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3796
3797 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3798 sizeof(buffer),
3799 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3800
3801 return status;
3802 }
3803
3804 /**
3805 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3806 * @hw: pointer to hardware structure
3807 *
3808 * Determines physical layer capabilities of the current configuration.
3809 **/
3810 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3811 {
3812 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3813 u16 ext_ability = 0;
3814
3815 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3816
3817 hw->phy.ops.identify(hw);
3818
3819 switch (hw->phy.type) {
3820 case ixgbe_phy_x550em_kr:
3821 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3822 if (hw->phy.nw_mng_if_sel &
3823 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3824 physical_layer =
3825 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3826 break;
3827 } else if (hw->device_id ==
3828 IXGBE_DEV_ID_X550EM_A_KR_L) {
3829 physical_layer =
3830 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3831 break;
3832 }
3833 }
3834 /* fall through */
3835 case ixgbe_phy_x550em_xfi:
3836 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3837 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3838 break;
3839 case ixgbe_phy_x550em_kx4:
3840 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3841 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3842 break;
3843 case ixgbe_phy_x550em_ext_t:
3844 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3845 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3846 &ext_ability);
3847 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3848 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3849 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3850 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3851 break;
3852 case ixgbe_phy_fw:
3853 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3854 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3855 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3856 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3857 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3858 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3859 break;
3860 case ixgbe_phy_sgmii:
3861 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3862 break;
3863 case ixgbe_phy_ext_1g_t:
3864 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3865 break;
3866 default:
3867 break;
3868 }
3869
3870 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3871 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3872
3873 return physical_layer;
3874 }
3875
3876 /**
3877 * ixgbe_get_bus_info_X550em - Set PCI bus info
3878 * @hw: pointer to hardware structure
3879 *
3880 * Sets bus link width and speed to unknown because X550em is
3881 * not a PCI device.
3882 **/
3883 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3884 {
3885
3886 DEBUGFUNC("ixgbe_get_bus_info_X550em");
3887
3888 hw->bus.width = ixgbe_bus_width_unknown;
3889 hw->bus.speed = ixgbe_bus_speed_unknown;
3890
3891 hw->mac.ops.set_lan_id(hw);
3892
3893 return IXGBE_SUCCESS;
3894 }
3895
3896 /**
3897 * ixgbe_disable_rx_x550 - Disable RX unit
3898 * @hw: pointer to hardware structure
3899 *
3900 * Disables the Rx DMA unit for x550
3901 **/
3902 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3903 {
3904 u32 rxctrl, pfdtxgswc;
3905 s32 status;
3906 struct ixgbe_hic_disable_rxen fw_cmd;
3907
3908 DEBUGFUNC("ixgbe_disable_rx_x550");
3909
3910 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3911 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3912 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3913 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3914 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3915 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3916 hw->mac.set_lben = TRUE;
3917 } else {
3918 hw->mac.set_lben = FALSE;
3919 }
3920
3921 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3922 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3923 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3924 fw_cmd.port_number = (u8)hw->bus.lan_id;
3925
3926 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3927 sizeof(struct ixgbe_hic_disable_rxen),
3928 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3929
3930 /* If we fail - disable RX using register write */
3931 if (status) {
3932 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3933 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3934 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3935 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3936 }
3937 }
3938 }
3939 }
3940
3941 /**
3942 * ixgbe_enter_lplu_t_x550em - Transition to low power states
3943 * @hw: pointer to hardware structure
3944 *
3945 * Configures Low Power Link Up on transition to low power states
3946 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3947 * X557 PHY immediately prior to entering LPLU.
3948 **/
3949 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3950 {
3951 u16 an_10g_cntl_reg, autoneg_reg, speed;
3952 s32 status;
3953 ixgbe_link_speed lcd_speed;
3954 u32 save_autoneg;
3955 bool link_up;
3956
3957 /* SW LPLU not required on later HW revisions. */
3958 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3959 (IXGBE_FUSES0_REV_MASK &
3960 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3961 return IXGBE_SUCCESS;
3962
3963 /* If blocked by MNG FW, then don't restart AN */
3964 if (ixgbe_check_reset_blocked(hw))
3965 return IXGBE_SUCCESS;
3966
3967 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3968 if (status != IXGBE_SUCCESS)
3969 return status;
3970
3971 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3972
3973 if (status != IXGBE_SUCCESS)
3974 return status;
3975
3976 /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability
3977 * disabled, then force link down by entering low power mode.
3978 */
3979 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3980 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3981 return ixgbe_set_copper_phy_power(hw, FALSE);
3982
3983 /* Determine LCD */
3984 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3985
3986 if (status != IXGBE_SUCCESS)
3987 return status;
3988
3989 /* If no valid LCD link speed, then force link down and exit. */
3990 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3991 return ixgbe_set_copper_phy_power(hw, FALSE);
3992
3993 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3994 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3995 &speed);
3996
3997 if (status != IXGBE_SUCCESS)
3998 return status;
3999
4000 /* If no link now, speed is invalid so take link down */
4001 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
4002 if (status != IXGBE_SUCCESS)
4003 return ixgbe_set_copper_phy_power(hw, FALSE);
4004
4005 /* clear everything but the speed bits */
4006 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
4007
4008 /* If current speed is already LCD, then exit. */
4009 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
4010 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
4011 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
4012 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
4013 return status;
4014
4015 /* Clear AN completed indication */
4016 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
4017 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4018 &autoneg_reg);
4019
4020 if (status != IXGBE_SUCCESS)
4021 return status;
4022
4023 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4024 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4025 &an_10g_cntl_reg);
4026
4027 if (status != IXGBE_SUCCESS)
4028 return status;
4029
4030 status = hw->phy.ops.read_reg(hw,
4031 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4032 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4033 &autoneg_reg);
4034
4035 if (status != IXGBE_SUCCESS)
4036 return status;
4037
4038 save_autoneg = hw->phy.autoneg_advertised;
4039
4040 /* Set up the link at the lowest common link speed (LCD) */
4041 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4042
4043 /* restore autoneg from before setting lplu speed */
4044 hw->phy.autoneg_advertised = save_autoneg;
4045
4046 return status;
4047 }
4048
4049 /**
4050 * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4051 * @hw: pointer to hardware structure
4052 * @lcd_speed: pointer to lowest common link speed
4053 *
4054 * Determine lowest common link speed with link partner.
4055 **/
4056 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4057 {
4058 u16 an_lp_status;
4059 s32 status;
4060 u16 word = hw->eeprom.ctrl_word_3;
4061
4062 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4063
4064 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4065 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4066 &an_lp_status);
4067
4068 if (status != IXGBE_SUCCESS)
4069 return status;
4070
4071 /* If link partner advertised 1G, return 1G */
4072 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4073 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4074 return status;
4075 }
4076
4077 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4078 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4079 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4080 return status;
4081
4082 /* Link partner not capable of lower speeds, return 10G */
4083 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4084 return status;
4085 }
4086
4087 /**
4088 * ixgbe_setup_fc_X550em - Set up flow control
4089 * @hw: pointer to hardware structure
4090 *
4091 * Called at init time to set up flow control.
4092 **/
4093 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4094 {
4095 s32 ret_val = IXGBE_SUCCESS;
4096 u32 pause, asm_dir, reg_val;
4097
4098 DEBUGFUNC("ixgbe_setup_fc_X550em");
4099
4100 /* Validate the requested mode */
4101 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4102 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4103 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4104 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4105 goto out;
4106 }
4107
4108 /* 10gig parts do not have a word in the EEPROM to determine the
4109 * default flow control setting, so we explicitly set it to full.
4110 */
4111 if (hw->fc.requested_mode == ixgbe_fc_default)
4112 hw->fc.requested_mode = ixgbe_fc_full;
4113
4114 /* Determine PAUSE and ASM_DIR bits. */
4115 switch (hw->fc.requested_mode) {
4116 case ixgbe_fc_none:
4117 pause = 0;
4118 asm_dir = 0;
4119 break;
4120 case ixgbe_fc_tx_pause:
4121 pause = 0;
4122 asm_dir = 1;
4123 break;
4124 case ixgbe_fc_rx_pause:
4125 /* Rx Flow control is enabled and Tx Flow control is
4126 * disabled by software override. Since there really
4127 * isn't a way to advertise that we are capable of RX
4128 * Pause ONLY, we will advertise that we support both
4129 * symmetric and asymmetric Rx PAUSE, as such we fall
4130 * through to the fc_full statement. Later, we will
4131 * disable the adapter's ability to send PAUSE frames.
4132 */
4133 case ixgbe_fc_full:
4134 pause = 1;
4135 asm_dir = 1;
4136 break;
4137 default:
4138 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4139 "Flow control param set incorrectly\n");
4140 ret_val = IXGBE_ERR_CONFIG;
4141 goto out;
4142 }
4143
4144 switch (hw->device_id) {
4145 case IXGBE_DEV_ID_X550EM_X_KR:
4146 case IXGBE_DEV_ID_X550EM_A_KR:
4147 case IXGBE_DEV_ID_X550EM_A_KR_L:
4148 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4149 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4150 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4151 if (ret_val != IXGBE_SUCCESS)
4152 goto out;
4153 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4154 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4155 if (pause)
4156 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4157 if (asm_dir)
4158 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4159 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4160 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4161 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4162
4163 /* This device does not fully support AN. */
4164 hw->fc.disable_fc_autoneg = TRUE;
4165 break;
4166 case IXGBE_DEV_ID_X550EM_X_XFI:
4167 hw->fc.disable_fc_autoneg = TRUE;
4168 break;
4169 default:
4170 break;
4171 }
4172
4173 out:
4174 return ret_val;
4175 }
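
/*
 * For reference, the PAUSE/ASM_DIR advertisement encoding selected above
 * (per IEEE 802.3 Annex 28B):
 *
 *	requested_mode		PAUSE	ASM_DIR
 *	ixgbe_fc_none		  0	   0
 *	ixgbe_fc_tx_pause	  0	   1
 *	ixgbe_fc_rx_pause/full	  1	   1
 */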
4176
4177 /**
4178 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4179 * @hw: pointer to hardware structure
4180 *
4181 * Enable flow control according to IEEE clause 37.
4182 **/
4183 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4184 {
4185 u32 link_s1, lp_an_page_low, an_cntl_1;
4186 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4187 ixgbe_link_speed speed;
4188 bool link_up;
4189
4190 /* AN should have completed when the cable was plugged in.
4191 * Look for reasons to bail out. Bail out if:
4192 * - FC autoneg is disabled, or if
4193 * - link is not up.
4194 */
4195 if (hw->fc.disable_fc_autoneg) {
4196 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4197 "Flow control autoneg is disabled");
4198 goto out;
4199 }
4200
4201 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4202 if (!link_up) {
4203 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4204 goto out;
4205 }
4206
4207 /* Check if auto-negotiation has completed */
4208 status = hw->mac.ops.read_iosf_sb_reg(hw,
4209 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4210 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4211
4212 if (status != IXGBE_SUCCESS ||
4213 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4214 DEBUGOUT("Auto-Negotiation did not complete\n");
4215 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4216 goto out;
4217 }
4218
4219 /* Read the 10g AN autoc and LP ability registers and resolve
4220 * local flow control settings accordingly
4221 */
4222 status = hw->mac.ops.read_iosf_sb_reg(hw,
4223 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4224 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4225
4226 if (status != IXGBE_SUCCESS) {
4227 DEBUGOUT("Auto-Negotiation did not complete\n");
4228 goto out;
4229 }
4230
4231 status = hw->mac.ops.read_iosf_sb_reg(hw,
4232 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4233 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4234
4235 if (status != IXGBE_SUCCESS) {
4236 DEBUGOUT("Auto-Negotiation did not complete\n");
4237 goto out;
4238 }
4239
4240 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4241 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4242 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4243 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4244 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4245
4246 out:
4247 if (status == IXGBE_SUCCESS) {
4248 hw->fc.fc_was_autonegged = TRUE;
4249 } else {
4250 hw->fc.fc_was_autonegged = FALSE;
4251 hw->fc.current_mode = hw->fc.requested_mode;
4252 }
4253 }
4254
4255 /**
4256 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4257 * @hw: pointer to hardware structure
4258 *
4259 **/
4260 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4261 {
4262 hw->fc.fc_was_autonegged = FALSE;
4263 hw->fc.current_mode = hw->fc.requested_mode;
4264 }
4265
4266 /**
4267 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4268 * @hw: pointer to hardware structure
4269 *
4270 * Enable flow control according to IEEE clause 37.
4271 **/
4272 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4273 {
4274 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4275 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4276 ixgbe_link_speed speed;
4277 bool link_up;
4278
4279 /* AN should have completed when the cable was plugged in.
4280 * Look for reasons to bail out. Bail out if:
4281 * - FC autoneg is disabled, or if
4282 * - link is not up.
4283 */
4284 if (hw->fc.disable_fc_autoneg) {
4285 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4286 "Flow control autoneg is disabled");
4287 goto out;
4288 }
4289
4290 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4291 if (!link_up) {
4292 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4293 goto out;
4294 }
4295
4296 /* Check if auto-negotiation has completed */
4297 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4298 if (status != IXGBE_SUCCESS ||
4299 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4300 DEBUGOUT("Auto-Negotiation did not complete\n");
4301 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4302 goto out;
4303 }
4304
4305 /* Negotiate the flow control */
4306 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4307 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4308 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4309 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4310 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4311
4312 out:
4313 if (status == IXGBE_SUCCESS) {
4314 hw->fc.fc_was_autonegged = TRUE;
4315 } else {
4316 hw->fc.fc_was_autonegged = FALSE;
4317 hw->fc.current_mode = hw->fc.requested_mode;
4318 }
4319 }
4320
4321 /**
4322 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4323 * @hw: pointer to hardware structure
4324 *
4325 * Called at init time to set up flow control.
4326 **/
4327 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4328 {
4329 s32 status = IXGBE_SUCCESS;
4330 u32 an_cntl = 0;
4331
4332 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4333
4334 /* Validate the requested mode */
4335 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4336 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4337 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4338 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4339 }
4340
4341 if (hw->fc.requested_mode == ixgbe_fc_default)
4342 hw->fc.requested_mode = ixgbe_fc_full;
4343
4344 /* Set up the 1G and 10G flow control advertisement registers so the
4345 * HW will be able to do FC autoneg once the cable is plugged in. If
4346 * we link at 10G, the 1G advertisement is harmless and vice versa.
4347 */
4348 status = hw->mac.ops.read_iosf_sb_reg(hw,
4349 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4350 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4351
4352 if (status != IXGBE_SUCCESS) {
4353 DEBUGOUT("Auto-Negotiation did not complete\n");
4354 return status;
4355 }
4356
4357 /* The possible values of fc.requested_mode are:
4358 * 0: Flow control is completely disabled
4359 * 1: Rx flow control is enabled (we can receive pause frames,
4360 * but not send pause frames).
4361 * 2: Tx flow control is enabled (we can send pause frames but
4362 * we do not support receiving pause frames).
4363 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4364 * other: Invalid.
4365 */
4366 switch (hw->fc.requested_mode) {
4367 case ixgbe_fc_none:
4368 /* Flow control completely disabled by software override. */
4369 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4370 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4371 break;
4372 case ixgbe_fc_tx_pause:
4373 /* Tx Flow control is enabled, and Rx Flow control is
4374 * disabled by software override.
4375 */
4376 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4377 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4378 break;
4379 case ixgbe_fc_rx_pause:
4380 /* Rx Flow control is enabled and Tx Flow control is
4381 * disabled by software override. Since there really
4382 * isn't a way to advertise that we are capable of RX
4383 * Pause ONLY, we will advertise that we support both
4384 * symmetric and asymmetric Rx PAUSE, as such we fall
4385 * through to the fc_full statement. Later, we will
4386 * disable the adapter's ability to send PAUSE frames.
4387 */
4388 case ixgbe_fc_full:
4389 /* Flow control (both Rx and Tx) is enabled by SW override. */
4390 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4391 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4392 break;
4393 default:
4394 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4395 "Flow control param set incorrectly\n");
4396 return IXGBE_ERR_CONFIG;
4397 }
4398
4399 status = hw->mac.ops.write_iosf_sb_reg(hw,
4400 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4401 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4402
4403 /* Restart auto-negotiation. */
4404 status = ixgbe_restart_an_internal_phy_x550em(hw);
4405
4406 return status;
4407 }
4408
4409 /**
4410 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4411 * @hw: pointer to hardware structure
4412 * @state: set mux if 1, clear if 0
4413 */
4414 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4415 {
4416 u32 esdp;
4417
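	/* The CS4227 mux only has to be driven for port 1; there is nothing
	 * to do on port 0 (lan_id == 0).
	 */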
4418 if (!hw->bus.lan_id)
4419 return;
4420 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4421 if (state)
4422 esdp |= IXGBE_ESDP_SDP1;
4423 else
4424 esdp &= ~IXGBE_ESDP_SDP1;
4425 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4426 IXGBE_WRITE_FLUSH(hw);
4427 }
4428
4429 /**
4430 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4431 * @hw: pointer to hardware structure
4432 * @mask: Mask to specify which semaphore to acquire
4433 *
4434 * Acquires the SWFW semaphore and sets the I2C MUX
4435 **/
4436 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4437 {
4438 s32 status;
4439
4440 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4441
4442 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4443 if (status)
4444 return status;
4445
4446 if (mask & IXGBE_GSSR_I2C_MASK)
4447 ixgbe_set_mux(hw, 1);
4448
4449 return IXGBE_SUCCESS;
4450 }
4451
4452 /**
4453 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4454 * @hw: pointer to hardware structure
4455 * @mask: Mask to specify which semaphore to release
4456 *
4457 * Releases the SWFW semaphore and sets the I2C MUX
4458 **/
4459 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4460 {
4461 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4462
4463 if (mask & IXGBE_GSSR_I2C_MASK)
4464 ixgbe_set_mux(hw, 0);
4465
4466 ixgbe_release_swfw_sync_X540(hw, mask);
4467 }
4468
4469 /**
4470 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4471 * @hw: pointer to hardware structure
4472 * @mask: Mask to specify which semaphore to acquire
4473 *
4474 * Acquires the SWFW semaphore and gets the shared PHY token as needed
4475 */
4476 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4477 {
4478 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4479 int retries = FW_PHY_TOKEN_RETRIES;
4480 s32 status = IXGBE_SUCCESS;
4481
4482 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4483
4484 while (--retries) {
4485 status = IXGBE_SUCCESS;
4486 if (hmask)
4487 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4488 if (status) {
4489 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4490 status);
4491 return status;
4492 }
4493 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4494 return IXGBE_SUCCESS;
4495
4496 status = ixgbe_get_phy_token(hw);
4497 if (status == IXGBE_ERR_TOKEN_RETRY)
4498 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4499 status);
4500
4501 if (status == IXGBE_SUCCESS)
4502 return IXGBE_SUCCESS;
4503
4504 if (hmask)
4505 ixgbe_release_swfw_sync_X540(hw, hmask);
4506
4507 if (status != IXGBE_ERR_TOKEN_RETRY) {
4508 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4509 status);
4510 return status;
4511 }
4512 }
4513
4514 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4515 hw->phy.id);
4516 return status;
4517 }
4518
4519 /**
4520 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4521 * @hw: pointer to hardware structure
4522 * @mask: Mask to specify which semaphore to release
4523 *
4524 * Releases the SWFW semaphore and puts the shared phy token as needed
4525 */
4526 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4527 {
4528 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4529
4530 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4531
4532 if (mask & IXGBE_GSSR_TOKEN_SM)
4533 ixgbe_put_phy_token(hw);
4534
4535 if (hmask)
4536 ixgbe_release_swfw_sync_X540(hw, hmask);
4537 }
4538
4539 /**
4540 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4541 * @hw: pointer to hardware structure
4542 * @reg_addr: 32 bit address of PHY register to read
4543 * @device_type: 5 bit device type
4544 * @phy_data: Pointer to read data from PHY register
4545 *
4546 * Reads a value from a specified PHY register using the SWFW lock and PHY
4547 * Token. The PHY Token is needed since the MDIO is shared between two MAC
4548 * instances.
4549 **/
4550 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4551 u32 device_type, u16 *phy_data)
4552 {
4553 s32 status;
4554 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4555
4556 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4557
4558 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4559 return IXGBE_ERR_SWFW_SYNC;
4560
4561 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4562
4563 hw->mac.ops.release_swfw_sync(hw, mask);
4564
4565 return status;
4566 }
4567
4568 /**
4569 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4570 * @hw: pointer to hardware structure
4571 * @reg_addr: 32 bit PHY register to write
4572 * @device_type: 5 bit device type
4573 * @phy_data: Data to write to the PHY register
4574 *
4575 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4576 * The PHY Token is needed since the MDIO is shared between two MAC instances.
4577 **/
4578 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4579 u32 device_type, u16 phy_data)
4580 {
4581 s32 status;
4582 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4583
4584 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4585
4586 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4587 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4588 phy_data);
4589 hw->mac.ops.release_swfw_sync(hw, mask);
4590 } else {
4591 status = IXGBE_ERR_SWFW_SYNC;
4592 }
4593
4594 return status;
4595 }
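
/*
 * Illustrative sketch (not part of the driver): reading an external PHY
 * register on an X550a part, with the SWFW lock and PHY token handled by
 * the helper above.
 *
 *	u16 an_status;
 *
 *	if (ixgbe_read_phy_reg_x550a(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
 *	    IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_status) == IXGBE_SUCCESS)
 *		DEBUGOUT1("AN status 0x%04x\n", an_status);
 */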
4596
4597 /**
4598 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4599 * @hw: pointer to hardware structure
4600 *
4601 * Handle external Base T PHY interrupt. If a high temperature failure
4602 * alarm is raised, return an error; if the link status changed, set up
4603 * the internal/external PHY link.
4604 *
4605 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
4606 * failure alarm, else return PHY access status.
4607 */
4608 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4609 {
4610 bool lsc;
4611 u32 status;
4612
4613 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4614
4615 if (status != IXGBE_SUCCESS)
4616 return status;
4617
4618 if (lsc)
4619 return ixgbe_setup_internal_phy(hw);
4620
4621 return IXGBE_SUCCESS;
4622 }
4623
4624 /**
4625 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4626 * @hw: pointer to hardware structure
4627 * @speed: new link speed
4628 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4629 *
4630 * Setup internal/external PHY link speed based on link speed, then set
4631 * external PHY auto advertised link speed.
4632 *
4633 * Returns error status for any failure
4634 **/
4635 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4636 ixgbe_link_speed speed,
4637 bool autoneg_wait_to_complete)
4638 {
4639 s32 status;
4640 ixgbe_link_speed force_speed;
4641
4642 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4643
4644 /* Set up the internal/external PHY link speed as iXFI (10G), unless
4645 * only 1G is auto advertised, in which case set up a KX link.
4646 */
4647 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4648 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4649 else
4650 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4651
4652 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4653 */
4654 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4655 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4656 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4657
4658 if (status != IXGBE_SUCCESS)
4659 return status;
4660 }
4661
4662 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4663 }
4664
4665 /**
4666 * ixgbe_check_link_t_X550em - Determine link and speed status
4667 * @hw: pointer to hardware structure
4668 * @speed: pointer to link speed
4669 * @link_up: TRUE when link is up
4670 * @link_up_wait_to_complete: bool used to wait for link up or not
4671 *
4672 * Check that both the MAC and X557 external PHY have link.
4673 **/
4674 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4675 bool *link_up, bool link_up_wait_to_complete)
4676 {
4677 u32 status;
4678 u16 i, autoneg_status = 0;
4679
4680 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4681 return IXGBE_ERR_CONFIG;
4682
4683 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4684 link_up_wait_to_complete);
4685
4686 /* If check link fails or MAC link is not up, then return */
4687 if (status != IXGBE_SUCCESS || !(*link_up))
4688 return status;
4689
4690 /* MAC link is up, so check the external X557 PHY link.
4691 * The PHY link status bit latches low, so it can only be used to detect
4692 * a link drop, not the current state of the link, unless it is read
4693 * twice back-to-back.
4694 */
4695 for (i = 0; i < 2; i++) {
4696 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4697 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4698 &autoneg_status);
4699
4700 if (status != IXGBE_SUCCESS)
4701 return status;
4702 }
4703
4704 /* If external PHY link is not up, then indicate link not up */
4705 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4706 *link_up = FALSE;
4707
4708 return IXGBE_SUCCESS;
4709 }
4710
4711 /**
4712 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4713 * @hw: pointer to hardware structure
4714 **/
4715 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4716 {
4717 s32 status;
4718
4719 status = ixgbe_reset_phy_generic(hw);
4720
4721 if (status != IXGBE_SUCCESS)
4722 return status;
4723
4724 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4725 return ixgbe_enable_lasi_ext_t_x550em(hw);
4726 }
4727
4728 /**
4729 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4730 * @hw: pointer to hardware structure
4731 * @led_idx: led number to turn on
4732 **/
4733 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4734 {
4735 u16 phy_data;
4736
4737 DEBUGFUNC("ixgbe_led_on_t_X550em");
4738
4739 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4740 return IXGBE_ERR_PARAM;
4741
4742 /* To turn on the LED, set mode to ON. */
4743 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4744 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4745 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4746 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4747 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4748
4749 /* Some designs have the LEDs wired to the MAC */
4750 return ixgbe_led_on_generic(hw, led_idx);
4751 }
4752
4753 /**
4754 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4755 * @hw: pointer to hardware structure
4756 * @led_idx: led number to turn off
4757 **/
4758 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4759 {
4760 u16 phy_data;
4761
4762 DEBUGFUNC("ixgbe_led_off_t_X550em");
4763
4764 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4765 return IXGBE_ERR_PARAM;
4766
4767 /* To turn off the LED, set mode to OFF. */
4768 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4769 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4770 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4771 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4772 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4773
4774 /* Some designs have the LEDs wired to the MAC */
4775 return ixgbe_led_off_generic(hw, led_idx);
4776 }
4777
4778 /**
4779 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4780 * @hw: pointer to the HW structure
4781 * @maj: driver version major number
4782 * @min: driver version minor number
4783 * @build: driver version build number
4784 * @sub: driver version sub build number
4785 * @len: length of driver_ver string
4786 * @driver_ver: driver string
4787 *
4788 * Sends the driver version number to firmware through the manageability
4789 * block. Returns IXGBE_SUCCESS on success, IXGBE_ERR_SWFW_SYNC when an
4790 * error is encountered acquiring the semaphore, or
4791 * IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails.
4792 **/
4793 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4794 u8 build, u8 sub, u16 len, const char *driver_ver)
4795 {
4796 struct ixgbe_hic_drv_info2 fw_cmd;
4797 s32 ret_val = IXGBE_SUCCESS;
4798 int i;
4799
4800 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4801
4802 if ((len == 0) || (driver_ver == NULL) ||
4803 (len > sizeof(fw_cmd.driver_string)))
4804 return IXGBE_ERR_INVALID_ARGUMENT;
4805
4806 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4807 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4808 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4809 fw_cmd.port_num = (u8)hw->bus.func;
4810 fw_cmd.ver_maj = maj;
4811 fw_cmd.ver_min = min;
4812 fw_cmd.ver_build = build;
4813 fw_cmd.ver_sub = sub;
4814 fw_cmd.hdr.checksum = 0;
4815 memcpy(fw_cmd.driver_string, driver_ver, len);
4816 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4817 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4818
4819 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4820 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4821 sizeof(fw_cmd),
4822 IXGBE_HI_COMMAND_TIMEOUT,
4823 TRUE);
4824 if (ret_val != IXGBE_SUCCESS)
4825 continue;
4826
4827 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4828 FW_CEM_RESP_STATUS_SUCCESS)
4829 ret_val = IXGBE_SUCCESS;
4830 else
4831 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4832
4833 break;
4834 }
4835
4836 return ret_val;
4837 }
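
/*
 * Illustrative sketch (not part of the driver): reporting a hypothetical
 * driver version string to the management firmware.
 *
 *	static const char ver_str[] = "ix-example";
 *
 *	(void)ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
 *	    sizeof(ver_str), ver_str);
 */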
4838
4839 /**
4840 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4841 * @hw: pointer to hardware structure
4842 *
4843 * Returns TRUE if in FW NVM recovery mode.
4844 **/
4845 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4846 {
4847 u32 fwsm;
4848
4849 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4850
4851 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4852 }
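
/*
 * Illustrative sketch (not part of the driver): skipping NVM updates while
 * the firmware is recovering its image.
 *
 *	if (ixgbe_fw_recovery_mode_X550(hw))
 *		DEBUGOUT("FW in recovery mode, skipping NVM update\n");
 */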
4853