1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
34
35 #include "ixgbe_x550.h"
36 #include "ixgbe_x540.h"
37 #include "ixgbe_type.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41 #include <dev/mii/mii.h>
42
43 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
44 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed,
46 bool autoneg_wait_to_complete);
47 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
48 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
49 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
50
51 /**
52 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
53 * @hw: pointer to hardware structure
54 *
55 * Initialize the function pointers and assign the MAC type for X550.
56 * Does not touch the hardware.
57 **/
58 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
59 {
60 struct ixgbe_mac_info *mac = &hw->mac;
61 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
62 s32 ret_val;
63
64 DEBUGFUNC("ixgbe_init_ops_X550");
65
66 ret_val = ixgbe_init_ops_X540(hw);
67 mac->ops.dmac_config = ixgbe_dmac_config_X550;
68 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
69 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
70 mac->ops.setup_eee = NULL;
71 mac->ops.set_source_address_pruning =
72 ixgbe_set_source_address_pruning_X550;
73 mac->ops.set_ethertype_anti_spoofing =
74 ixgbe_set_ethertype_anti_spoofing_X550;
75
76 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
78 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
79 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
80 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
81 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
82 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
83 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
84 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
85
86 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
87 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
88 mac->ops.mdd_event = ixgbe_mdd_event_X550;
89 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
90 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
91 mac->ops.disable_rx = ixgbe_disable_rx_x550;
92 /* Manageability interface */
93 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
94 switch (hw->device_id) {
95 case IXGBE_DEV_ID_X550EM_X_1G_T:
96 hw->mac.ops.led_on = NULL;
97 hw->mac.ops.led_off = NULL;
98 break;
99 case IXGBE_DEV_ID_X550EM_X_10G_T:
100 case IXGBE_DEV_ID_X550EM_A_10G_T:
101 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
102 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
103 break;
104 default:
105 break;
106 }
107 return ret_val;
108 }
109
110 /**
111 * ixgbe_read_cs4227 - Read CS4227 register
112 * @hw: pointer to hardware structure
113 * @reg: register number to read
114 * @value: pointer to receive value read
115 *
116 * Returns status code
117 **/
118 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
119 {
120 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
121 }
122
123 /**
124 * ixgbe_write_cs4227 - Write CS4227 register
125 * @hw: pointer to hardware structure
126 * @reg: register number to write
127 * @value: value to write to register
128 *
129 * Returns status code
130 **/
131 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
132 {
133 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
134 }
135
136 /**
137 * ixgbe_read_pe - Read register from port expander
138 * @hw: pointer to hardware structure
139 * @reg: register number to read
140 * @value: pointer to receive read value
141 *
142 * Returns status code
143 **/
144 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
145 {
146 s32 status;
147
148 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
149 if (status != IXGBE_SUCCESS)
150 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
151 "port expander access failed with %d\n", status);
152 return status;
153 }
154
155 /**
156 * ixgbe_write_pe - Write register to port expander
157 * @hw: pointer to hardware structure
158 * @reg: register number to write
159 * @value: value to write
160 *
161 * Returns status code
162 **/
163 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
164 {
165 s32 status;
166
167 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
168 if (status != IXGBE_SUCCESS)
169 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
170 "port expander access failed with %d\n", status);
171 return status;
172 }
173
174 /**
175 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
176 * @hw: pointer to hardware structure
177 *
178 * This function assumes that the caller has acquired the proper semaphore.
179 * Returns error code
180 **/
181 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
182 {
183 s32 status;
184 u32 retry;
185 u16 value;
186 u8 reg;
187
188 /* Trigger hard reset. */
189 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
190 if (status != IXGBE_SUCCESS)
191 return status;
192 reg |= IXGBE_PE_BIT1;
193 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
194 if (status != IXGBE_SUCCESS)
195 return status;
196
197 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
198 if (status != IXGBE_SUCCESS)
199 return status;
200 reg &= ~IXGBE_PE_BIT1;
201 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
202 if (status != IXGBE_SUCCESS)
203 return status;
204
205 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
206 if (status != IXGBE_SUCCESS)
207 return status;
208 reg &= ~IXGBE_PE_BIT1;
209 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
210 if (status != IXGBE_SUCCESS)
211 return status;
212
213 usec_delay(IXGBE_CS4227_RESET_HOLD);
214
215 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
216 if (status != IXGBE_SUCCESS)
217 return status;
218 reg |= IXGBE_PE_BIT1;
219 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
220 if (status != IXGBE_SUCCESS)
221 return status;
222
223 /* Wait for the reset to complete. */
224 msec_delay(IXGBE_CS4227_RESET_DELAY);
225 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
226 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
227 &value);
228 if (status == IXGBE_SUCCESS &&
229 value == IXGBE_CS4227_EEPROM_LOAD_OK)
230 break;
231 msec_delay(IXGBE_CS4227_CHECK_DELAY);
232 }
233 if (retry == IXGBE_CS4227_RETRIES) {
234 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
235 "CS4227 reset did not complete.");
236 return IXGBE_ERR_PHY;
237 }
238
239 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
240 if (status != IXGBE_SUCCESS ||
241 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
242 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
243 "CS4227 EEPROM did not load successfully.");
244 return IXGBE_ERR_PHY;
245 }
246
247 return IXGBE_SUCCESS;
248 }
249
250 /**
251 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
252 * @hw: pointer to hardware structure
253 **/
254 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
255 {
256 s32 status = IXGBE_SUCCESS;
257 u32 swfw_mask = hw->phy.phy_semaphore_mask;
258 u16 value = 0;
259 u8 retry;
260
261 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
262 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
263 if (status != IXGBE_SUCCESS) {
264 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
265 "semaphore failed with %d", status);
266 msec_delay(IXGBE_CS4227_CHECK_DELAY);
267 continue;
268 }
269
270 /* Get status of reset flow. */
271 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
272
273 if (status == IXGBE_SUCCESS &&
274 value == IXGBE_CS4227_RESET_COMPLETE)
275 goto out;
276
277 if (status != IXGBE_SUCCESS ||
278 value != IXGBE_CS4227_RESET_PENDING)
279 break;
280
281 /* Reset is pending. Wait and check again. */
282 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
283 msec_delay(IXGBE_CS4227_CHECK_DELAY);
284 }
285
286 /* If still pending, assume other instance failed. */
287 if (retry == IXGBE_CS4227_RETRIES) {
288 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
289 if (status != IXGBE_SUCCESS) {
290 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
291 "semaphore failed with %d", status);
292 return;
293 }
294 }
295
296 /* Reset the CS4227. */
297 status = ixgbe_reset_cs4227(hw);
298 if (status != IXGBE_SUCCESS) {
299 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
300 "CS4227 reset failed: %d", status);
301 goto out;
302 }
303
304 /* The reset takes a long time, so temporarily release the semaphore in
305 * case the other driver instance is waiting for the reset indication.
306 */
307 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
308 IXGBE_CS4227_RESET_PENDING);
309 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
310 msec_delay(10);
311 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
312 if (status != IXGBE_SUCCESS) {
313 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
314 "semaphore failed with %d", status);
315 return;
316 }
317
318 /* Record completion for next time. */
319 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
320 IXGBE_CS4227_RESET_COMPLETE);
321
322 out:
323 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
324 msec_delay(hw->eeprom.semaphore_delay);
325 }
326
327 /**
328 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
329 * @hw: pointer to hardware structure
330 **/
331 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
332 {
333 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
334
335 if (hw->bus.lan_id) {
336 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
337 esdp |= IXGBE_ESDP_SDP1_DIR;
338 }
339 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
340 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
341 IXGBE_WRITE_FLUSH(hw);
342 }
343
344 /**
345 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
346 * @hw: pointer to hardware structure
347 * @reg_addr: 32 bit address of PHY register to read
348 * @dev_type: always unused
349 * @phy_data: Pointer to read data from PHY register
350 */
351 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
352 u32 dev_type, u16 *phy_data)
353 {
354 u32 i, data, command;
355 UNREFERENCED_1PARAMETER(dev_type);
356
357 /* Setup and write the read command */
358 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
359 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
360 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
361 IXGBE_MSCA_MDI_COMMAND;
362
363 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
364
365 /* Check every 10 usec to see if the access completed.
366 * The MDI Command bit will clear when the operation is
367 * complete
368 */
369 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
370 usec_delay(10);
371
372 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
373 if (!(command & IXGBE_MSCA_MDI_COMMAND))
374 break;
375 }
376
377 if (command & IXGBE_MSCA_MDI_COMMAND) {
378 ERROR_REPORT1(IXGBE_ERROR_POLLING,
379 "PHY read command did not complete.\n");
380 return IXGBE_ERR_PHY;
381 }
382
383 /* Read operation is complete. Get the data from MSRWD */
384 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
385 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
386 *phy_data = (u16)data;
387
388 return IXGBE_SUCCESS;
389 }
390
391 /**
392 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
393 * @hw: pointer to hardware structure
394 * @reg_addr: 32 bit PHY register to write
395 * @dev_type: always unused
396 * @phy_data: Data to write to the PHY register
397 */
398 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
399 u32 dev_type, u16 phy_data)
400 {
401 u32 i, command;
402 UNREFERENCED_1PARAMETER(dev_type);
403
404 /* Put the data in the MDI single read and write data register */
405 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
406
407 /* Setup and write the write command */
408 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
409 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
410 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
411 IXGBE_MSCA_MDI_COMMAND;
412
413 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
414
415 /* Check every 10 usec to see if the access completed.
416 * The MDI Command bit will clear when the operation is
417 * complete
418 */
419 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
420 usec_delay(10);
421
422 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
423 if (!(command & IXGBE_MSCA_MDI_COMMAND))
424 break;
425 }
426
427 if (command & IXGBE_MSCA_MDI_COMMAND) {
428 ERROR_REPORT1(IXGBE_ERROR_POLLING,
429 "PHY write cmd didn't complete\n");
430 return IXGBE_ERR_PHY;
431 }
432
433 return IXGBE_SUCCESS;
434 }
435
436 /**
437 * ixgbe_identify_phy_x550em - Get PHY type based on device id
438 * @hw: pointer to hardware structure
439 *
440 * Returns error code
441 */
442 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
443 {
444 hw->mac.ops.set_lan_id(hw);
445
446 ixgbe_read_mng_if_sel_x550em(hw);
447
448 switch (hw->device_id) {
449 case IXGBE_DEV_ID_X550EM_A_SFP:
450 return ixgbe_identify_sfp_module_X550em(hw);
451 case IXGBE_DEV_ID_X550EM_X_SFP:
452 /* set up for CS4227 usage */
453 ixgbe_setup_mux_ctl(hw);
454 ixgbe_check_cs4227(hw);
455 /* Fallthrough */
456
457 case IXGBE_DEV_ID_X550EM_A_SFP_N:
458 return ixgbe_identify_sfp_module_X550em(hw);
459 break;
460 case IXGBE_DEV_ID_X550EM_X_KX4:
461 hw->phy.type = ixgbe_phy_x550em_kx4;
462 break;
463 case IXGBE_DEV_ID_X550EM_X_XFI:
464 hw->phy.type = ixgbe_phy_x550em_xfi;
465 break;
466 case IXGBE_DEV_ID_X550EM_X_KR:
467 case IXGBE_DEV_ID_X550EM_A_KR:
468 case IXGBE_DEV_ID_X550EM_A_KR_L:
469 hw->phy.type = ixgbe_phy_x550em_kr;
470 break;
471 case IXGBE_DEV_ID_X550EM_A_10G_T:
472 case IXGBE_DEV_ID_X550EM_X_10G_T:
473 return ixgbe_identify_phy_generic(hw);
474 case IXGBE_DEV_ID_X550EM_X_1G_T:
475 hw->phy.type = ixgbe_phy_ext_1g_t;
476 break;
477 case IXGBE_DEV_ID_X550EM_A_1G_T:
478 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
479 hw->phy.type = ixgbe_phy_fw;
480 if (hw->bus.lan_id)
481 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
482 else
483 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
484 break;
485 default:
486 break;
487 }
488 return IXGBE_SUCCESS;
489 }
490
491 /**
492 * ixgbe_fw_phy_activity - Perform an activity on a PHY
493 * @hw: pointer to hardware structure
494 * @activity: activity to perform
495 * @data: Pointer to 4 32-bit words of data
496 */
497 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
498 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
499 {
500 union {
501 struct ixgbe_hic_phy_activity_req cmd;
502 struct ixgbe_hic_phy_activity_resp rsp;
503 } hic;
504 u16 retries = FW_PHY_ACT_RETRIES;
505 s32 rc;
506 u16 i;
507
508 do {
509 memset(&hic, 0, sizeof(hic));
510 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
511 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
512 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
513 hic.cmd.port_number = hw->bus.lan_id;
514 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
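/* The activity data words are exchanged in big-endian byte order. */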
515 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
516 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
517
518 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
519 sizeof(hic.cmd),
520 IXGBE_HI_COMMAND_TIMEOUT,
521 TRUE);
522 if (rc != IXGBE_SUCCESS)
523 return rc;
524 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
525 FW_CEM_RESP_STATUS_SUCCESS) {
526 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
527 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
528 return IXGBE_SUCCESS;
529 }
530 usec_delay(20);
531 --retries;
532 } while (retries > 0);
533
534 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
535 }
536
537 static const struct {
538 u16 fw_speed;
539 ixgbe_link_speed phy_speed;
540 } ixgbe_fw_map[] = {
541 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
542 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
543 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
544 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
545 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
546 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
547 };
548
549 /**
550 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
551 * @hw: pointer to hardware structure
552 *
553 * Returns error code
554 */
555 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
556 {
557 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
558 u16 phy_speeds;
559 u16 phy_id_lo;
560 s32 rc;
561 u16 i;
562
563 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
564 if (rc)
565 return rc;
566
567 hw->phy.speeds_supported = 0;
568 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
569 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
570 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
571 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
572 }
573
574 #if 0
575 /*
576 * Don't set autoneg_advertised here, to avoid being inconsistent with
577 * the if_media value.
578 */
579 if (!hw->phy.autoneg_advertised)
580 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
581 #endif
582
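/* Assemble the PHY ID: info[0] holds the high ID bits and info[1] the low
 * bits; the bits outside IXGBE_PHY_REVISION_MASK are the revision.
 */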
583 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
584 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
585 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
586 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
587 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
588 return IXGBE_ERR_PHY_ADDR_INVALID;
589 return IXGBE_SUCCESS;
590 }
591
592 /**
593 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
594 * @hw: pointer to hardware structure
595 *
596 * Returns error code
597 */
598 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
599 {
600 if (hw->bus.lan_id)
601 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
602 else
603 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
604
605 hw->phy.type = ixgbe_phy_fw;
606 hw->phy.ops.read_reg = NULL;
607 hw->phy.ops.write_reg = NULL;
608 return ixgbe_get_phy_id_fw(hw);
609 }
610
611 /**
612 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
613 * @hw: pointer to hardware structure
614 *
615 * Returns error code
616 */
617 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
618 {
619 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
620
621 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
622 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
623 }
624
625 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
626 u32 device_type, u16 *phy_data)
627 {
628 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
629 return IXGBE_NOT_IMPLEMENTED;
630 }
631
632 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
633 u32 device_type, u16 phy_data)
634 {
635 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
636 return IXGBE_NOT_IMPLEMENTED;
637 }
638
639 /**
640 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
641 * @hw: pointer to the hardware structure
642 * @addr: I2C bus address to read from
643 * @reg: I2C device register to read from
644 * @val: pointer to location to receive read value
645 *
646 * Returns an error code on error.
647 **/
648 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
649 u16 reg, u16 *val)
650 {
651 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
652 }
653
654 /**
655 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
656 * @hw: pointer to the hardware structure
657 * @addr: I2C bus address to read from
658 * @reg: I2C device register to read from
659 * @val: pointer to location to receive read value
660 *
661 * Returns an error code on error.
662 **/
663 static s32
664 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
665 u16 reg, u16 *val)
666 {
667 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
668 }
669
670 /**
671 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
672 * @hw: pointer to the hardware structure
673 * @addr: I2C bus address to write to
674 * @reg: I2C device register to write to
675 * @val: value to write
676 *
677 * Returns an error code on error.
678 **/
679 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
680 u8 addr, u16 reg, u16 val)
681 {
682 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
683 }
684
685 /**
686 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
687 * @hw: pointer to the hardware structure
688 * @addr: I2C bus address to write to
689 * @reg: I2C device register to write to
690 * @val: value to write
691 *
692 * Returns an error code on error.
693 **/
694 static s32
695 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
696 u8 addr, u16 reg, u16 val)
697 {
698 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
699 }
700
701 /**
702 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
703 * @hw: pointer to hardware structure
704 *
705 * Initialize the function pointers and assign the MAC type for X550EM.
706 * Does not touch the hardware.
707 **/
708 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
709 {
710 struct ixgbe_mac_info *mac = &hw->mac;
711 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
712 struct ixgbe_phy_info *phy = &hw->phy;
713 s32 ret_val;
714
715 DEBUGFUNC("ixgbe_init_ops_X550EM");
716
717 /* Similar to X550 so start there. */
718 ret_val = ixgbe_init_ops_X550(hw);
719
720 /* Since this function eventually calls
721 * ixgbe_init_ops_X540 by design, we are setting
722 * the pointers to NULL explicitly here to overwrite
723 * the values being set in the x540 function.
724 */
725
726 /* Bypass not supported in x550EM */
727 mac->ops.bypass_rw = NULL;
728 mac->ops.bypass_valid_rd = NULL;
729 mac->ops.bypass_set = NULL;
730 mac->ops.bypass_rd_eep = NULL;
731
732 /* FCOE not supported in x550EM */
733 mac->ops.get_san_mac_addr = NULL;
734 mac->ops.set_san_mac_addr = NULL;
735 mac->ops.get_wwn_prefix = NULL;
736 mac->ops.get_fcoe_boot_status = NULL;
737
738 /* IPsec not supported in x550EM */
739 mac->ops.disable_sec_rx_path = NULL;
740 mac->ops.enable_sec_rx_path = NULL;
741
742 /* AUTOC register is not present in x550EM. */
743 mac->ops.prot_autoc_read = NULL;
744 mac->ops.prot_autoc_write = NULL;
745
746 /* X550EM bus type is internal */
747 hw->bus.type = ixgbe_bus_type_internal;
748 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
749
750
751 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
752 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
753 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
754 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
755 mac->ops.get_supported_physical_layer =
756 ixgbe_get_supported_physical_layer_X550em;
757
758 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
759 mac->ops.setup_fc = ixgbe_setup_fc_generic;
760 else
761 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
762
763 /* PHY */
764 phy->ops.init = ixgbe_init_phy_ops_X550em;
765 switch (hw->device_id) {
766 case IXGBE_DEV_ID_X550EM_A_1G_T:
767 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
768 mac->ops.setup_fc = NULL;
769 phy->ops.identify = ixgbe_identify_phy_fw;
770 phy->ops.set_phy_power = NULL;
771 phy->ops.get_firmware_version = NULL;
772 break;
773 case IXGBE_DEV_ID_X550EM_X_1G_T:
774 mac->ops.setup_fc = NULL;
775 phy->ops.identify = ixgbe_identify_phy_x550em;
776 phy->ops.set_phy_power = NULL;
777 break;
778 default:
779 phy->ops.identify = ixgbe_identify_phy_x550em;
780 }
781
782 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
783 phy->ops.set_phy_power = NULL;
784
785
786 /* EEPROM */
787 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
788 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
789 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
790 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
791 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
792 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
793 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
794 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
795
796 return ret_val;
797 }
798
799 #define IXGBE_DENVERTON_WA 1
800
801 /**
802 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
803 * @hw: pointer to hardware structure
804 */
805 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
806 {
807 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
808 s32 rc;
809 #ifdef IXGBE_DENVERTON_WA
810 s32 ret_val;
811 u16 phydata;
812 #endif
813 u16 i;
814
815 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
816 return 0;
817
818 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
819 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
820 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
821 return IXGBE_ERR_INVALID_LINK_SETTINGS;
822 }
823
824 switch (hw->fc.requested_mode) {
825 case ixgbe_fc_full:
826 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
827 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
828 break;
829 case ixgbe_fc_rx_pause:
830 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
831 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
832 break;
833 case ixgbe_fc_tx_pause:
834 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
835 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
836 break;
837 default:
838 break;
839 }
840
841 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
842 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
843 setup[0] |= ixgbe_fw_map[i].fw_speed;
844 }
845 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
846
847 if (hw->phy.eee_speeds_advertised)
848 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
849
850 #ifdef IXGBE_DENVERTON_WA
851 if ((hw->phy.force_10_100_autonego == false)
852 && ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
853 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL))) {
854 /* Don't use auto-nego for 10/100Mbps */
855 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
856 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
857 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
858 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
859 }
860 #endif
861
862 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
863 if (rc)
864 return rc;
865
866 #ifdef IXGBE_DENVERTON_WA
867 if (hw->phy.force_10_100_autonego == true)
868 goto out;
869
870 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
871 if (ret_val != 0)
872 goto out;
873
874 /*
875 * Broken firmware sets the BMCR register incorrectly if
876 * FW_PHY_ACT_SETUP_LINK_AN isn't set:
877 * a) FDX may not be set.
878 * b) BMCR_SPEED1 (bit 6) is always cleared.
879 * +---------+-------+-----------+------+---------------------------+
880 * | request | BMCR  | BMCR spd  | BMCR |                           |
881 * |         | (HEX) | (in bits) | FDX  |                           |
882 * +---------+-------+-----------+------+---------------------------+
883 * | 10M     | 0000  | 10M(00)   |  0   |                           |
884 * | 10M     | 2000  | 100M(01)  |  0   | (I've never observed this)|
885 * | 10M     | 2100  | 100M(01)  |  1   |                           |
886 * | 100M    | 0000  | 10M(00)   |  0   |                           |
887 * | 100M    | 0100  | 10M(00)   |  1   |                           |
888 * +---------+-------+-----------+------+---------------------------+
889 */
890 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
891 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
892 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
893 && (((phydata & BMCR_FDX) == 0)
894 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
895 phydata = BMCR_FDX;
896 switch (hw->phy.autoneg_advertised) {
897 case IXGBE_LINK_SPEED_10_FULL:
898 phydata |= BMCR_S10;
899 break;
900 case IXGBE_LINK_SPEED_100_FULL:
901 phydata |= BMCR_S100;
902 break;
903 case IXGBE_LINK_SPEED_1GB_FULL:
904 panic("%s: 1GB_FULL is set", __func__);
905 break;
906 default:
907 break;
908 }
909 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
910 if (ret_val != 0)
911 return ret_val;
912 }
913 out:
914 #endif
915 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
916 return IXGBE_ERR_OVERTEMP;
917 return IXGBE_SUCCESS;
918 }
919
920 /**
921 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
922 * @hw: pointer to hardware structure
923 *
924 * Called at init time to set up flow control.
925 */
926 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
927 {
928 if (hw->fc.requested_mode == ixgbe_fc_default)
929 hw->fc.requested_mode = ixgbe_fc_full;
930
931 return ixgbe_setup_fw_link(hw);
932 }
933
934 /**
935 * ixgbe_setup_eee_fw - Enable/disable EEE support
936 * @hw: pointer to the HW structure
937 * @enable_eee: boolean flag to enable EEE
938 *
939 * Enable/disable EEE based on enable_eee flag.
940 * This function controls EEE for firmware-based PHY implementations.
941 */
942 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
943 {
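/* Nothing to do if the advertised EEE state already matches the request. */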
944 if (!!hw->phy.eee_speeds_advertised == enable_eee)
945 return IXGBE_SUCCESS;
946 if (enable_eee)
947 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
948 else
949 hw->phy.eee_speeds_advertised = 0;
950 return hw->phy.ops.setup_link(hw);
951 }
952
953 /**
954 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
955 * @hw: pointer to hardware structure
956 *
957 * Initialize the function pointers and assign the MAC type for X550EM_a.
958 * Does not touch the hardware.
959 **/
960 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
961 {
962 struct ixgbe_mac_info *mac = &hw->mac;
963 s32 ret_val;
964
965 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
966
967 /* Start with generic X550EM init */
968 ret_val = ixgbe_init_ops_X550EM(hw);
969
970 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
971 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
972 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
973 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
974 } else {
975 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
976 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
977 }
978 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
979 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
980
981 switch (mac->ops.get_media_type(hw)) {
982 case ixgbe_media_type_fiber:
983 mac->ops.setup_fc = NULL;
984 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
985 break;
986 case ixgbe_media_type_backplane:
987 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
988 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
989 break;
990 default:
991 break;
992 }
993
994 switch (hw->device_id) {
995 case IXGBE_DEV_ID_X550EM_A_1G_T:
996 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
997 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
998 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
999 mac->ops.setup_eee = ixgbe_setup_eee_fw;
1000 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
1001 IXGBE_LINK_SPEED_1GB_FULL;
1002 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1003 break;
1004 default:
1005 break;
1006 }
1007
1008 return ret_val;
1009 }
1010
1011 /**
1012 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1013 * @hw: pointer to hardware structure
1014 *
1015 * Initialize the function pointers and assign the MAC type for X550EM_x.
1016 * Does not touch the hardware.
1017 **/
1018 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1019 {
1020 struct ixgbe_mac_info *mac = &hw->mac;
1021 struct ixgbe_link_info *link = &hw->link;
1022 s32 ret_val;
1023
1024 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1025
1026 /* Start with generic X550EM init */
1027 ret_val = ixgbe_init_ops_X550EM(hw);
1028
1029 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1030 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1031 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1032 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1033 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1034 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1035 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1036 link->ops.write_link_unlocked =
1037 ixgbe_write_i2c_combined_generic_unlocked;
1038 link->addr = IXGBE_CS4227;
1039
1040 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1041 mac->ops.setup_fc = NULL;
1042 mac->ops.setup_eee = NULL;
1043 mac->ops.init_led_link_act = NULL;
1044 }
1045
1046 return ret_val;
1047 }
1048
1049 /**
1050 * ixgbe_dmac_config_X550
1051 * @hw: pointer to hardware structure
1052 *
1053 * Configure DMA coalescing. If enabling dmac, dmac is activated.
1054 * When disabling dmac, the dmac enable bit is cleared.
1055 **/
1056 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1057 {
1058 u32 reg, high_pri_tc;
1059
1060 DEBUGFUNC("ixgbe_dmac_config_X550");
1061
1062 /* Disable DMA coalescing before configuring */
1063 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1064 reg &= ~IXGBE_DMACR_DMAC_EN;
1065 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1066
1067 /* Disable DMA Coalescing if the watchdog timer is 0 */
1068 if (!hw->mac.dmac_config.watchdog_timer)
1069 goto out;
1070
1071 ixgbe_dmac_config_tcs_X550(hw);
1072
1073 /* Configure DMA Coalescing Control Register */
1074 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1075
1076 /* Set the watchdog timer in units of 40.96 usec */
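/* (watchdog_timer * 100) / 4096 divides the value by 40.96, converting it
 * into the hardware's 40.96 usec units.
 */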
1077 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1078 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1079
1080 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1081 /* If fcoe is enabled, set high priority traffic class */
1082 if (hw->mac.dmac_config.fcoe_en) {
1083 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1084 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1085 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1086 }
1087 reg |= IXGBE_DMACR_EN_MNG_IND;
1088
1089 /* Enable DMA coalescing after configuration */
1090 reg |= IXGBE_DMACR_DMAC_EN;
1091 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1092
1093 out:
1094 return IXGBE_SUCCESS;
1095 }
1096
1097 /**
1098 * ixgbe_dmac_config_tcs_X550
1099 * @hw: pointer to hardware structure
1100 *
1101 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1102 * be cleared before configuring.
1103 **/
1104 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1105 {
1106 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1107
1108 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1109
1110 /* Select Rx packet buffer headroom based on the configured link speed */
1111 switch (hw->mac.dmac_config.link_speed) {
1112 case IXGBE_LINK_SPEED_10_FULL:
1113 case IXGBE_LINK_SPEED_100_FULL:
1114 pb_headroom = IXGBE_DMACRXT_100M;
1115 break;
1116 case IXGBE_LINK_SPEED_1GB_FULL:
1117 pb_headroom = IXGBE_DMACRXT_1G;
1118 break;
1119 default:
1120 pb_headroom = IXGBE_DMACRXT_10G;
1121 break;
1122 }
1123
1124 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1125 IXGBE_MHADD_MFS_SHIFT) / 1024);
1126
1127 /* Set the per Rx packet buffer receive threshold */
1128 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1129 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1130 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1131
1132 if (tc < hw->mac.dmac_config.num_tcs) {
1133 /* Get Rx PB size */
1134 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1135 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1136 IXGBE_RXPBSIZE_SHIFT;
1137
1138 /* Calculate receive buffer threshold in kilobytes */
1139 if (rx_pb_size > pb_headroom)
1140 rx_pb_size = rx_pb_size - pb_headroom;
1141 else
1142 rx_pb_size = 0;
1143
1144 /* The threshold must be at least the max frame size (MFS) */
1145 reg |= (rx_pb_size > maxframe_size_kb) ?
1146 rx_pb_size : maxframe_size_kb;
1147 }
1148 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1149 }
1150 return IXGBE_SUCCESS;
1151 }
1152
1153 /**
1154 * ixgbe_dmac_update_tcs_X550
1155 * @hw: pointer to hardware structure
1156 *
1157 * Disables dmac, updates per TC settings, and then enables dmac.
1158 **/
1159 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1160 {
1161 u32 reg;
1162
1163 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1164
1165 /* Disable DMA coalescing before configuring */
1166 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1167 reg &= ~IXGBE_DMACR_DMAC_EN;
1168 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1169
1170 ixgbe_dmac_config_tcs_X550(hw);
1171
1172 /* Enable DMA coalescing after configuration */
1173 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1174 reg |= IXGBE_DMACR_DMAC_EN;
1175 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1176
1177 return IXGBE_SUCCESS;
1178 }
1179
1180 /**
1181 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1182 * @hw: pointer to hardware structure
1183 *
1184 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1185 * ixgbe_hw struct in order to set up EEPROM access.
1186 **/
1187 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1188 {
1189 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1190 u32 eec;
1191 u16 eeprom_size;
1192
1193 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1194
1195 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1196 eeprom->semaphore_delay = 10;
1197 eeprom->type = ixgbe_flash;
1198
1199 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1200 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1201 IXGBE_EEC_SIZE_SHIFT);
1202 eeprom->word_size = 1 << (eeprom_size +
1203 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1204
1205 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1206 eeprom->type, eeprom->word_size);
1207 }
1208
1209 return IXGBE_SUCCESS;
1210 }
1211
1212 /**
1213 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1214 * @hw: pointer to hardware structure
1215 * @enable: enable or disable source address pruning
1216 * @pool: Rx pool to set source address pruning for
1217 **/
1218 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1219 unsigned int pool)
1220 {
1221 u64 pfflp;
1222
1223 /* max rx pool is 63 */
1224 if (pool > 63)
1225 return;
1226
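/* PFFLPL and PFFLPH together form a 64-bit bitmap with one pruning-enable
 * bit per Rx pool.
 */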
1227 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1228 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1229
1230 if (enable)
1231 pfflp |= (1ULL << pool);
1232 else
1233 pfflp &= ~(1ULL << pool);
1234
1235 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1236 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1237 }
1238
1239 /**
1240 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1241 * @hw: pointer to hardware structure
1242 * @enable: enable or disable switch for Ethertype anti-spoofing
1243 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1244 *
1245 **/
1246 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1247 bool enable, int vf)
1248 {
1249 int vf_target_reg = vf >> 3;
1250 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1251 u32 pfvfspoof;
1252
1253 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1254
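/* Each PFVFSPOOF register covers 8 VFs (vf >> 3 selects the register); the
 * Ethertype anti-spoofing bit for this VF is at
 * (vf % 8) + IXGBE_SPOOF_ETHERTYPEAS_SHIFT.
 */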
1255 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1256 if (enable)
1257 pfvfspoof |= (1 << vf_target_shift);
1258 else
1259 pfvfspoof &= ~(1 << vf_target_shift);
1260
1261 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1262 }
1263
1264 /**
1265 * ixgbe_iosf_wait - Wait for IOSF command completion
1266 * @hw: pointer to hardware structure
1267 * @ctrl: pointer to location to receive final IOSF control value
1268 *
1269 * Returns failing status on timeout
1270 *
1271 * Note: ctrl can be NULL if the IOSF control register value is not needed
1272 **/
1273 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1274 {
1275 u32 i, command = 0;
1276
1277 /* Check every 10 usec to see if the address cycle completed.
1278 * The SB IOSF BUSY bit will clear when the operation is
1279 * complete
1280 */
1281 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1282 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1283 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1284 break;
1285 usec_delay(10);
1286 }
1287 if (ctrl)
1288 *ctrl = command;
1289 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1290 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1291 return IXGBE_ERR_PHY;
1292 }
1293
1294 return IXGBE_SUCCESS;
1295 }
1296
1297 /**
1298 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to the specified register
1299 * of the IOSF device
1300 * @hw: pointer to hardware structure
1301 * @reg_addr: 32 bit PHY register to write
1302 * @device_type: 3 bit device type
1303 * @data: Data to write to the register
1304 **/
1305 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1306 u32 device_type, u32 data)
1307 {
1308 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1309 u32 command, error __unused;
1310 s32 ret;
1311
1312 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1313 if (ret != IXGBE_SUCCESS)
1314 return ret;
1315
1316 ret = ixgbe_iosf_wait(hw, NULL);
1317 if (ret != IXGBE_SUCCESS)
1318 goto out;
1319
1320 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1321 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1322
1323 /* Write IOSF control register */
1324 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1325
1326 /* Write IOSF data register */
1327 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1328
1329 ret = ixgbe_iosf_wait(hw, &command);
1330
1331 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1332 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1333 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1334 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1335 "Failed to write, error %x\n", error);
1336 ret = IXGBE_ERR_PHY;
1337 }
1338
1339 out:
1340 ixgbe_release_swfw_semaphore(hw, gssr);
1341 return ret;
1342 }
1343
1344 /**
1345 * ixgbe_read_iosf_sb_reg_x550 - Reads the specified register of the IOSF device
1346 * @hw: pointer to hardware structure
1347 * @reg_addr: 32 bit PHY register to write
1348 * @device_type: 3 bit device type
1349 * @data: Pointer to read data from the register
1350 **/
1351 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1352 u32 device_type, u32 *data)
1353 {
1354 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1355 u32 command, error __unused;
1356 s32 ret;
1357
1358 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1359 if (ret != IXGBE_SUCCESS)
1360 return ret;
1361
1362 ret = ixgbe_iosf_wait(hw, NULL);
1363 if (ret != IXGBE_SUCCESS)
1364 goto out;
1365
1366 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1367 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1368
1369 /* Write IOSF control register */
1370 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1371
1372 ret = ixgbe_iosf_wait(hw, &command);
1373
1374 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1375 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1376 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1377 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1378 "Failed to read, error %x\n", error);
1379 ret = IXGBE_ERR_PHY;
1380 }
1381
1382 if (ret == IXGBE_SUCCESS)
1383 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1384
1385 out:
1386 ixgbe_release_swfw_semaphore(hw, gssr);
1387 return ret;
1388 }
1389
1390 /**
1391 * ixgbe_get_phy_token - Get the token for shared phy access
1392 * @hw: Pointer to hardware structure
1393 */
1394
1395 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1396 {
1397 struct ixgbe_hic_phy_token_req token_cmd;
1398 s32 status;
1399
1400 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1401 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1402 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1403 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1404 token_cmd.port_number = hw->bus.lan_id;
1405 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1406 token_cmd.pad = 0;
1407 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1408 sizeof(token_cmd),
1409 IXGBE_HI_COMMAND_TIMEOUT,
1410 TRUE);
1411 if (status) {
1412 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1413 status);
1414 return status;
1415 }
1416 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1417 return IXGBE_SUCCESS;
1418 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1419 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1420 token_cmd.hdr.cmd_or_resp.ret_status);
1421 return IXGBE_ERR_FW_RESP_INVALID;
1422 }
1423
1424 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1425 return IXGBE_ERR_TOKEN_RETRY;
1426 }
1427
1428 /**
1429 * ixgbe_put_phy_token - Put the token for shared phy access
1430 * @hw: Pointer to hardware structure
1431 */
1432
1433 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1434 {
1435 struct ixgbe_hic_phy_token_req token_cmd;
1436 s32 status;
1437
1438 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1439 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1440 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1441 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1442 token_cmd.port_number = hw->bus.lan_id;
1443 token_cmd.command_type = FW_PHY_TOKEN_REL;
1444 token_cmd.pad = 0;
1445 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1446 sizeof(token_cmd),
1447 IXGBE_HI_COMMAND_TIMEOUT,
1448 TRUE);
1449 if (status)
1450 return status;
1451 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1452 return IXGBE_SUCCESS;
1453
1454 DEBUGOUT("Put PHY Token host interface command failed");
1455 return IXGBE_ERR_FW_RESP_INVALID;
1456 }
1457
1458 /**
1459 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to the specified register
1460 * of the IOSF device
1461 * @hw: pointer to hardware structure
1462 * @reg_addr: 32 bit PHY register to write
1463 * @device_type: 3 bit device type
1464 * @data: Data to write to the register
1465 **/
1466 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1467 u32 device_type, u32 data)
1468 {
1469 struct ixgbe_hic_internal_phy_req write_cmd;
1470 s32 status;
1471 UNREFERENCED_1PARAMETER(device_type);
1472
1473 memset(&write_cmd, 0, sizeof(write_cmd));
1474 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1475 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1476 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1477 write_cmd.port_number = hw->bus.lan_id;
1478 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1479 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1480 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1481
1482 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1483 sizeof(write_cmd),
1484 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1485
1486 return status;
1487 }
1488
1489 /**
1490 * ixgbe_read_iosf_sb_reg_x550a - Reads the specified register of the IOSF device
1491 * @hw: pointer to hardware structure
1492 * @reg_addr: 32 bit PHY register to write
1493 * @device_type: 3 bit device type
1494 * @data: Pointer to read data from the register
1495 **/
1496 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1497 u32 device_type, u32 *data)
1498 {
1499 union {
1500 struct ixgbe_hic_internal_phy_req cmd;
1501 struct ixgbe_hic_internal_phy_resp rsp;
1502 } hic;
1503 s32 status;
1504 UNREFERENCED_1PARAMETER(device_type);
1505
1506 memset(&hic, 0, sizeof(hic));
1507 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1508 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1509 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1510 hic.cmd.port_number = hw->bus.lan_id;
1511 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1512 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1513
1514 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1515 sizeof(hic.cmd),
1516 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1517
1518 /* Extract the register value from the response. */
1519 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1520
1521 return status;
1522 }
1523
1524 /**
1525 * ixgbe_disable_mdd_X550
1526 * @hw: pointer to hardware structure
1527 *
1528 * Disable malicious driver detection
1529 **/
1530 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1531 {
1532 u32 reg;
1533
1534 DEBUGFUNC("ixgbe_disable_mdd_X550");
1535
1536 /* Disable MDD for TX DMA and interrupt */
1537 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1538 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1539 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1540
1541 /* Disable MDD for RX and interrupt */
1542 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1543 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1544 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1545 }
1546
1547 /**
1548 * ixgbe_enable_mdd_X550
1549 * @hw: pointer to hardware structure
1550 *
1551 * Enable malicious driver detection
1552 **/
1553 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1554 {
1555 u32 reg;
1556
1557 DEBUGFUNC("ixgbe_enable_mdd_X550");
1558
1559 /* Enable MDD for TX DMA and interrupt */
1560 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1561 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1562 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1563
1564 /* Enable MDD for RX and interrupt */
1565 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1566 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1567 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1568 }
1569
1570 /**
1571 * ixgbe_restore_mdd_vf_X550
1572 * @hw: pointer to hardware structure
1573 * @vf: vf index
1574 *
1575 * Restore VF that was disabled during malicious driver detection event
1576 **/
1577 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1578 {
1579 u32 idx, reg, num_qs, start_q, bitmask;
1580
1581 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1582
1583 /* Map VF to queues */
1584 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1585 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1586 case IXGBE_MRQC_VMDQRT8TCEN:
1587 num_qs = 8; /* 16 VFs / pools */
1588 bitmask = 0x000000FF;
1589 break;
1590 case IXGBE_MRQC_VMDQRSS32EN:
1591 case IXGBE_MRQC_VMDQRT4TCEN:
1592 num_qs = 4; /* 32 VFs / pools */
1593 bitmask = 0x0000000F;
1594 break;
1595 default: /* 64 VFs / pools */
1596 num_qs = 2;
1597 bitmask = 0x00000003;
1598 break;
1599 }
1600 start_q = vf * num_qs;
1601
1602 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1603 idx = start_q / 32;
1604 reg = 0;
1605 reg |= (bitmask << (start_q % 32));
1606 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1607 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1608 }
1609
1610 /**
1611 * ixgbe_mdd_event_X550
1612 * @hw: pointer to hardware structure
1613 * @vf_bitmap: vf bitmap of malicious vfs
1614 *
1615 * Handle malicious driver detection event.
1616 **/
1617 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1618 {
1619 u32 wqbr;
1620 u32 i, j, reg, q, shift, vf, idx;
1621
1622 DEBUGFUNC("ixgbe_mdd_event_X550");
1623
1624 /* figure out pool size for mapping to vf's */
1625 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1626 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1627 case IXGBE_MRQC_VMDQRT8TCEN:
1628 shift = 3; /* 16 VFs / pools */
1629 break;
1630 case IXGBE_MRQC_VMDQRSS32EN:
1631 case IXGBE_MRQC_VMDQRT4TCEN:
1632 shift = 2; /* 32 VFs / pools */
1633 break;
1634 default:
1635 shift = 1; /* 64 VFs / pools */
1636 break;
1637 }
1638
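/* 'shift' is log2 of the queues per pool, so a queue index maps to its
 * VF/pool as q >> shift.
 */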
1639 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1640 for (i = 0; i < 4; i++) {
1641 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1642 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1643
1644 if (!wqbr)
1645 continue;
1646
1647 /* Get malicious queue */
1648 for (j = 0; j < 32 && wqbr; j++) {
1649
1650 if (!(wqbr & (1 << j)))
1651 continue;
1652
1653 /* Get queue from bitmask */
1654 q = j + (i * 32);
1655
1656 /* Map queue to vf */
1657 vf = (q >> shift);
1658
1659 /* Set vf bit in vf_bitmap */
1660 idx = vf / 32;
1661 vf_bitmap[idx] |= (1 << (vf % 32));
1662 wqbr &= ~(1 << j);
1663 }
1664 }
1665 }
1666
1667 /**
1668 * ixgbe_get_media_type_X550em - Get media type
1669 * @hw: pointer to hardware structure
1670 *
1671 * Returns the media type (fiber, copper, backplane)
1672 */
1673 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1674 {
1675 enum ixgbe_media_type media_type;
1676
1677 DEBUGFUNC("ixgbe_get_media_type_X550em");
1678
1679 /* Detect if there is a copper PHY attached. */
1680 switch (hw->device_id) {
1681 case IXGBE_DEV_ID_X550EM_X_KR:
1682 case IXGBE_DEV_ID_X550EM_X_KX4:
1683 case IXGBE_DEV_ID_X550EM_X_XFI:
1684 case IXGBE_DEV_ID_X550EM_A_KR:
1685 case IXGBE_DEV_ID_X550EM_A_KR_L:
1686 media_type = ixgbe_media_type_backplane;
1687 break;
1688 case IXGBE_DEV_ID_X550EM_X_SFP:
1689 case IXGBE_DEV_ID_X550EM_A_SFP:
1690 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1691 case IXGBE_DEV_ID_X550EM_A_QSFP:
1692 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1693 media_type = ixgbe_media_type_fiber;
1694 break;
1695 case IXGBE_DEV_ID_X550EM_X_1G_T:
1696 case IXGBE_DEV_ID_X550EM_X_10G_T:
1697 case IXGBE_DEV_ID_X550EM_A_10G_T:
1698 media_type = ixgbe_media_type_copper;
1699 break;
1700 case IXGBE_DEV_ID_X550EM_A_SGMII:
1701 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1702 media_type = ixgbe_media_type_backplane;
1703 hw->phy.type = ixgbe_phy_sgmii;
1704 break;
1705 case IXGBE_DEV_ID_X550EM_A_1G_T:
1706 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1707 media_type = ixgbe_media_type_copper;
1708 break;
1709 default:
1710 media_type = ixgbe_media_type_unknown;
1711 break;
1712 }
1713 return media_type;
1714 }
1715
1716 /**
1717 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1718 * @hw: pointer to hardware structure
1719 * @linear: TRUE if SFP module is linear
1720 */
1721 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1722 {
1723 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1724
1725 switch (hw->phy.sfp_type) {
1726 case ixgbe_sfp_type_not_present:
1727 return IXGBE_ERR_SFP_NOT_PRESENT;
1728 case ixgbe_sfp_type_da_cu_core0:
1729 case ixgbe_sfp_type_da_cu_core1:
1730 *linear = TRUE;
1731 break;
1732 case ixgbe_sfp_type_srlr_core0:
1733 case ixgbe_sfp_type_srlr_core1:
1734 case ixgbe_sfp_type_da_act_lmt_core0:
1735 case ixgbe_sfp_type_da_act_lmt_core1:
1736 case ixgbe_sfp_type_1g_sx_core0:
1737 case ixgbe_sfp_type_1g_sx_core1:
1738 case ixgbe_sfp_type_1g_lx_core0:
1739 case ixgbe_sfp_type_1g_lx_core1:
1740 *linear = FALSE;
1741 break;
1742 case ixgbe_sfp_type_unknown:
1743 case ixgbe_sfp_type_1g_cu_core0:
1744 case ixgbe_sfp_type_1g_cu_core1:
1745 default:
1746 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1747 }
1748
1749 return IXGBE_SUCCESS;
1750 }
1751
1752 /**
1753 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1754 * @hw: pointer to hardware structure
1755 *
1756 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1757 **/
1758 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1759 {
1760 s32 status;
1761 bool linear;
1762
1763 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1764
1765 status = ixgbe_identify_module_generic(hw);
1766
1767 if (status != IXGBE_SUCCESS)
1768 return status;
1769
1770 /* Check if SFP module is supported */
1771 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1772
1773 return status;
1774 }
1775
1776 /**
1777 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1778 * @hw: pointer to hardware structure
1779 */
1780 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1781 {
1782 s32 status;
1783 bool linear;
1784
1785 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1786
1787 /* Check if SFP module is supported */
1788 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1789
1790 if (status != IXGBE_SUCCESS)
1791 return status;
1792
1793 ixgbe_init_mac_link_ops_X550em(hw);
1794 hw->phy.ops.reset = NULL;
1795
1796 return IXGBE_SUCCESS;
1797 }
1798
1799 /**
1800 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1801 * internal PHY
1802 * @hw: pointer to hardware structure
1803 **/
1804 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1805 {
1806 s32 status;
1807 u32 link_ctrl;
1808
1809 /* Restart auto-negotiation. */
1810 status = hw->mac.ops.read_iosf_sb_reg(hw,
1811 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1812 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1813
1814 if (status) {
1815 DEBUGOUT("Auto-negotiation did not complete\n");
1816 return status;
1817 }
1818
1819 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1820 status = hw->mac.ops.write_iosf_sb_reg(hw,
1821 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1822 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1823
1824 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1825 u32 flx_mask_st20;
1826
1827 /* Indicate to FW that AN restart has been asserted */
1828 status = hw->mac.ops.read_iosf_sb_reg(hw,
1829 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1830 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1831
1832 if (status) {
1833 			DEBUGOUT("Failed to read the PMD flex mask register\n");
1834 return status;
1835 }
1836
1837 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1838 status = hw->mac.ops.write_iosf_sb_reg(hw,
1839 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1840 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1841 }
1842
1843 return status;
1844 }
1845
1846 /**
1847 * ixgbe_setup_sgmii - Set up link for sgmii
1848 * @hw: pointer to hardware structure
1849 * @speed: new link speed
1850 * @autoneg_wait: TRUE when waiting for completion is needed
1851 */
1852 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1853 bool autoneg_wait)
1854 {
1855 struct ixgbe_mac_info *mac = &hw->mac;
1856 u32 lval, sval, flx_val;
1857 s32 rc;
1858
1859 rc = mac->ops.read_iosf_sb_reg(hw,
1860 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1861 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1862 if (rc)
1863 return rc;
1864
1865 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1866 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1867 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1868 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1869 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1870 rc = mac->ops.write_iosf_sb_reg(hw,
1871 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1872 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1873 if (rc)
1874 return rc;
1875
1876 rc = mac->ops.read_iosf_sb_reg(hw,
1877 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1878 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1879 if (rc)
1880 return rc;
1881
1882 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1883 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1884 rc = mac->ops.write_iosf_sb_reg(hw,
1885 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1886 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1887 if (rc)
1888 return rc;
1889
1890 rc = mac->ops.read_iosf_sb_reg(hw,
1891 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1892 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1893 if (rc)
1894 return rc;
1895
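	/* Put the PMD flex port in forced 1G SGMII mode with clause 37
	 * auto-negotiation enabled; KR auto-negotiation (AN_EN) stays off.
	 */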
1896 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1897 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1898 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1899 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1900 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1901
1902 rc = mac->ops.write_iosf_sb_reg(hw,
1903 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1904 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1905 if (rc)
1906 return rc;
1907
1908 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1909 if (rc)
1910 return rc;
1911
1912 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1913 }
1914
1915 /**
1916 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1917 * @hw: pointer to hardware structure
1918 * @speed: new link speed
1919 * @autoneg_wait: TRUE when waiting for completion is needed
1920 */
1921 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1922 bool autoneg_wait)
1923 {
1924 struct ixgbe_mac_info *mac = &hw->mac;
1925 u32 lval, sval, flx_val;
1926 s32 rc;
1927
1928 rc = mac->ops.read_iosf_sb_reg(hw,
1929 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1930 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1931 if (rc)
1932 return rc;
1933
1934 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1935 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1936 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1937 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1938 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1939 rc = mac->ops.write_iosf_sb_reg(hw,
1940 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1941 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1942 if (rc)
1943 return rc;
1944
1945 rc = mac->ops.read_iosf_sb_reg(hw,
1946 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1947 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1948 if (rc)
1949 return rc;
1950
1951 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1952 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1953 rc = mac->ops.write_iosf_sb_reg(hw,
1954 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1955 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1956 if (rc)
1957 return rc;
1958
1959 rc = mac->ops.write_iosf_sb_reg(hw,
1960 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1961 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1962 if (rc)
1963 return rc;
1964
1965 rc = mac->ops.read_iosf_sb_reg(hw,
1966 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1967 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1968 if (rc)
1969 return rc;
1970
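	/* Unlike ixgbe_setup_sgmii(), leave the flex port speed selection to
	 * auto-negotiation (SPEED_AN); the firmware-managed external PHY is
	 * expected to resolve the final link speed.
	 */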
1971 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1972 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1973 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1974 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1975 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1976
1977 rc = mac->ops.write_iosf_sb_reg(hw,
1978 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1979 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1980 if (rc)
1981 return rc;
1982
1983 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1984
1985 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1986 }
1987
1988 /**
1989 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1990 * @hw: pointer to hardware structure
1991 */
1992 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1993 {
1994 struct ixgbe_mac_info *mac = &hw->mac;
1995
1996 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1997
1998 switch (hw->mac.ops.get_media_type(hw)) {
1999 case ixgbe_media_type_fiber:
2000 /* CS4227 does not support autoneg, so disable the laser control
2001 * functions for SFP+ fiber
2002 */
2003 mac->ops.disable_tx_laser = NULL;
2004 mac->ops.enable_tx_laser = NULL;
2005 mac->ops.flap_tx_laser = NULL;
2006 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2007 mac->ops.set_rate_select_speed =
2008 ixgbe_set_soft_rate_select_speed;
2009
2010 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2011 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2012 mac->ops.setup_mac_link =
2013 ixgbe_setup_mac_link_sfp_x550a;
2014 else
2015 mac->ops.setup_mac_link =
2016 ixgbe_setup_mac_link_sfp_x550em;
2017 break;
2018 case ixgbe_media_type_copper:
2019 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2020 break;
2021 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2022 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2023 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2024 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2025 mac->ops.check_link =
2026 ixgbe_check_mac_link_generic;
2027 } else {
2028 mac->ops.setup_link =
2029 ixgbe_setup_mac_link_t_X550em;
2030 }
2031 } else {
2032 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2033 mac->ops.check_link = ixgbe_check_link_t_X550em;
2034 }
2035 break;
2036 case ixgbe_media_type_backplane:
2037 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2038 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2039 mac->ops.setup_link = ixgbe_setup_sgmii;
2040 break;
2041 default:
2042 break;
2043 }
2044 }
2045
2046 /**
2047  * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2048 * @hw: pointer to hardware structure
2049 * @speed: pointer to link speed
2050 * @autoneg: TRUE when autoneg or autotry is enabled
2051 */
2052 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2053 ixgbe_link_speed *speed,
2054 bool *autoneg)
2055 {
2056 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2057
2058
2059 if (hw->phy.type == ixgbe_phy_fw) {
2060 *autoneg = TRUE;
2061 *speed = hw->phy.speeds_supported;
2062 return 0;
2063 }
2064
2065 /* SFP */
2066 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2067
2068 /* CS4227 SFP must not enable auto-negotiation */
2069 *autoneg = FALSE;
2070
2071 /* Check if 1G SFP module. */
2072 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2073 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2074 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2075 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2076 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2077 return IXGBE_SUCCESS;
2078 }
2079
2080 /* Link capabilities are based on SFP */
2081 if (hw->phy.multispeed_fiber)
2082 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2083 IXGBE_LINK_SPEED_1GB_FULL;
2084 else
2085 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2086 } else {
2087 switch (hw->phy.type) {
2088 case ixgbe_phy_ext_1g_t:
2089 case ixgbe_phy_sgmii:
2090 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2091 break;
2092 case ixgbe_phy_x550em_kr:
2093 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2094 /* check different backplane modes */
2095 if (hw->phy.nw_mng_if_sel &
2096 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2097 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2098 break;
2099 } else if (hw->device_id ==
2100 IXGBE_DEV_ID_X550EM_A_KR_L) {
2101 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2102 break;
2103 }
2104 }
2105 /* fall through */
2106 default:
2107 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2108 IXGBE_LINK_SPEED_1GB_FULL;
2109 break;
2110 }
2111 *autoneg = TRUE;
2112 }
2113
2114 return IXGBE_SUCCESS;
2115 }
2116
2117 /**
2118  * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2119 * @hw: pointer to hardware structure
2120 * @lsc: pointer to boolean flag which indicates whether external Base T
2121 * PHY interrupt is lsc
2122 *
2123  * Determine if external Base T PHY interrupt cause is high temperature
2124 * failure alarm or link status change.
2125 *
2126 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2127 * failure alarm, else return PHY access status.
2128 */
2129 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2130 {
2131 u32 status;
2132 u16 reg;
2133
2134 *lsc = FALSE;
2135
2136 /* Vendor alarm triggered */
2137 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2138 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2139 					      &reg);
2140
2141 if (status != IXGBE_SUCCESS ||
2142 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2143 return status;
2144
2145 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2146 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2147 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2148 					      &reg);
2149
2150 if (status != IXGBE_SUCCESS ||
2151 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2152 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2153 return status;
2154
2155 /* Global alarm triggered */
2156 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2157 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2158 					      &reg);
2159
2160 if (status != IXGBE_SUCCESS)
2161 return status;
2162
2163 /* If high temperature failure, then return over temp error and exit */
2164 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2165 /* power down the PHY in case the PHY FW didn't already */
2166 ixgbe_set_copper_phy_power(hw, FALSE);
2167 return IXGBE_ERR_OVERTEMP;
2168 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2169 /* device fault alarm triggered */
2170 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2171 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2172 					      &reg);
2173
2174 if (status != IXGBE_SUCCESS)
2175 return status;
2176
2177 /* if device fault was due to high temp alarm handle and exit */
2178 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2179 /* power down the PHY in case the PHY FW didn't */
2180 ixgbe_set_copper_phy_power(hw, FALSE);
2181 return IXGBE_ERR_OVERTEMP;
2182 }
2183 }
2184
2185 /* Vendor alarm 2 triggered */
2186 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2187 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2188
2189 if (status != IXGBE_SUCCESS ||
2190 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2191 return status;
2192
2193 /* link connect/disconnect event occurred */
2194 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2195 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2196
2197 if (status != IXGBE_SUCCESS)
2198 return status;
2199
2200 /* Indicate LSC */
2201 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2202 *lsc = TRUE;
2203
2204 return IXGBE_SUCCESS;
2205 }
2206
2207 /**
2208 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2209 * @hw: pointer to hardware structure
2210 *
2211 * Enable link status change and temperature failure alarm for the external
2212 * Base T PHY
2213 *
2214 * Returns PHY access status
2215 */
2216 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2217 {
2218 u32 status;
2219 u16 reg;
2220 bool lsc;
2221
2222 /* Clear interrupt flags */
2223 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2224
2225 /* Enable link status change alarm */
2226
2227 /* Enable the LASI interrupts on X552 devices to receive notifications
2228 * of the link configurations of the external PHY and correspondingly
2229 * support the configuration of the internal iXFI link, since iXFI does
2230 * not support auto-negotiation. This is not required for X553 devices
2231 * having KR support, which performs auto-negotiations and which is used
2232 * as the internal link to the external PHY. Hence adding a check here
2233 * to avoid enabling LASI interrupts for X553 devices.
2234 */
2235 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2236 status = hw->phy.ops.read_reg(hw,
2237 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2238 			IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2239
2240 if (status != IXGBE_SUCCESS)
2241 return status;
2242
2243 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2244
2245 status = hw->phy.ops.write_reg(hw,
2246 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2247 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2248
2249 if (status != IXGBE_SUCCESS)
2250 return status;
2251 }
2252
2253 /* Enable high temperature failure and global fault alarms */
2254 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2255 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2256 				      &reg);
2257
2258 if (status != IXGBE_SUCCESS)
2259 return status;
2260
2261 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2262 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2263
2264 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2265 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2266 reg);
2267
2268 if (status != IXGBE_SUCCESS)
2269 return status;
2270
2271 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2272 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2273 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2274 				      &reg);
2275
2276 if (status != IXGBE_SUCCESS)
2277 return status;
2278
2279 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2280 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2281
2282 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2283 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2284 reg);
2285
2286 if (status != IXGBE_SUCCESS)
2287 return status;
2288
2289 /* Enable chip-wide vendor alarm */
2290 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2291 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2292 				      &reg);
2293
2294 if (status != IXGBE_SUCCESS)
2295 return status;
2296
2297 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2298
2299 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2300 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2301 reg);
2302
2303 return status;
2304 }
2305
2306 /**
2307 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2308 * @hw: pointer to hardware structure
2309 * @speed: link speed
2310 *
2311 * Configures the integrated KR PHY.
2312 **/
2313 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2314 ixgbe_link_speed speed)
2315 {
2316 s32 status;
2317 u32 reg_val;
2318
2319 status = hw->mac.ops.read_iosf_sb_reg(hw,
2320 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2321 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2322 if (status)
2323 return status;
2324
2325 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2326 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2327 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2328
2329 /* Advertise 10G support. */
2330 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2331 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2332
2333 /* Advertise 1G support. */
2334 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2335 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2336
2337 status = hw->mac.ops.write_iosf_sb_reg(hw,
2338 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2339 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2340
2341 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2342 /* Set lane mode to KR auto negotiation */
2343 status = hw->mac.ops.read_iosf_sb_reg(hw,
2344 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2345 			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2346
2347 if (status)
2348 return status;
2349
2350 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2351 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2352 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2353 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2354 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2355
2356 status = hw->mac.ops.write_iosf_sb_reg(hw,
2357 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2358 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2359 }
2360
2361 return ixgbe_restart_an_internal_phy_x550em(hw);
2362 }
2363
2364 /**
2365 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2366 * @hw: pointer to hardware structure
2367 */
2368 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2369 {
2370 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2371 s32 rc;
2372
2373 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2374 return IXGBE_SUCCESS;
2375
2376 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2377 if (rc)
2378 return rc;
2379 memset(store, 0, sizeof(store));
2380
2381 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2382 if (rc)
2383 return rc;
2384
2385 return ixgbe_setup_fw_link(hw);
2386 }
2387
2388 /**
2389 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2390 * @hw: pointer to hardware structure
2391 */
2392 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2393 {
2394 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2395 s32 rc;
2396
2397 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2398 if (rc)
2399 return rc;
2400
2401 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2402 ixgbe_shutdown_fw_phy(hw);
2403 return IXGBE_ERR_OVERTEMP;
2404 }
2405 return IXGBE_SUCCESS;
2406 }
2407
2408 /**
2409 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2410 * @hw: pointer to hardware structure
2411 *
2412 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2413 * values.
2414 **/
2415 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2416 {
2417 /* Save NW management interface connected on board. This is used
2418 * to determine internal PHY mode.
2419 */
2420 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2421
2422 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2423 	 * PHY address. This register field has only been used for X552.
2424 */
2425 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2426 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2427 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2428 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2429 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2430 }
2431
2432 return IXGBE_SUCCESS;
2433 }
2434
2435 /**
2436 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2437 * @hw: pointer to hardware structure
2438 *
2439 * Initialize any function pointers that were not able to be
2440 * set during init_shared_code because the PHY/SFP type was
2441 * not known. Perform the SFP init if necessary.
2442 */
2443 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2444 {
2445 struct ixgbe_phy_info *phy = &hw->phy;
2446 s32 ret_val;
2447
2448 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2449
2450 hw->mac.ops.set_lan_id(hw);
2451 ixgbe_read_mng_if_sel_x550em(hw);
2452
2453 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2454 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2455 ixgbe_setup_mux_ctl(hw);
2456 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2457 }
2458
2459 switch (hw->device_id) {
2460 case IXGBE_DEV_ID_X550EM_A_1G_T:
2461 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2462 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2463 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2464 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2465 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2466 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2467 if (hw->bus.lan_id)
2468 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2469 else
2470 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2471
2472 break;
2473 case IXGBE_DEV_ID_X550EM_A_10G_T:
2474 case IXGBE_DEV_ID_X550EM_A_SFP:
2475 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2476 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2477 if (hw->bus.lan_id)
2478 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2479 else
2480 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2481 break;
2482 case IXGBE_DEV_ID_X550EM_X_SFP:
2483 /* set up for CS4227 usage */
2484 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2485 break;
2486 case IXGBE_DEV_ID_X550EM_X_1G_T:
2487 phy->ops.read_reg_mdi = NULL;
2488 phy->ops.write_reg_mdi = NULL;
2489 break;
2490 default:
2491 break;
2492 }
2493
2494 /* Identify the PHY or SFP module */
2495 ret_val = phy->ops.identify(hw);
2496 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2497 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2498 return ret_val;
2499
2500 /* Setup function pointers based on detected hardware */
2501 ixgbe_init_mac_link_ops_X550em(hw);
2502 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2503 phy->ops.reset = NULL;
2504
2505 	/* Set function pointers based on phy type */
2506 switch (hw->phy.type) {
2507 case ixgbe_phy_x550em_kx4:
2508 phy->ops.setup_link = NULL;
2509 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2510 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2511 break;
2512 case ixgbe_phy_x550em_kr:
2513 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2514 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2515 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2516 break;
2517 case ixgbe_phy_ext_1g_t:
2518 /* link is managed by FW */
2519 phy->ops.setup_link = NULL;
2520 phy->ops.reset = NULL;
2521 break;
2522 case ixgbe_phy_x550em_xfi:
2523 /* link is managed by HW */
2524 phy->ops.setup_link = NULL;
2525 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2526 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2527 break;
2528 case ixgbe_phy_x550em_ext_t:
2529 /* If internal link mode is XFI, then setup iXFI internal link,
2530 * else setup KR now.
2531 */
2532 phy->ops.setup_internal_link =
2533 ixgbe_setup_internal_phy_t_x550em;
2534
2535 /* setup SW LPLU only for first revision of X550EM_x */
2536 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2537 !(IXGBE_FUSES0_REV_MASK &
2538 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2539 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2540
2541 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2542 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2543 break;
2544 case ixgbe_phy_sgmii:
2545 phy->ops.setup_link = NULL;
2546 break;
2547 case ixgbe_phy_fw:
2548 phy->ops.setup_link = ixgbe_setup_fw_link;
2549 phy->ops.reset = ixgbe_reset_phy_fw;
2550 break;
2551 default:
2552 break;
2553 }
2554 return ret_val;
2555 }
2556
2557 /**
2558 * ixgbe_set_mdio_speed - Set MDIO clock speed
2559 * @hw: pointer to hardware structure
2560 */
2561 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2562 {
2563 u32 hlreg0;
2564
2565 switch (hw->device_id) {
2566 case IXGBE_DEV_ID_X550EM_X_10G_T:
2567 case IXGBE_DEV_ID_X550EM_A_SGMII:
2568 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2569 case IXGBE_DEV_ID_X550EM_A_10G_T:
2570 case IXGBE_DEV_ID_X550EM_A_SFP:
2571 case IXGBE_DEV_ID_X550EM_A_QSFP:
2572 /* Config MDIO clock speed before the first MDIO PHY access */
2573 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2574 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2575 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2576 break;
2577 case IXGBE_DEV_ID_X550EM_A_1G_T:
2578 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2579 /* Select fast MDIO clock speed for these devices */
2580 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2581 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2582 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2583 break;
2584 default:
2585 break;
2586 }
2587 }
2588
2589 /**
2590 * ixgbe_reset_hw_X550em - Perform hardware reset
2591 * @hw: pointer to hardware structure
2592 *
2593 * Resets the hardware by resetting the transmit and receive units, masks
2594  * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2595 * reset.
2596 */
2597 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2598 {
2599 ixgbe_link_speed link_speed;
2600 s32 status;
2601 s32 phy_status = IXGBE_SUCCESS;
2602 u32 ctrl = 0;
2603 u32 i;
2604 bool link_up = FALSE;
2605 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2606
2607 DEBUGFUNC("ixgbe_reset_hw_X550em");
2608
2609 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2610 status = hw->mac.ops.stop_adapter(hw);
2611 if (status != IXGBE_SUCCESS) {
2612 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2613 return status;
2614 }
2615 /* flush pending Tx transactions */
2616 ixgbe_clear_tx_pending(hw);
2617
2618 ixgbe_set_mdio_speed(hw);
2619
2620 /* PHY ops must be identified and initialized prior to reset */
2621 phy_status = hw->phy.ops.init(hw);
2622
2623 if (phy_status)
2624 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2625 		    phy_status);
2626
2627 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2628 phy_status == IXGBE_ERR_PHY_ADDR_INVALID) {
2629 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2630 goto mac_reset_top;
2631 }
2632
2633 /* start the external PHY */
2634 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2635 status = ixgbe_init_ext_t_x550em(hw);
2636 if (status) {
2637 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2638 status);
2639 return status;
2640 }
2641 }
2642
2643 /* Setup SFP module if there is one present. */
2644 if (hw->phy.sfp_setup_needed) {
2645 phy_status = hw->mac.ops.setup_sfp(hw);
2646 hw->phy.sfp_setup_needed = FALSE;
2647 }
2648
2649 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2650 goto mac_reset_top;
2651
2652 /* Reset PHY */
2653 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2654 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2655 return IXGBE_ERR_OVERTEMP;
2656 }
2657
2658 mac_reset_top:
2659 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2660 * If link reset is used when link is up, it might reset the PHY when
2661 * mng is using it. If link is down or the flag to force full link
2662 * reset is set, then perform link reset.
2663 */
2664 ctrl = IXGBE_CTRL_LNK_RST;
2665 if (!hw->force_full_reset) {
2666 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2667 if (link_up)
2668 ctrl = IXGBE_CTRL_RST;
2669 }
2670
2671 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2672 if (status != IXGBE_SUCCESS) {
2673 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2674 "semaphore failed with %d", status);
2675 return IXGBE_ERR_SWFW_SYNC;
2676 }
2677 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2678 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2679 IXGBE_WRITE_FLUSH(hw);
2680 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2681
2682 /* Poll for reset bit to self-clear meaning reset is complete */
2683 for (i = 0; i < 10; i++) {
2684 usec_delay(1);
2685 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2686 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2687 break;
2688 }
2689
2690 if (ctrl & IXGBE_CTRL_RST_MASK) {
2691 status = IXGBE_ERR_RESET_FAILED;
2692 DEBUGOUT("Reset polling failed to complete.\n");
2693 }
2694
2695 msec_delay(50);
2696
2697 /* Double resets are required for recovery from certain error
2698 * conditions. Between resets, it is necessary to stall to
2699 * allow time for any pending HW events to complete.
2700 */
2701 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2702 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2703 goto mac_reset_top;
2704 }
2705
2706 /* Store the permanent mac address */
2707 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2708
2709 /* Store MAC address from RAR0, clear receive address registers, and
2710 * clear the multicast table. Also reset num_rar_entries to 128,
2711 * since we modify this value when programming the SAN MAC address.
2712 */
2713 hw->mac.num_rar_entries = 128;
2714 hw->mac.ops.init_rx_addrs(hw);
2715
2716 ixgbe_set_mdio_speed(hw);
2717
2718 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2719 ixgbe_setup_mux_ctl(hw);
2720
2721 if (status != IXGBE_SUCCESS)
2722 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2723
2724 if (phy_status != IXGBE_SUCCESS)
2725 status = phy_status;
2726
2727 return status;
2728 }
2729
2730 /**
2731 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2732 * @hw: pointer to hardware structure
2733 */
2734 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2735 {
2736 u32 status;
2737 u16 reg;
2738
2739 status = hw->phy.ops.read_reg(hw,
2740 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2741 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2742 		&reg);
2743
2744 if (status != IXGBE_SUCCESS)
2745 return status;
2746
2747 /* If PHY FW reset completed bit is set then this is the first
2748 * SW instance after a power on so the PHY FW must be un-stalled.
2749 */
2750 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2751 status = hw->phy.ops.read_reg(hw,
2752 IXGBE_MDIO_GLOBAL_RES_PR_10,
2753 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2754 					&reg);
2755
2756 if (status != IXGBE_SUCCESS)
2757 return status;
2758
2759 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2760
2761 status = hw->phy.ops.write_reg(hw,
2762 IXGBE_MDIO_GLOBAL_RES_PR_10,
2763 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2764 reg);
2765
2766 if (status != IXGBE_SUCCESS)
2767 return status;
2768 }
2769
2770 return status;
2771 }
2772
2773 /**
2774 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2775 * @hw: pointer to hardware structure
2776 **/
2777 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2778 {
2779 /* leave link alone for 2.5G */
2780 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2781 return IXGBE_SUCCESS;
2782
2783 if (ixgbe_check_reset_blocked(hw))
2784 return 0;
2785
2786 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2787 }
2788
2789 /**
2790  * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2791 * @hw: pointer to hardware structure
2792 * @speed: new link speed
2793 * @autoneg_wait_to_complete: unused
2794 *
2795 * Configure the external PHY and the integrated KR PHY for SFP support.
2796 **/
2797 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2798 ixgbe_link_speed speed,
2799 bool autoneg_wait_to_complete)
2800 {
2801 s32 ret_val;
2802 u16 reg_slice, reg_val;
2803 bool setup_linear = FALSE;
2804 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2805
2806 /* Check if SFP module is supported and linear */
2807 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2808
2809 /* If no SFP module present, then return success. Return success since
2810 	 * there is no reason to configure CS4227, and an SFP not present error
2811 	 * is not acceptable in the setup MAC link flow.
2812 */
2813 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2814 return IXGBE_SUCCESS;
2815
2816 if (ret_val != IXGBE_SUCCESS)
2817 return ret_val;
2818
2819 /* Configure internal PHY for KR/KX. */
2820 ixgbe_setup_kr_speed_x550em(hw, speed);
2821
2822 /* Configure CS4227 LINE side to proper mode. */
2823 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2824 (hw->bus.lan_id << 12);
2825 if (setup_linear)
2826 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2827 else
2828 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2829 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2830 reg_val);
2831 return ret_val;
2832 }
2833
2834 /**
2835 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2836 * @hw: pointer to hardware structure
2837 * @speed: the link speed to force
2838 *
2839 * Configures the integrated PHY for native SFI mode. Used to connect the
2840 * internal PHY directly to an SFP cage, without autonegotiation.
2841 **/
2842 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2843 {
2844 struct ixgbe_mac_info *mac = &hw->mac;
2845 s32 status;
2846 u32 reg_val;
2847
2848 /* Disable all AN and force speed to 10G Serial. */
2849 status = mac->ops.read_iosf_sb_reg(hw,
2850 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2851 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2852 if (status != IXGBE_SUCCESS)
2853 return status;
2854
2855 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2856 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2857 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2858 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2859
2860 /* Select forced link speed for internal PHY. */
2861 switch (*speed) {
2862 case IXGBE_LINK_SPEED_10GB_FULL:
2863 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2864 break;
2865 case IXGBE_LINK_SPEED_1GB_FULL:
2866 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2867 break;
2868 case 0:
2869 /* media none (linkdown) */
2870 break;
2871 default:
2872 /* Other link speeds are not supported by internal PHY. */
2873 return IXGBE_ERR_LINK_SETUP;
2874 }
2875
2876 status = mac->ops.write_iosf_sb_reg(hw,
2877 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2878 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2879
2880 /* Toggle port SW reset by AN reset. */
2881 status = ixgbe_restart_an_internal_phy_x550em(hw);
2882
2883 return status;
2884 }
2885
2886 /**
2887 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2888 * @hw: pointer to hardware structure
2889 * @speed: new link speed
2890 * @autoneg_wait_to_complete: unused
2891 *
2892 * Configure the integrated PHY for SFP support.
2893 **/
2894 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2895 ixgbe_link_speed speed,
2896 bool autoneg_wait_to_complete)
2897 {
2898 s32 ret_val;
2899 u16 reg_phy_ext;
2900 bool setup_linear = FALSE;
2901 u32 reg_slice, reg_phy_int, slice_offset;
2902
2903 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2904
2905 /* Check if SFP module is supported and linear */
2906 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2907
2908 /* If no SFP module present, then return success. Return success since
2909 	 * an SFP not present error is not acceptable in the setup MAC link flow.
2910 */
2911 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2912 return IXGBE_SUCCESS;
2913
2914 if (ret_val != IXGBE_SUCCESS)
2915 return ret_val;
2916
2917 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2918 /* Configure internal PHY for native SFI based on module type */
2919 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2920 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2921 				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2922
2923 if (ret_val != IXGBE_SUCCESS)
2924 return ret_val;
2925
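		/* The ST20 field selects the SFI mode: 10G DA for linear
		 * (passive direct attach) modules, 10G SR otherwise.
		 */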
2926 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2927 if (!setup_linear)
2928 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2929
2930 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2931 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2932 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2933
2934 if (ret_val != IXGBE_SUCCESS)
2935 return ret_val;
2936
2937 /* Setup SFI internal link. */
2938 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2939 } else {
2940 /* Configure internal PHY for KR/KX. */
2941 ixgbe_setup_kr_speed_x550em(hw, speed);
2942
2943 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2944 			/* No valid MDIO PHY address was found */
2945 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2946 return IXGBE_ERR_PHY_ADDR_INVALID;
2947 }
2948
2949 /* Get external PHY SKU id */
2950 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2951 				IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2952
2953 if (ret_val != IXGBE_SUCCESS)
2954 return ret_val;
2955
2956 /* When configuring quad port CS4223, the MAC instance is part
2957 * of the slice offset.
2958 */
2959 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2960 slice_offset = (hw->bus.lan_id +
2961 (hw->bus.instance_id << 1)) << 12;
2962 else
2963 slice_offset = hw->bus.lan_id << 12;
2964
2965 /* Configure CS4227/CS4223 LINE side to proper mode. */
2966 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2967
2968 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2969 					IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2970
2971 if (ret_val != IXGBE_SUCCESS)
2972 return ret_val;
2973
2974 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2975 (IXGBE_CS4227_EDC_MODE_SR << 1));
2976
2977 if (setup_linear)
2978 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2979 else
2980 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2981 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2982 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2983
2984 /* Flush previous write with a read */
2985 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2986 					IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2987 }
2988 return ret_val;
2989 }
2990
2991 /**
2992 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2993 * @hw: pointer to hardware structure
2994 *
2995  * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2996 **/
2997 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
2998 {
2999 struct ixgbe_mac_info *mac = &hw->mac;
3000 s32 status;
3001 u32 reg_val;
3002
3003 /* Disable training protocol FSM. */
3004 status = mac->ops.read_iosf_sb_reg(hw,
3005 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3006 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3007 if (status != IXGBE_SUCCESS)
3008 return status;
3009 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3010 status = mac->ops.write_iosf_sb_reg(hw,
3011 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3012 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3013 if (status != IXGBE_SUCCESS)
3014 return status;
3015
3016 /* Disable Flex from training TXFFE. */
3017 status = mac->ops.read_iosf_sb_reg(hw,
3018 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3019 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3020 if (status != IXGBE_SUCCESS)
3021 return status;
3022 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3023 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3024 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3025 status = mac->ops.write_iosf_sb_reg(hw,
3026 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3027 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3028 if (status != IXGBE_SUCCESS)
3029 return status;
3030 status = mac->ops.read_iosf_sb_reg(hw,
3031 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3032 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3033 if (status != IXGBE_SUCCESS)
3034 return status;
3035 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3036 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3037 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3038 status = mac->ops.write_iosf_sb_reg(hw,
3039 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3040 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3041 if (status != IXGBE_SUCCESS)
3042 return status;
3043
3044 /* Enable override for coefficients. */
3045 status = mac->ops.read_iosf_sb_reg(hw,
3046 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3047 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3048 if (status != IXGBE_SUCCESS)
3049 return status;
3050 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3051 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3052 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3053 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3054 status = mac->ops.write_iosf_sb_reg(hw,
3055 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3056 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3057 return status;
3058 }
3059
3060 /**
3061 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3062 * @hw: pointer to hardware structure
3063 * @speed: the link speed to force
3064 *
3065 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3066 * internal and external PHY at a specific speed, without autonegotiation.
3067 **/
3068 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3069 {
3070 struct ixgbe_mac_info *mac = &hw->mac;
3071 s32 status;
3072 u32 reg_val;
3073
3074 /* iXFI is only supported with X552 */
3075 if (mac->type != ixgbe_mac_X550EM_x)
3076 return IXGBE_ERR_LINK_SETUP;
3077
3078 /* Disable AN and force speed to 10G Serial. */
3079 status = mac->ops.read_iosf_sb_reg(hw,
3080 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3081 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3082 if (status != IXGBE_SUCCESS)
3083 return status;
3084
3085 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3086 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3087
3088 /* Select forced link speed for internal PHY. */
3089 switch (*speed) {
3090 case IXGBE_LINK_SPEED_10GB_FULL:
3091 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3092 break;
3093 case IXGBE_LINK_SPEED_1GB_FULL:
3094 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3095 break;
3096 default:
3097 /* Other link speeds are not supported by internal KR PHY. */
3098 return IXGBE_ERR_LINK_SETUP;
3099 }
3100
3101 status = mac->ops.write_iosf_sb_reg(hw,
3102 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3103 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3104 if (status != IXGBE_SUCCESS)
3105 return status;
3106
3107 /* Additional configuration needed for x550em_x */
3108 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3109 status = ixgbe_setup_ixfi_x550em_x(hw);
3110 if (status != IXGBE_SUCCESS)
3111 return status;
3112 }
3113
3114 /* Toggle port SW reset by AN reset. */
3115 status = ixgbe_restart_an_internal_phy_x550em(hw);
3116
3117 return status;
3118 }
3119
3120 /**
3121 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3122 * @hw: address of hardware structure
3123 * @link_up: address of boolean to indicate link status
3124 *
3125 * Returns error code if unable to get link status.
3126 */
3127 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3128 {
3129 u32 ret;
3130 u16 autoneg_status;
3131
3132 *link_up = FALSE;
3133
3134 /* read this twice back to back to indicate current status */
3135 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3136 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3137 &autoneg_status);
3138 if (ret != IXGBE_SUCCESS)
3139 return ret;
3140
3141 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3142 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3143 &autoneg_status);
3144 if (ret != IXGBE_SUCCESS)
3145 return ret;
3146
3147 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3148
3149 return IXGBE_SUCCESS;
3150 }
3151
3152 /**
3153 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3154  * @hw: pointer to hardware structure
3155 *
3156 * Configures the link between the integrated KR PHY and the external X557 PHY
3157 * The driver will call this function when it gets a link status change
3158 * interrupt from the X557 PHY. This function configures the link speed
3159 * between the PHYs to match the link speed of the BASE-T link.
3160 *
3161 * A return of a non-zero value indicates an error, and the base driver should
3162 * not report link up.
3163 */
3164 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3165 {
3166 ixgbe_link_speed force_speed;
3167 bool link_up;
3168 u32 status;
3169 u16 speed;
3170
3171 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3172 return IXGBE_ERR_CONFIG;
3173
3174 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3175 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3176 /* If link is down, there is no setup necessary so return */
3177 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3178 if (status != IXGBE_SUCCESS)
3179 return status;
3180
3181 if (!link_up)
3182 return IXGBE_SUCCESS;
3183
3184 status = hw->phy.ops.read_reg(hw,
3185 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3186 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3187 &speed);
3188 if (status != IXGBE_SUCCESS)
3189 return status;
3190
3191 /* If link is still down - no setup is required so return */
3192 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3193 if (status != IXGBE_SUCCESS)
3194 return status;
3195 if (!link_up)
3196 return IXGBE_SUCCESS;
3197
3198 /* clear everything but the speed and duplex bits */
3199 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3200
3201 switch (speed) {
3202 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3203 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3204 break;
3205 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3206 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3207 break;
3208 default:
3209 /* Internal PHY does not support anything else */
3210 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3211 }
3212
3213 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3214 } else {
3215 speed = IXGBE_LINK_SPEED_10GB_FULL |
3216 IXGBE_LINK_SPEED_1GB_FULL;
3217 return ixgbe_setup_kr_speed_x550em(hw, speed);
3218 }
3219 }
3220
3221 /**
3222 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3223 * @hw: pointer to hardware structure
3224 *
3225 * Configures the integrated KR PHY to use internal loopback mode.
3226 **/
3227 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3228 {
3229 s32 status;
3230 u32 reg_val;
3231
3232 /* Disable AN and force speed to 10G Serial. */
3233 status = hw->mac.ops.read_iosf_sb_reg(hw,
3234 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3235 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3236 if (status != IXGBE_SUCCESS)
3237 return status;
3238 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3239 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3240 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3241 status = hw->mac.ops.write_iosf_sb_reg(hw,
3242 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3243 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3244 if (status != IXGBE_SUCCESS)
3245 return status;
3246
3247 /* Set near-end loopback clocks. */
3248 status = hw->mac.ops.read_iosf_sb_reg(hw,
3249 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3250 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3251 if (status != IXGBE_SUCCESS)
3252 return status;
3253 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3254 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3255 status = hw->mac.ops.write_iosf_sb_reg(hw,
3256 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3257 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3258 if (status != IXGBE_SUCCESS)
3259 return status;
3260
3261 /* Set loopback enable. */
3262 status = hw->mac.ops.read_iosf_sb_reg(hw,
3263 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3264 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3265 if (status != IXGBE_SUCCESS)
3266 return status;
3267 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3268 status = hw->mac.ops.write_iosf_sb_reg(hw,
3269 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3270 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3271 if (status != IXGBE_SUCCESS)
3272 return status;
3273
3274 /* Training bypass. */
3275 status = hw->mac.ops.read_iosf_sb_reg(hw,
3276 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3277 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3278 if (status != IXGBE_SUCCESS)
3279 return status;
3280 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3281 status = hw->mac.ops.write_iosf_sb_reg(hw,
3282 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3283 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3284
3285 return status;
3286 }
3287
3288 /**
3289  * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command;
3290  * the SW/FW semaphore is acquired and released internally.
3291 * @hw: pointer to hardware structure
3292 * @offset: offset of word in the EEPROM to read
3293 * @data: word read from the EEPROM
3294 *
3295 * Reads a 16 bit word from the EEPROM using the hostif.
3296 **/
3297 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3298 {
3299 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3300 struct ixgbe_hic_read_shadow_ram buffer;
3301 s32 status;
3302
3303 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3304 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3305 buffer.hdr.req.buf_lenh = 0;
3306 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3307 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3308
3309 /* convert offset from words to bytes */
3310 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3311 /* one word */
3312 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3313 buffer.pad2 = 0;
3314 buffer.pad3 = 0;
3315
3316 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3317 if (status)
3318 return status;
3319
3320 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3321 IXGBE_HI_COMMAND_TIMEOUT);
3322 if (!status) {
3323 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3324 FW_NVM_DATA_OFFSET);
3325 }
3326
3327 hw->mac.ops.release_swfw_sync(hw, mask);
3328 return status;
3329 }
3330
3331 /**
3332  * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3333 * @hw: pointer to hardware structure
3334 * @offset: offset of word in the EEPROM to read
3335 * @words: number of words
3336 * @data: word(s) read from the EEPROM
3337 *
3338 * Reads a 16 bit word(s) from the EEPROM using the hostif.
3339 **/
3340 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3341 u16 offset, u16 words, u16 *data)
3342 {
3343 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3344 struct ixgbe_hic_read_shadow_ram buffer;
3345 u32 current_word = 0;
3346 u16 words_to_read;
3347 s32 status;
3348 u32 i;
3349
3350 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3351
3352 /* Take semaphore for the entire operation. */
3353 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3354 if (status) {
3355 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3356 return status;
3357 }
3358
3359 while (words) {
3360 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3361 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3362 else
3363 words_to_read = words;
3364
3365 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3366 buffer.hdr.req.buf_lenh = 0;
3367 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3368 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3369
3370 /* convert offset from words to bytes */
3371 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3372 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3373 buffer.pad2 = 0;
3374 buffer.pad3 = 0;
3375
3376 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3377 IXGBE_HI_COMMAND_TIMEOUT);
3378
3379 if (status) {
3380 DEBUGOUT("Host interface command failed\n");
3381 goto out;
3382 }
3383
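			/* Each 32-bit FLEX_MNG read returns two consecutive
			 * 16-bit EEPROM words, low word first.
			 */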
3384 for (i = 0; i < words_to_read; i++) {
3385 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3386 2 * i;
3387 u32 value = IXGBE_READ_REG(hw, reg);
3388
3389 data[current_word] = (u16)(value & 0xffff);
3390 current_word++;
3391 i++;
3392 if (i < words_to_read) {
3393 value >>= 16;
3394 data[current_word] = (u16)(value & 0xffff);
3395 current_word++;
3396 }
3397 }
3398 words -= words_to_read;
3399 }
3400
3401 out:
3402 hw->mac.ops.release_swfw_sync(hw, mask);
3403 return status;
3404 }
3405
3406 /**
3407  * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3408 * @hw: pointer to hardware structure
3409 * @offset: offset of word in the EEPROM to write
3410  * @data: word to write to the EEPROM
3411 *
3412 * Write a 16 bit word to the EEPROM using the hostif.
3413 **/
3414 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3415 u16 data)
3416 {
3417 s32 status;
3418 struct ixgbe_hic_write_shadow_ram buffer;
3419
3420 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3421
3422 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3423 buffer.hdr.req.buf_lenh = 0;
3424 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3425 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3426
3427 /* one word */
3428 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3429 buffer.data = data;
3430 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3431
3432 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3433 sizeof(buffer),
3434 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3435
3436 return status;
3437 }
3438
3439 /**
3440 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3441 * @hw: pointer to hardware structure
3442 * @offset: offset of word in the EEPROM to write
3443  * @data: word to write to the EEPROM
3444 *
3445 * Write a 16 bit word to the EEPROM using the hostif.
3446 **/
3447 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3448 u16 data)
3449 {
3450 s32 status = IXGBE_SUCCESS;
3451
3452 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3453
3454 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3455 IXGBE_SUCCESS) {
3456 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3457 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3458 } else {
3459 DEBUGOUT("write ee hostif failed to get semaphore");
3460 status = IXGBE_ERR_SWFW_SYNC;
3461 }
3462
3463 return status;
3464 }
3465
3466 /**
3467 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3468 * @hw: pointer to hardware structure
3469 * @offset: offset of word in the EEPROM to write
3470 * @words: number of words
3471  * @data: word(s) to write to the EEPROM
3472 *
3473 * Write a 16 bit word(s) to the EEPROM using the hostif.
3474 **/
3475 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3476 u16 offset, u16 words, u16 *data)
3477 {
3478 s32 status = IXGBE_SUCCESS;
3479 u32 i = 0;
3480
3481 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3482
3483 /* Take semaphore for the entire operation. */
3484 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3485 if (status != IXGBE_SUCCESS) {
3486 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3487 goto out;
3488 }
3489
3490 for (i = 0; i < words; i++) {
3491 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3492 data[i]);
3493
3494 if (status != IXGBE_SUCCESS) {
3495 DEBUGOUT("Eeprom buffered write failed\n");
3496 break;
3497 }
3498 }
3499
3500 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3501 out:
3502
3503 return status;
3504 }
3505
3506 /**
3507 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3508 * @hw: pointer to hardware structure
3509 * @ptr: pointer offset in eeprom
3510  * @size: size of section pointed to by ptr; if 0, the first word is used as the size
3511 * @csum: address of checksum to update
3512  * @buffer: pointer to buffer containing EEPROM image words (NULL to read from HW)
3513 * @buffer_size: size of buffer
3514 *
3515 * Returns error status for any failure
3516 */
3517 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3518 u16 size, u16 *csum, u16 *buffer,
3519 u32 buffer_size)
3520 {
3521 u16 buf[256];
3522 s32 status;
3523 u16 length, bufsz, i, start;
3524 u16 *local_buffer;
3525
3526 bufsz = sizeof(buf) / sizeof(buf[0]);
3527
3528 /* Read a chunk at the pointer location */
3529 if (!buffer) {
3530 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3531 if (status) {
3532 DEBUGOUT("Failed to read EEPROM image\n");
3533 return status;
3534 }
3535 local_buffer = buf;
3536 } else {
3537 if (buffer_size < ptr)
3538 return IXGBE_ERR_PARAM;
3539 local_buffer = &buffer[ptr];
3540 }
3541
3542 if (size) {
3543 start = 0;
3544 length = size;
3545 } else {
3546 start = 1;
3547 length = local_buffer[0];
3548
3549 /* Skip pointer section if length is invalid. */
3550 if (length == 0xFFFF || length == 0 ||
3551 (ptr + length) >= hw->eeprom.word_size)
3552 return IXGBE_SUCCESS;
3553 }
3554
3555 if (buffer && ((u32)start + (u32)length > buffer_size))
3556 return IXGBE_ERR_PARAM;
3557
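	/* Sum the section one word at a time, re-filling the local buffer
	 * in bufsz-word chunks when reading directly from the EEPROM
	 * (i.e. when no caller-supplied buffer is used).
	 */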
3558 for (i = start; length; i++, length--) {
3559 if (i == bufsz && !buffer) {
3560 ptr += bufsz;
3561 i = 0;
3562 if (length < bufsz)
3563 bufsz = length;
3564
3565 /* Read a chunk at the pointer location */
3566 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3567 bufsz, buf);
3568 if (status) {
3569 DEBUGOUT("Failed to read EEPROM image\n");
3570 return status;
3571 }
3572 }
3573 *csum += local_buffer[i];
3574 }
3575 return IXGBE_SUCCESS;
3576 }
3577
3578 /**
3579 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3580 * @hw: pointer to hardware structure
3581  * @buffer: pointer to buffer containing EEPROM image words (NULL to read from HW)
3582 * @buffer_size: size of buffer
3583 *
3584 * Returns a negative error code on error, or the 16-bit checksum
3585 **/
3586 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3587 {
3588 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3589 u16 *local_buffer;
3590 s32 status;
3591 u16 checksum = 0;
3592 u16 pointer, i, size;
3593
3594 	DEBUGFUNC("ixgbe_calc_checksum_X550");
3595
3596 hw->eeprom.ops.init_params(hw);
3597
3598 if (!buffer) {
3599 /* Read pointer area */
3600 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3601 IXGBE_EEPROM_LAST_WORD + 1,
3602 eeprom_ptrs);
3603 if (status) {
3604 DEBUGOUT("Failed to read EEPROM image\n");
3605 return status;
3606 }
3607 local_buffer = eeprom_ptrs;
3608 } else {
3609 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3610 return IXGBE_ERR_PARAM;
3611 local_buffer = buffer;
3612 }
3613
3614 /*
3615 * For X550 hardware, include 0x0-0x41 in the checksum and skip the
3616 * checksum word itself
3617 */
3618 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3619 if (i != IXGBE_EEPROM_CHECKSUM)
3620 checksum += local_buffer[i];
3621
3622 /*
3623 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3624 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3625 */
3626 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3627 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3628 continue;
3629
3630 pointer = local_buffer[i];
3631
3632 /* Skip pointer section if the pointer is invalid. */
3633 if (pointer == 0xFFFF || pointer == 0 ||
3634 pointer >= hw->eeprom.word_size)
3635 continue;
3636
3637 switch (i) {
3638 case IXGBE_PCIE_GENERAL_PTR:
3639 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3640 break;
3641 case IXGBE_PCIE_CONFIG0_PTR:
3642 case IXGBE_PCIE_CONFIG1_PTR:
3643 size = IXGBE_PCIE_CONFIG_SIZE;
3644 break;
3645 default:
3646 size = 0;
3647 break;
3648 }
3649
3650 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3651 buffer, buffer_size);
3652 if (status)
3653 return status;
3654 }
3655
3656 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3657
3658 return (s32)checksum;
3659 }
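
/*
 * Illustrative note (not part of the driver): the checksum returned above is
 * the value that makes the covered words sum to IXGBE_EEPROM_SUM, i.e.
 *
 *     checksum = IXGBE_EEPROM_SUM - sum(covered words)
 *     sum(covered words) + checksum == IXGBE_EEPROM_SUM   (mod 2^16)
 *
 * so ixgbe_validate_eeprom_checksum_X550() below only has to compare the
 * recomputed value against the word stored at IXGBE_EEPROM_CHECKSUM.
 */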
3660
3661 /**
3662 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3663 * @hw: pointer to hardware structure
3664 *
3665 * Returns a negative error code on error, or the 16-bit checksum
3666 **/
3667 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3668 {
3669 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3670 }
3671
3672 /**
3673 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3674 * @hw: pointer to hardware structure
3675 * @checksum_val: calculated checksum
3676 *
3677 * Performs checksum calculation and validates the EEPROM checksum. If the
3678 * caller does not need checksum_val, the value can be NULL.
3679 **/
3680 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3681 {
3682 s32 status;
3683 u16 checksum;
3684 u16 read_checksum = 0;
3685
3686 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3687
3688 /* Read the first word from the EEPROM. If this times out or fails, do
3689 * not continue or we could be in for a very long wait while every
3690 * EEPROM read fails
3691 */
3692 status = hw->eeprom.ops.read(hw, 0, &checksum);
3693 if (status) {
3694 DEBUGOUT("EEPROM read failed\n");
3695 return status;
3696 }
3697
3698 status = hw->eeprom.ops.calc_checksum(hw);
3699 if (status < 0)
3700 return status;
3701
3702 checksum = (u16)(status & 0xffff);
3703
3704 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3705 &read_checksum);
3706 if (status)
3707 return status;
3708
3709 /* Verify read checksum from EEPROM is the same as
3710 * calculated checksum
3711 */
3712 if (read_checksum != checksum) {
3713 status = IXGBE_ERR_EEPROM_CHECKSUM;
3714 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3715 "Invalid EEPROM checksum");
3716 }
3717
3718 /* If the user cares, return the calculated checksum */
3719 if (checksum_val)
3720 *checksum_val = checksum;
3721
3722 return status;
3723 }
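
/*
 * Hypothetical usage sketch (not part of the driver): a caller validating
 * the NVM image might use the routine above like this, where "dev" is the
 * caller's device handle:
 *
 *     u16 csum;
 *     s32 err = ixgbe_validate_eeprom_checksum_X550(hw, &csum);
 *
 *     if (err == IXGBE_ERR_EEPROM_CHECKSUM)
 *             device_printf(dev, "NVM checksum mismatch (calc 0x%04x)\n",
 *                 csum);
 *     else if (err != IXGBE_SUCCESS)
 *             device_printf(dev, "NVM checksum read failed\n");
 */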
3724
3725 /**
3726 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3727 * @hw: pointer to hardware structure
3728 *
3729 * After writing the EEPROM to shadow RAM using the EEWR register, software
3730 * calculates the checksum, writes it to the EEPROM, and instructs the
3731 * hardware to update the flash.
3732 **/
3733 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3734 {
3735 s32 status;
3736 u16 checksum = 0;
3737
3738 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3739
3740 /* Read the first word from the EEPROM. If this times out or fails, do
3741 * not continue or we could be in for a very long wait while every
3742 * EEPROM read fails
3743 */
3744 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3745 if (status) {
3746 DEBUGOUT("EEPROM read failed\n");
3747 return status;
3748 }
3749
3750 status = ixgbe_calc_eeprom_checksum_X550(hw);
3751 if (status < 0)
3752 return status;
3753
3754 checksum = (u16)(status & 0xffff);
3755
3756 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3757 checksum);
3758 if (status)
3759 return status;
3760
3761 status = ixgbe_update_flash_X550(hw);
3762
3763 return status;
3764 }
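
/*
 * Hypothetical usage sketch (not part of the driver): after changing a
 * shadow RAM word, the checksum and the flash copy must be refreshed.
 * "offset" and "new_word" stand in for the caller's word address and value:
 *
 *     if (ixgbe_write_ee_hostif_X550(hw, offset, new_word) == IXGBE_SUCCESS)
 *             (void)ixgbe_update_eeprom_checksum_X550(hw);
 */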
3765
3766 /**
3767 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3768 * @hw: pointer to hardware structure
3769 *
3770 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3771 **/
3772 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3773 {
3774 s32 status = IXGBE_SUCCESS;
3775 union ixgbe_hic_hdr2 buffer;
3776
3777 DEBUGFUNC("ixgbe_update_flash_X550");
3778
3779 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3780 buffer.req.buf_lenh = 0;
3781 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3782 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3783
3784 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3785 sizeof(buffer),
3786 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3787
3788 return status;
3789 }
3790
3791 /**
3792 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3793 * @hw: pointer to hardware structure
3794 *
3795 * Determines physical layer capabilities of the current configuration.
3796 **/
3797 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3798 {
3799 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3800 u16 ext_ability = 0;
3801
3802 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3803
3804 hw->phy.ops.identify(hw);
3805
3806 switch (hw->phy.type) {
3807 case ixgbe_phy_x550em_kr:
3808 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3809 if (hw->phy.nw_mng_if_sel &
3810 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3811 physical_layer =
3812 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3813 break;
3814 } else if (hw->device_id ==
3815 IXGBE_DEV_ID_X550EM_A_KR_L) {
3816 physical_layer =
3817 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3818 break;
3819 }
3820 }
3821 /* fall through */
3822 case ixgbe_phy_x550em_xfi:
3823 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3824 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3825 break;
3826 case ixgbe_phy_x550em_kx4:
3827 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3828 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3829 break;
3830 case ixgbe_phy_x550em_ext_t:
3831 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3832 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3833 &ext_ability);
3834 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3835 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3836 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3837 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3838 break;
3839 case ixgbe_phy_fw:
3840 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3841 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3842 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3843 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3844 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3845 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3846 break;
3847 case ixgbe_phy_sgmii:
3848 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3849 break;
3850 case ixgbe_phy_ext_1g_t:
3851 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3852 break;
3853 default:
3854 break;
3855 }
3856
3857 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3858 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3859
3860 return physical_layer;
3861 }
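
/*
 * Hypothetical usage sketch (not part of the driver): the return value is a
 * bit mask, so callers test individual IXGBE_PHYSICAL_LAYER_* bits rather
 * than comparing for equality:
 *
 *     u64 layer = ixgbe_get_supported_physical_layer_X550em(hw);
 *
 *     if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
 *             ... report 10GBASE-T support ...
 *     if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
 *             ... report 1000BASE-KX support ...
 */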
3862
3863 /**
3864 * ixgbe_get_bus_info_x550em - Set PCI bus info
3865 * @hw: pointer to hardware structure
3866 *
3867 * Sets bus link width and speed to unknown because X550em is
3868 * not a PCI device.
3869 **/
3870 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3871 {
3872
3873 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3874
3875 hw->bus.width = ixgbe_bus_width_unknown;
3876 hw->bus.speed = ixgbe_bus_speed_unknown;
3877
3878 hw->mac.ops.set_lan_id(hw);
3879
3880 return IXGBE_SUCCESS;
3881 }
3882
3883 /**
3884 * ixgbe_disable_rx_x550 - Disable RX unit
3885 * @hw: pointer to hardware structure
3886 *
3887 * Disables the Rx DMA unit for x550
3888 **/
3889 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3890 {
3891 u32 rxctrl, pfdtxgswc;
3892 s32 status;
3893 struct ixgbe_hic_disable_rxen fw_cmd;
3894
3895 DEBUGFUNC("ixgbe_disable_rx_dma_x550");
3896
3897 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3898 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3899 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3900 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3901 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3902 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3903 hw->mac.set_lben = TRUE;
3904 } else {
3905 hw->mac.set_lben = FALSE;
3906 }
3907
3908 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3909 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3910 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3911 fw_cmd.port_number = (u8)hw->bus.lan_id;
3912
3913 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3914 sizeof(struct ixgbe_hic_disable_rxen),
3915 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3916
3917 /* If the command fails, disable Rx using a register write */
3918 if (status) {
3919 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3920 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3921 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3922 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3923 }
3924 }
3925 }
3926 }
3927
3928 /**
3929 * ixgbe_enter_lplu_t_x550em - Transition to low power states
3930 * @hw: pointer to hardware structure
3931 *
3932 * Configures Low Power Link Up on transition to low power states
3933 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3934 * X557 PHY immediately prior to entering LPLU.
3935 **/
3936 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3937 {
3938 u16 an_10g_cntl_reg, autoneg_reg, speed;
3939 s32 status;
3940 ixgbe_link_speed lcd_speed;
3941 u32 save_autoneg;
3942 bool link_up;
3943
3944 /* SW LPLU not required on later HW revisions. */
3945 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3946 (IXGBE_FUSES0_REV_MASK &
3947 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3948 return IXGBE_SUCCESS;
3949
3950 /* If blocked by MNG FW, then don't restart AN */
3951 if (ixgbe_check_reset_blocked(hw))
3952 return IXGBE_SUCCESS;
3953
3954 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3955 if (status != IXGBE_SUCCESS)
3956 return status;
3957
3958 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3959
3960 if (status != IXGBE_SUCCESS)
3961 return status;
3962
3963 /* If link is down, LPLU is disabled in NVM, or neither WoL nor
3964 * manageability is enabled, then force link down by entering low power mode.
3965 */
3966 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3967 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3968 return ixgbe_set_copper_phy_power(hw, FALSE);
3969
3970 /* Determine LCD */
3971 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3972
3973 if (status != IXGBE_SUCCESS)
3974 return status;
3975
3976 /* If no valid LCD link speed, then force link down and exit. */
3977 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3978 return ixgbe_set_copper_phy_power(hw, FALSE);
3979
3980 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3981 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3982 &speed);
3983
3984 if (status != IXGBE_SUCCESS)
3985 return status;
3986
3987 /* If no link now, speed is invalid so take link down */
3988 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3989 if (status != IXGBE_SUCCESS)
3990 return ixgbe_set_copper_phy_power(hw, FALSE);
3991
3992 /* clear everything but the speed bits */
3993 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3994
3995 /* If current speed is already LCD, then exit. */
3996 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3997 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3998 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3999 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
4000 return status;
4001
4002 /* Clear AN completed indication */
4003 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
4004 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4005 &autoneg_reg);
4006
4007 if (status != IXGBE_SUCCESS)
4008 return status;
4009
4010 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4011 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4012 &an_10g_cntl_reg);
4013
4014 if (status != IXGBE_SUCCESS)
4015 return status;
4016
4017 status = hw->phy.ops.read_reg(hw,
4018 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4019 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4020 &autoneg_reg);
4021
4022 if (status != IXGBE_SUCCESS)
4023 return status;
4024
4025 save_autoneg = hw->phy.autoneg_advertised;
4026
4027 /* Set up the link at the lowest common link speed */
4028 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4029
4030 /* restore autoneg from before setting lplu speed */
4031 hw->phy.autoneg_advertised = save_autoneg;
4032
4033 return status;
4034 }
4035
4036 /**
4037 * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4038 * @hw: pointer to hardware structure
4039 * @lcd_speed: pointer to lowest common link speed
4040 *
4041 * Determine lowest common link speed with link partner.
4042 **/
4043 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4044 {
4045 u16 an_lp_status;
4046 s32 status;
4047 u16 word = hw->eeprom.ctrl_word_3;
4048
4049 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4050
4051 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4052 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4053 &an_lp_status);
4054
4055 if (status != IXGBE_SUCCESS)
4056 return status;
4057
4058 /* If link partner advertised 1G, return 1G */
4059 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4060 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4061 return status;
4062 }
4063
4064 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4065 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4066 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4067 return status;
4068
4069 /* Link partner not capable of lower speeds, return 10G */
4070 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4071 return status;
4072 }
4073
4074 /**
4075 * ixgbe_setup_fc_X550em - Set up flow control
4076 * @hw: pointer to hardware structure
4077 *
4078 * Called at init time to set up flow control.
4079 **/
4080 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4081 {
4082 s32 ret_val = IXGBE_SUCCESS;
4083 u32 pause, asm_dir, reg_val;
4084
4085 DEBUGFUNC("ixgbe_setup_fc_X550em");
4086
4087 /* Validate the requested mode */
4088 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4089 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4090 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4091 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4092 goto out;
4093 }
4094
4095 /* 10gig parts do not have a word in the EEPROM to determine the
4096 * default flow control setting, so we explicitly set it to full.
4097 */
4098 if (hw->fc.requested_mode == ixgbe_fc_default)
4099 hw->fc.requested_mode = ixgbe_fc_full;
4100
4101 /* Determine PAUSE and ASM_DIR bits. */
4102 switch (hw->fc.requested_mode) {
4103 case ixgbe_fc_none:
4104 pause = 0;
4105 asm_dir = 0;
4106 break;
4107 case ixgbe_fc_tx_pause:
4108 pause = 0;
4109 asm_dir = 1;
4110 break;
4111 case ixgbe_fc_rx_pause:
4112 /* Rx Flow control is enabled and Tx Flow control is
4113 * disabled by software override. Since there really
4114 * isn't a way to advertise that we are capable of RX
4115 * Pause ONLY, we will advertise that we support both
4116 * symmetric and asymmetric Rx PAUSE, as such we fall
4117 * through to the fc_full statement. Later, we will
4118 * disable the adapter's ability to send PAUSE frames.
4119 */
4120 case ixgbe_fc_full:
4121 pause = 1;
4122 asm_dir = 1;
4123 break;
4124 default:
4125 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4126 "Flow control param set incorrectly\n");
4127 ret_val = IXGBE_ERR_CONFIG;
4128 goto out;
4129 }
4130
4131 switch (hw->device_id) {
4132 case IXGBE_DEV_ID_X550EM_X_KR:
4133 case IXGBE_DEV_ID_X550EM_A_KR:
4134 case IXGBE_DEV_ID_X550EM_A_KR_L:
4135 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4136 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4137 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4138 if (ret_val != IXGBE_SUCCESS)
4139 goto out;
4140 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4141 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4142 if (pause)
4143 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4144 if (asm_dir)
4145 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4146 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4147 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4148 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4149
4150 /* This device does not fully support AN. */
4151 hw->fc.disable_fc_autoneg = TRUE;
4152 break;
4153 case IXGBE_DEV_ID_X550EM_X_XFI:
4154 hw->fc.disable_fc_autoneg = TRUE;
4155 break;
4156 default:
4157 break;
4158 }
4159
4160 out:
4161 return ret_val;
4162 }
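
/*
 * Summary of the advertisement encoding chosen above (taken from the switch
 * on hw->fc.requested_mode); PAUSE and ASM_DIR are the usual IEEE 802.3
 * pause/asymmetric-direction advertisement bits:
 *
 *     requested_mode        PAUSE   ASM_DIR
 *     ixgbe_fc_none           0        0
 *     ixgbe_fc_tx_pause       0        1
 *     ixgbe_fc_rx_pause       1        1    (advertised as full; Tx of
 *                                            PAUSE frames disabled later)
 *     ixgbe_fc_full           1        1
 */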
4163
4164 /**
4165 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4166 * @hw: pointer to hardware structure
4167 *
4168 * Enable flow control according to IEEE clause 37.
4169 **/
4170 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4171 {
4172 u32 link_s1, lp_an_page_low, an_cntl_1;
4173 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4174 ixgbe_link_speed speed;
4175 bool link_up;
4176
4177 /* AN should have completed when the cable was plugged in.
4178 * Look for reasons to bail out. Bail out if:
4179 * - FC autoneg is disabled, or if
4180 * - link is not up.
4181 */
4182 if (hw->fc.disable_fc_autoneg) {
4183 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4184 "Flow control autoneg is disabled");
4185 goto out;
4186 }
4187
4188 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4189 if (!link_up) {
4190 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4191 goto out;
4192 }
4193
4194 /* Check if auto-negotiation has completed */
4195 status = hw->mac.ops.read_iosf_sb_reg(hw,
4196 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4197 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4198
4199 if (status != IXGBE_SUCCESS ||
4200 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4201 DEBUGOUT("Auto-Negotiation did not complete\n");
4202 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4203 goto out;
4204 }
4205
4206 /* Read the 10g AN autoc and LP ability registers and resolve
4207 * local flow control settings accordingly
4208 */
4209 status = hw->mac.ops.read_iosf_sb_reg(hw,
4210 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4211 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4212
4213 if (status != IXGBE_SUCCESS) {
4214 DEBUGOUT("Auto-Negotiation did not complete\n");
4215 goto out;
4216 }
4217
4218 status = hw->mac.ops.read_iosf_sb_reg(hw,
4219 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4220 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4221
4222 if (status != IXGBE_SUCCESS) {
4223 DEBUGOUT("Auto-Negotiation did not complete\n");
4224 goto out;
4225 }
4226
4227 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4228 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4229 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4230 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4231 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4232
4233 out:
4234 if (status == IXGBE_SUCCESS) {
4235 hw->fc.fc_was_autonegged = TRUE;
4236 } else {
4237 hw->fc.fc_was_autonegged = FALSE;
4238 hw->fc.current_mode = hw->fc.requested_mode;
4239 }
4240 }
4241
4242 /**
4243 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4244 * @hw: pointer to hardware structure
4245 * Flow control is not negotiated for fiber; the requested settings are applied as-is.
4246 **/
4247 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4248 {
4249 hw->fc.fc_was_autonegged = FALSE;
4250 hw->fc.current_mode = hw->fc.requested_mode;
4251 }
4252
4253 /**
4254 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4255 * @hw: pointer to hardware structure
4256 *
4257 * Enable flow control according to IEEE clause 37.
4258 **/
4259 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4260 {
4261 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4262 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4263 ixgbe_link_speed speed;
4264 bool link_up;
4265
4266 /* AN should have completed when the cable was plugged in.
4267 * Look for reasons to bail out. Bail out if:
4268 * - FC autoneg is disabled, or if
4269 * - link is not up.
4270 */
4271 if (hw->fc.disable_fc_autoneg) {
4272 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4273 "Flow control autoneg is disabled");
4274 goto out;
4275 }
4276
4277 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4278 if (!link_up) {
4279 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4280 goto out;
4281 }
4282
4283 /* Check if auto-negotiation has completed */
4284 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4285 if (status != IXGBE_SUCCESS ||
4286 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4287 DEBUGOUT("Auto-Negotiation did not complete\n");
4288 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4289 goto out;
4290 }
4291
4292 /* Negotiate the flow control */
4293 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4294 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4295 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4296 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4297 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4298
4299 out:
4300 if (status == IXGBE_SUCCESS) {
4301 hw->fc.fc_was_autonegged = TRUE;
4302 } else {
4303 hw->fc.fc_was_autonegged = FALSE;
4304 hw->fc.current_mode = hw->fc.requested_mode;
4305 }
4306 }
4307
4308 /**
4309 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4310 * @hw: pointer to hardware structure
4311 *
4312 * Called at init time to set up flow control.
4313 **/
4314 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4315 {
4316 s32 status = IXGBE_SUCCESS;
4317 u32 an_cntl = 0;
4318
4319 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4320
4321 /* Validate the requested mode */
4322 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4323 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4324 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4325 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4326 }
4327
4328 if (hw->fc.requested_mode == ixgbe_fc_default)
4329 hw->fc.requested_mode = ixgbe_fc_full;
4330
4331 /* Set up the 1G and 10G flow control advertisement registers so the
4332 * HW will be able to do FC autoneg once the cable is plugged in. If
4333 * we link at 10G, the 1G advertisement is harmless and vice versa.
4334 */
4335 status = hw->mac.ops.read_iosf_sb_reg(hw,
4336 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4337 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4338
4339 if (status != IXGBE_SUCCESS) {
4340 DEBUGOUT("Auto-Negotiation did not complete\n");
4341 return status;
4342 }
4343
4344 /* The possible values of fc.requested_mode are:
4345 * 0: Flow control is completely disabled
4346 * 1: Rx flow control is enabled (we can receive pause frames,
4347 * but not send pause frames).
4348 * 2: Tx flow control is enabled (we can send pause frames but
4349 * we do not support receiving pause frames).
4350 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4351 * other: Invalid.
4352 */
4353 switch (hw->fc.requested_mode) {
4354 case ixgbe_fc_none:
4355 /* Flow control completely disabled by software override. */
4356 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4357 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4358 break;
4359 case ixgbe_fc_tx_pause:
4360 /* Tx Flow control is enabled, and Rx Flow control is
4361 * disabled by software override.
4362 */
4363 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4364 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4365 break;
4366 case ixgbe_fc_rx_pause:
4367 /* Rx Flow control is enabled and Tx Flow control is
4368 * disabled by software override. Since there really
4369 * isn't a way to advertise that we are capable of RX
4370 * Pause ONLY, we will advertise that we support both
4371 * symmetric and asymmetric Rx PAUSE, as such we fall
4372 * through to the fc_full statement. Later, we will
4373 * disable the adapter's ability to send PAUSE frames.
4374 */
4375 case ixgbe_fc_full:
4376 /* Flow control (both Rx and Tx) is enabled by SW override. */
4377 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4378 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4379 break;
4380 default:
4381 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4382 "Flow control param set incorrectly\n");
4383 return IXGBE_ERR_CONFIG;
4384 }
4385
4386 status = hw->mac.ops.write_iosf_sb_reg(hw,
4387 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4388 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4389
4390 /* Restart auto-negotiation. */
4391 status = ixgbe_restart_an_internal_phy_x550em(hw);
4392
4393 return status;
4394 }
4395
4396 /**
4397 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4398 * @hw: pointer to hardware structure
4399 * @state: set mux if 1, clear if 0
4400 */
4401 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4402 {
4403 u32 esdp;
4404
4405 if (!hw->bus.lan_id)
4406 return;
4407 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4408 if (state)
4409 esdp |= IXGBE_ESDP_SDP1;
4410 else
4411 esdp &= ~IXGBE_ESDP_SDP1;
4412 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4413 IXGBE_WRITE_FLUSH(hw);
4414 }
4415
4416 /**
4417 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4418 * @hw: pointer to hardware structure
4419 * @mask: Mask to specify which semaphore to acquire
4420 *
4421 * Acquires the SWFW semaphore and sets the I2C MUX
4422 **/
4423 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4424 {
4425 s32 status;
4426
4427 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4428
4429 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4430 if (status)
4431 return status;
4432
4433 if (mask & IXGBE_GSSR_I2C_MASK)
4434 ixgbe_set_mux(hw, 1);
4435
4436 return IXGBE_SUCCESS;
4437 }
4438
4439 /**
4440 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4441 * @hw: pointer to hardware structure
4442 * @mask: Mask to specify which semaphore to release
4443 *
4444 * Releases the SWFW semaphore and clears the I2C MUX
4445 **/
4446 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4447 {
4448 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4449
4450 if (mask & IXGBE_GSSR_I2C_MASK)
4451 ixgbe_set_mux(hw, 0);
4452
4453 ixgbe_release_swfw_sync_X540(hw, mask);
4454 }
4455
4456 /**
4457 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4458 * @hw: pointer to hardware structure
4459 * @mask: Mask to specify which semaphore to acquire
4460 *
4461 * Acquires the SWFW semaphore and gets the shared PHY token as needed
4462 */
4463 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4464 {
4465 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4466 int retries = FW_PHY_TOKEN_RETRIES;
4467 s32 status = IXGBE_SUCCESS;
4468
4469 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4470
4471 while (--retries) {
4472 status = IXGBE_SUCCESS;
4473 if (hmask)
4474 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4475 if (status) {
4476 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4477 status);
4478 return status;
4479 }
4480 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4481 return IXGBE_SUCCESS;
4482
4483 status = ixgbe_get_phy_token(hw);
4484 if (status == IXGBE_ERR_TOKEN_RETRY)
4485 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4486 status);
4487
4488 if (status == IXGBE_SUCCESS)
4489 return IXGBE_SUCCESS;
4490
4491 if (hmask)
4492 ixgbe_release_swfw_sync_X540(hw, hmask);
4493
4494 if (status != IXGBE_ERR_TOKEN_RETRY) {
4495 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4496 status);
4497 return status;
4498 }
4499 }
4500
4501 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4502 hw->phy.id);
4503 return status;
4504 }
4505
4506 /**
4507 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4508 * @hw: pointer to hardware structure
4509 * @mask: Mask to specify which semaphore to release
4510 *
4511 * Releases the SWFW semaphore and puts back the shared PHY token as needed
4512 */
4513 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4514 {
4515 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4516
4517 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4518
4519 if (mask & IXGBE_GSSR_TOKEN_SM)
4520 ixgbe_put_phy_token(hw);
4521
4522 if (hmask)
4523 ixgbe_release_swfw_sync_X540(hw, hmask);
4524 }
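
/*
 * Hypothetical usage sketch (not part of the driver): callers that need the
 * shared PHY OR the token bit into the semaphore mask and release with the
 * same mask, exactly as the PHY register accessors below do:
 *
 *     u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
 *
 *     if (hw->mac.ops.acquire_swfw_sync(hw, mask) != IXGBE_SUCCESS)
 *             return IXGBE_ERR_SWFW_SYNC;
 *     ... access the shared PHY ...
 *     hw->mac.ops.release_swfw_sync(hw, mask);
 */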
4525
4526 /**
4527 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4528 * @hw: pointer to hardware structure
4529 * @reg_addr: 32 bit address of PHY register to read
4530 * @device_type: 5 bit device type
4531 * @phy_data: Pointer to read data from PHY register
4532 *
4533 * Reads a value from a specified PHY register using the SWFW lock and PHY
4534 * Token. The PHY Token is needed since the MDIO is shared between to MAC
4535 * Token. The PHY Token is needed since the MDIO is shared between two MAC
4536 **/
4537 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4538 u32 device_type, u16 *phy_data)
4539 {
4540 s32 status;
4541 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4542
4543 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4544
4545 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4546 return IXGBE_ERR_SWFW_SYNC;
4547
4548 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4549
4550 hw->mac.ops.release_swfw_sync(hw, mask);
4551
4552 return status;
4553 }
4554
4555 /**
4556 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4557 * @hw: pointer to hardware structure
4558 * @reg_addr: 32 bit PHY register to write
4559 * @device_type: 5 bit device type
4560 * @phy_data: Data to write to the PHY register
4561 *
4562 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4563 * The PHY Token is needed since the MDIO is shared between two MAC instances.
4564 **/
4565 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4566 u32 device_type, u16 phy_data)
4567 {
4568 s32 status;
4569 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4570
4571 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4572
4573 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4574 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4575 phy_data);
4576 hw->mac.ops.release_swfw_sync(hw, mask);
4577 } else {
4578 status = IXGBE_ERR_SWFW_SYNC;
4579 }
4580
4581 return status;
4582 }
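
/*
 * Hypothetical usage sketch (not part of the driver): a read-modify-write of
 * a PHY register on the shared MDIO bus using the two routines above, with
 * "REG", "DEV" and "BIT" standing in for a real register address, device
 * type and bit mask:
 *
 *     u16 val;
 *
 *     if (ixgbe_read_phy_reg_x550a(hw, REG, DEV, &val) == IXGBE_SUCCESS) {
 *             val |= BIT;
 *             (void)ixgbe_write_phy_reg_x550a(hw, REG, DEV, val);
 *     }
 */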
4583
4584 /**
4585 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4586 * @hw: pointer to hardware structure
4587 *
4588 * Handle external Base T PHY interrupt. If a high temperature
4589 * failure alarm is raised, return an error; otherwise, if the link
4590 * status changed, set up the internal/external PHY link.
4591 *
4592 * Return IXGBE_ERR_OVERTEMP if the interrupt is a high temperature
4593 * failure alarm, else return the PHY access status.
4594 */
4595 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4596 {
4597 bool lsc;
4598 u32 status;
4599
4600 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4601
4602 if (status != IXGBE_SUCCESS)
4603 return status;
4604
4605 if (lsc)
4606 return ixgbe_setup_internal_phy(hw);
4607
4608 return IXGBE_SUCCESS;
4609 }
4610
4611 /**
4612 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4613 * @hw: pointer to hardware structure
4614 * @speed: new link speed
4615 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4616 *
4617 * Setup internal/external PHY link speed based on link speed, then set
4618 * external PHY auto advertised link speed.
4619 *
4620 * Returns error status for any failure
4621 **/
4622 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4623 ixgbe_link_speed speed,
4624 bool autoneg_wait_to_complete)
4625 {
4626 s32 status;
4627 ixgbe_link_speed force_speed;
4628
4629 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4630
4631 /* Set up the internal/external PHY link speed to iXFI (10G), unless
4632 * only 1G is auto advertised, in which case set up a KX link.
4633 */
4634 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4635 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4636 else
4637 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4638
4639 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4640 */
4641 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4642 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4643 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4644
4645 if (status != IXGBE_SUCCESS)
4646 return status;
4647 }
4648
4649 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4650 }
4651
4652 /**
4653 * ixgbe_check_link_t_X550em - Determine link and speed status
4654 * @hw: pointer to hardware structure
4655 * @speed: pointer to link speed
4656 * @link_up: TRUE when link is up
4657 * @link_up_wait_to_complete: bool used to wait for link up or not
4658 *
4659 * Check that both the MAC and X557 external PHY have link.
4660 **/
4661 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4662 bool *link_up, bool link_up_wait_to_complete)
4663 {
4664 u32 status;
4665 u16 i, autoneg_status = 0;
4666
4667 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4668 return IXGBE_ERR_CONFIG;
4669
4670 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4671 link_up_wait_to_complete);
4672
4673 /* If check link fails or MAC link is not up, then return */
4674 if (status != IXGBE_SUCCESS || !(*link_up))
4675 return status;
4676
4677 /* MAC link is up, so check the external PHY link.
4678 * The X557 PHY link status is latching low and can only be used to
4679 * detect a link drop; it does not report the current link state
4680 * without performing back-to-back reads.
4681 */
4682 for (i = 0; i < 2; i++) {
4683 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4684 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4685 &autoneg_status);
4686
4687 if (status != IXGBE_SUCCESS)
4688 return status;
4689 }
4690
4691 /* If external PHY link is not up, then indicate link not up */
4692 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4693 *link_up = FALSE;
4694
4695 return IXGBE_SUCCESS;
4696 }
4697
4698 /**
4699 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4700 * @hw: pointer to hardware structure
4701 **/
4702 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4703 {
4704 s32 status;
4705
4706 status = ixgbe_reset_phy_generic(hw);
4707
4708 if (status != IXGBE_SUCCESS)
4709 return status;
4710
4711 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4712 return ixgbe_enable_lasi_ext_t_x550em(hw);
4713 }
4714
4715 /**
4716 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4717 * @hw: pointer to hardware structure
4718 * @led_idx: led number to turn on
4719 **/
4720 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4721 {
4722 u16 phy_data;
4723
4724 DEBUGFUNC("ixgbe_led_on_t_X550em");
4725
4726 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4727 return IXGBE_ERR_PARAM;
4728
4729 /* To turn on the LED, set mode to ON. */
4730 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4731 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4732 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4733 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4734 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4735
4736 /* Some designs have the LEDs wired to the MAC */
4737 return ixgbe_led_on_generic(hw, led_idx);
4738 }
4739
4740 /**
4741 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4742 * @hw: pointer to hardware structure
4743 * @led_idx: led number to turn off
4744 **/
4745 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4746 {
4747 u16 phy_data;
4748
4749 DEBUGFUNC("ixgbe_led_off_t_X550em");
4750
4751 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4752 return IXGBE_ERR_PARAM;
4753
4754 /* To turn off the LED, set mode to OFF. */
4755 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4756 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4757 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4758 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4759 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4760
4761 /* Some designs have the LEDs wired to the MAC */
4762 return ixgbe_led_off_generic(hw, led_idx);
4763 }
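
/*
 * Hypothetical usage sketch (not part of the driver): a port-identify helper
 * could blink an LED by pairing the two routines above (msec_delay() is the
 * shared-code delay macro):
 *
 *     (void)ixgbe_led_on_t_X550em(hw, led_idx);
 *     msec_delay(500);
 *     (void)ixgbe_led_off_t_X550em(hw, led_idx);
 */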
4764
4765 /**
4766 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4767 * @hw: pointer to the HW structure
4768 * @maj: driver version major number
4769 * @min: driver version minor number
4770 * @build: driver version build number
4771 * @sub: driver version sub build number
4772 * @len: length of driver_ver string
4773 * @driver_ver: driver string
4774 *
4775 * Sends the driver version number to firmware through the manageability
4776 * block. Returns IXGBE_SUCCESS on success, IXGBE_ERR_SWFW_SYNC when an
4777 * error is encountered acquiring the semaphore, or
4778 * IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails.
4779 **/
4780 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4781 u8 build, u8 sub, u16 len, const char *driver_ver)
4782 {
4783 struct ixgbe_hic_drv_info2 fw_cmd;
4784 s32 ret_val = IXGBE_SUCCESS;
4785 int i;
4786
4787 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4788
4789 if ((len == 0) || (driver_ver == NULL) ||
4790 (len > sizeof(fw_cmd.driver_string)))
4791 return IXGBE_ERR_INVALID_ARGUMENT;
4792
4793 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4794 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4795 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4796 fw_cmd.port_num = (u8)hw->bus.func;
4797 fw_cmd.ver_maj = maj;
4798 fw_cmd.ver_min = min;
4799 fw_cmd.ver_build = build;
4800 fw_cmd.ver_sub = sub;
4801 fw_cmd.hdr.checksum = 0;
4802 memcpy(fw_cmd.driver_string, driver_ver, len);
4803 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4804 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4805
4806 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4807 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4808 sizeof(fw_cmd),
4809 IXGBE_HI_COMMAND_TIMEOUT,
4810 TRUE);
4811 if (ret_val != IXGBE_SUCCESS)
4812 continue;
4813
4814 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4815 FW_CEM_RESP_STATUS_SUCCESS)
4816 ret_val = IXGBE_SUCCESS;
4817 else
4818 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4819
4820 break;
4821 }
4822
4823 return ret_val;
4824 }
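
/*
 * Hypothetical usage sketch (not part of the driver): the host driver
 * typically reports its version to firmware once at attach time, e.g. with
 * a made-up version string:
 *
 *     static const char drv_ver[] = "4.0.1-k";
 *
 *     (void)ixgbe_set_fw_drv_ver_x550(hw, 4, 0, 1, 0,
 *         sizeof(drv_ver), drv_ver);
 */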
4825
4826 /**
4827 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4828 * @hw: pointer to hardware structure
4829 *
4830 * Returns TRUE if in FW NVM recovery mode.
4831 **/
4832 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4833 {
4834 u32 fwsm;
4835
4836 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4837
4838 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4839 }
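
/*
 * Hypothetical usage sketch (not part of the driver): callers are expected
 * to defer NVM and PHY work while the firmware is recovering:
 *
 *     if (ixgbe_fw_recovery_mode_X550(hw)) {
 *             ... skip the operation and report recovery mode ...
 *     }
 */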
4840