ixgbe_x550.c revision 1.17
1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
34
35 #include "ixgbe_x550.h"
36 #include "ixgbe_x540.h"
37 #include "ixgbe_type.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41 #include <dev/mii/mii.h>
42
43 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
44 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed,
46 bool autoneg_wait_to_complete);
47 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
48 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
49 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
50
51 /**
52 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
53 * @hw: pointer to hardware structure
54 *
55 * Initialize the function pointers and assign the MAC type for X550.
56 * Does not touch the hardware.
57 **/
58 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
59 {
60 struct ixgbe_mac_info *mac = &hw->mac;
61 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
62 s32 ret_val;
63
64 DEBUGFUNC("ixgbe_init_ops_X550");
65
66 ret_val = ixgbe_init_ops_X540(hw);
67 mac->ops.dmac_config = ixgbe_dmac_config_X550;
68 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
69 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
70 mac->ops.setup_eee = NULL;
71 mac->ops.set_source_address_pruning =
72 ixgbe_set_source_address_pruning_X550;
73 mac->ops.set_ethertype_anti_spoofing =
74 ixgbe_set_ethertype_anti_spoofing_X550;
75
76 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
78 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
79 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
80 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
81 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
82 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
83 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
84 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
85
86 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
87 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
88 mac->ops.mdd_event = ixgbe_mdd_event_X550;
89 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
90 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
91 mac->ops.disable_rx = ixgbe_disable_rx_x550;
92 /* Manageability interface */
93 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
94 switch (hw->device_id) {
95 case IXGBE_DEV_ID_X550EM_X_1G_T:
96 hw->mac.ops.led_on = NULL;
97 hw->mac.ops.led_off = NULL;
98 break;
99 case IXGBE_DEV_ID_X550EM_X_10G_T:
100 case IXGBE_DEV_ID_X550EM_A_10G_T:
101 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
102 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
103 break;
104 default:
105 break;
106 }
107 return ret_val;
108 }
109
110 /**
111 * ixgbe_read_cs4227 - Read CS4227 register
112 * @hw: pointer to hardware structure
113 * @reg: register number to read
114 * @value: pointer to receive value read
115 *
116 * Returns status code
117 **/
118 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
119 {
120 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
121 }
122
123 /**
124 * ixgbe_write_cs4227 - Write CS4227 register
125 * @hw: pointer to hardware structure
126 * @reg: register number to write
127 * @value: value to write to register
128 *
129 * Returns status code
130 **/
131 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
132 {
133 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
134 }
135
136 /**
137 * ixgbe_read_pe - Read register from port expander
138 * @hw: pointer to hardware structure
139 * @reg: register number to read
140 * @value: pointer to receive read value
141 *
142 * Returns status code
143 **/
144 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
145 {
146 s32 status;
147
148 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
149 if (status != IXGBE_SUCCESS)
150 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
151 "port expander access failed with %d\n", status);
152 return status;
153 }
154
155 /**
156 * ixgbe_write_pe - Write register to port expander
157 * @hw: pointer to hardware structure
158 * @reg: register number to write
159 * @value: value to write
160 *
161 * Returns status code
162 **/
163 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
164 {
165 s32 status;
166
167 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
168 if (status != IXGBE_SUCCESS)
169 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
170 "port expander access failed with %d\n", status);
171 return status;
172 }
173
174 /**
175 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
176 * @hw: pointer to hardware structure
177 *
178 * This function assumes that the caller has acquired the proper semaphore.
179 * Returns error code
180 **/
181 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
182 {
183 s32 status;
184 u32 retry;
185 u16 value;
186 u8 reg;
187
188 /* Trigger hard reset. */
189 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
190 if (status != IXGBE_SUCCESS)
191 return status;
192 reg |= IXGBE_PE_BIT1;
193 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
194 if (status != IXGBE_SUCCESS)
195 return status;
196
197 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
198 if (status != IXGBE_SUCCESS)
199 return status;
200 reg &= ~IXGBE_PE_BIT1;
201 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
202 if (status != IXGBE_SUCCESS)
203 return status;
204
205 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
206 if (status != IXGBE_SUCCESS)
207 return status;
208 reg &= ~IXGBE_PE_BIT1;
209 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
210 if (status != IXGBE_SUCCESS)
211 return status;
212
213 usec_delay(IXGBE_CS4227_RESET_HOLD);
214
215 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
216 if (status != IXGBE_SUCCESS)
217 return status;
218 reg |= IXGBE_PE_BIT1;
219 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
220 if (status != IXGBE_SUCCESS)
221 return status;
222
223 /* Wait for the reset to complete. */
224 msec_delay(IXGBE_CS4227_RESET_DELAY);
225 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
226 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
227 &value);
228 if (status == IXGBE_SUCCESS &&
229 value == IXGBE_CS4227_EEPROM_LOAD_OK)
230 break;
231 msec_delay(IXGBE_CS4227_CHECK_DELAY);
232 }
233 if (retry == IXGBE_CS4227_RETRIES) {
234 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
235 "CS4227 reset did not complete.");
236 return IXGBE_ERR_PHY;
237 }
238
239 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
240 if (status != IXGBE_SUCCESS ||
241 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
242 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
243 "CS4227 EEPROM did not load successfully.");
244 return IXGBE_ERR_PHY;
245 }
246
247 return IXGBE_SUCCESS;
248 }
249
250 /**
251 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
252 * @hw: pointer to hardware structure
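 *
 * Uses the CS4227 scratch register to coordinate the reset with the driver
 * instance on the other port: a RESET_PENDING value means the peer is already
 * resetting the device, and RESET_COMPLETE means the CS4227 is ready for use.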
253 **/
254 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
255 {
256 s32 status = IXGBE_SUCCESS;
257 u32 swfw_mask = hw->phy.phy_semaphore_mask;
258 u16 value = 0;
259 u8 retry;
260
261 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
262 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
263 if (status != IXGBE_SUCCESS) {
264 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
265 "semaphore failed with %d", status);
266 msec_delay(IXGBE_CS4227_CHECK_DELAY);
267 continue;
268 }
269
270 /* Get status of reset flow. */
271 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
272
273 if (status == IXGBE_SUCCESS &&
274 value == IXGBE_CS4227_RESET_COMPLETE)
275 goto out;
276
277 if (status != IXGBE_SUCCESS ||
278 value != IXGBE_CS4227_RESET_PENDING)
279 break;
280
281 /* Reset is pending. Wait and check again. */
282 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
283 msec_delay(IXGBE_CS4227_CHECK_DELAY);
284 }
285
286 /* If still pending, assume other instance failed. */
287 if (retry == IXGBE_CS4227_RETRIES) {
288 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
289 if (status != IXGBE_SUCCESS) {
290 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
291 "semaphore failed with %d", status);
292 return;
293 }
294 }
295
296 /* Reset the CS4227. */
297 status = ixgbe_reset_cs4227(hw);
298 if (status != IXGBE_SUCCESS) {
299 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
300 "CS4227 reset failed: %d", status);
301 goto out;
302 }
303
304 /* Reset takes so long, temporarily release semaphore in case the
305 * other driver instance is waiting for the reset indication.
306 */
307 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
308 IXGBE_CS4227_RESET_PENDING);
309 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
310 msec_delay(10);
311 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
312 if (status != IXGBE_SUCCESS) {
313 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
314 "semaphore failed with %d", status);
315 return;
316 }
317
318 /* Record completion for next time. */
319 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
320 IXGBE_CS4227_RESET_COMPLETE);
321
322 out:
323 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
324 msec_delay(hw->eeprom.semaphore_delay);
325 }
326
327 /**
328 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
329 * @hw: pointer to hardware structure
330 **/
331 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
332 {
333 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
334
335 if (hw->bus.lan_id) {
336 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
337 esdp |= IXGBE_ESDP_SDP1_DIR;
338 }
339 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
340 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
341 IXGBE_WRITE_FLUSH(hw);
342 }
343
344 /**
345 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
346 * @hw: pointer to hardware structure
347 * @reg_addr: 32 bit address of PHY register to read
348 * @dev_type: always unused
349 * @phy_data: Pointer to read data from PHY register
350 */
351 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
352 u32 dev_type, u16 *phy_data)
353 {
354 u32 i, data, command;
355 UNREFERENCED_1PARAMETER(dev_type);
356
357 /* Setup and write the read command */
358 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
359 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
360 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
361 IXGBE_MSCA_MDI_COMMAND;
362
363 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
364
365 /* Check every 10 usec to see if the access completed.
366 * The MDI Command bit will clear when the operation is
367 * complete
368 */
369 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
370 usec_delay(10);
371
372 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
373 if (!(command & IXGBE_MSCA_MDI_COMMAND))
374 break;
375 }
376
377 if (command & IXGBE_MSCA_MDI_COMMAND) {
378 ERROR_REPORT1(IXGBE_ERROR_POLLING,
379 "PHY read command did not complete.\n");
380 return IXGBE_ERR_PHY;
381 }
382
383 /* Read operation is complete. Get the data from MSRWD */
384 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
385 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
386 *phy_data = (u16)data;
387
388 return IXGBE_SUCCESS;
389 }
390
391 /**
392 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
393 * @hw: pointer to hardware structure
394 * @reg_addr: 32 bit PHY register to write
395 * @dev_type: always unused
396 * @phy_data: Data to write to the PHY register
397 */
398 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
399 u32 dev_type, u16 phy_data)
400 {
401 u32 i, command;
402 UNREFERENCED_1PARAMETER(dev_type);
403
404 /* Put the data in the MDI single read and write data register */
405 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
406
407 /* Setup and write the write command */
408 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
409 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
410 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
411 IXGBE_MSCA_MDI_COMMAND;
412
413 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
414
415 /* Check every 10 usec to see if the access completed.
416 * The MDI Command bit will clear when the operation is
417 * complete
418 */
419 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
420 usec_delay(10);
421
422 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
423 if (!(command & IXGBE_MSCA_MDI_COMMAND))
424 break;
425 }
426
427 if (command & IXGBE_MSCA_MDI_COMMAND) {
428 ERROR_REPORT1(IXGBE_ERROR_POLLING,
429 "PHY write cmd didn't complete\n");
430 return IXGBE_ERR_PHY;
431 }
432
433 return IXGBE_SUCCESS;
434 }
435
436 /**
437 * ixgbe_identify_phy_x550em - Get PHY type based on device id
438 * @hw: pointer to hardware structure
439 *
440 * Returns error code
441 */
442 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
443 {
444 hw->mac.ops.set_lan_id(hw);
445
446 ixgbe_read_mng_if_sel_x550em(hw);
447
448 switch (hw->device_id) {
449 case IXGBE_DEV_ID_X550EM_A_SFP:
450 return ixgbe_identify_sfp_module_X550em(hw);
451 case IXGBE_DEV_ID_X550EM_X_SFP:
452 /* set up for CS4227 usage */
453 ixgbe_setup_mux_ctl(hw);
454 ixgbe_check_cs4227(hw);
455 /* Fallthrough */
456
457 case IXGBE_DEV_ID_X550EM_A_SFP_N:
458 return ixgbe_identify_sfp_module_X550em(hw);
459 break;
460 case IXGBE_DEV_ID_X550EM_X_KX4:
461 hw->phy.type = ixgbe_phy_x550em_kx4;
462 break;
463 case IXGBE_DEV_ID_X550EM_X_XFI:
464 hw->phy.type = ixgbe_phy_x550em_xfi;
465 break;
466 case IXGBE_DEV_ID_X550EM_X_KR:
467 case IXGBE_DEV_ID_X550EM_A_KR:
468 case IXGBE_DEV_ID_X550EM_A_KR_L:
469 hw->phy.type = ixgbe_phy_x550em_kr;
470 break;
471 case IXGBE_DEV_ID_X550EM_A_10G_T:
472 case IXGBE_DEV_ID_X550EM_X_10G_T:
473 return ixgbe_identify_phy_generic(hw);
474 case IXGBE_DEV_ID_X550EM_X_1G_T:
475 hw->phy.type = ixgbe_phy_ext_1g_t;
476 break;
477 case IXGBE_DEV_ID_X550EM_A_1G_T:
478 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
479 hw->phy.type = ixgbe_phy_fw;
480 if (hw->bus.lan_id)
481 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
482 else
483 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
484 break;
485 default:
486 break;
487 }
488 return IXGBE_SUCCESS;
489 }
490
491 /**
492 * ixgbe_fw_phy_activity - Perform an activity on a PHY
493 * @hw: pointer to hardware structure
494 * @activity: activity to perform
495 * @data: Pointer to 4 32-bit words of data
496 */
497 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
498 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
499 {
500 union {
501 struct ixgbe_hic_phy_activity_req cmd;
502 struct ixgbe_hic_phy_activity_resp rsp;
503 } hic;
504 u16 retries = FW_PHY_ACT_RETRIES;
505 s32 rc;
506 u16 i;
507
508 do {
509 memset(&hic, 0, sizeof(hic));
510 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
511 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
512 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
513 hic.cmd.port_number = hw->bus.lan_id;
514 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
515 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
516 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
517
518 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
519 sizeof(hic.cmd),
520 IXGBE_HI_COMMAND_TIMEOUT,
521 TRUE);
522 if (rc != IXGBE_SUCCESS)
523 return rc;
524 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
525 FW_CEM_RESP_STATUS_SUCCESS) {
526 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
527 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
528 return IXGBE_SUCCESS;
529 }
530 usec_delay(20);
531 --retries;
532 } while (retries > 0);
533
534 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
535 }
536
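/*
 * Map of firmware link speed bits (FW_PHY_ACT_LINK_SPEED_*) to the driver's
 * ixgbe_link_speed flags.  Used below when decoding FW_PHY_ACT_GET_PHY_INFO
 * results and when building FW_PHY_ACT_SETUP_LINK requests.
 */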
537 static const struct {
538 u16 fw_speed;
539 ixgbe_link_speed phy_speed;
540 } ixgbe_fw_map[] = {
541 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
542 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
543 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
544 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
545 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
546 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
547 };
548
549 /**
550 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
551 * @hw: pointer to hardware structure
552 *
553 * Returns error code
554 */
555 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
556 {
557 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
558 u16 phy_speeds;
559 u16 phy_id_lo;
560 s32 rc;
561 u16 i;
562
563 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
564 if (rc)
565 return rc;
566
567 hw->phy.speeds_supported = 0;
568 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
569 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
570 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
571 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
572 }
573
574 #if 0
575 /*
576 * Don't set autoneg_advertised here, to avoid being inconsistent with
577 * the if_media value.
578 */
579 if (!hw->phy.autoneg_advertised)
580 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
581 #endif
582
583 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
584 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
585 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
586 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
587 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
588 return IXGBE_ERR_PHY_ADDR_INVALID;
589 return IXGBE_SUCCESS;
590 }
591
592 /**
593 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
594 * @hw: pointer to hardware structure
595 *
596 * Returns error code
597 */
598 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
599 {
600 if (hw->bus.lan_id)
601 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
602 else
603 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
604
605 hw->phy.type = ixgbe_phy_fw;
606 hw->phy.ops.read_reg = NULL;
607 hw->phy.ops.write_reg = NULL;
608 return ixgbe_get_phy_id_fw(hw);
609 }
610
611 /**
612 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
613 * @hw: pointer to hardware structure
614 *
615 * Returns error code
616 */
617 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
618 {
619 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
620
621 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
622 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
623 }
624
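/**
 * ixgbe_read_phy_reg_x550em - Stub PHY register read
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to read
 * @device_type: unused
 * @phy_data: pointer to read data from PHY register
 *
 * Placeholder read op; always returns IXGBE_NOT_IMPLEMENTED.
 */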
625 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
626 u32 device_type, u16 *phy_data)
627 {
628 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
629 return IXGBE_NOT_IMPLEMENTED;
630 }
631
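/**
 * ixgbe_write_phy_reg_x550em - Stub PHY register write
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to write
 * @device_type: unused
 * @phy_data: data to write to the PHY register
 *
 * Placeholder write op; always returns IXGBE_NOT_IMPLEMENTED.
 */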
632 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
633 u32 device_type, u16 phy_data)
634 {
635 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
636 return IXGBE_NOT_IMPLEMENTED;
637 }
638
639 /**
640 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
641 * @hw: pointer to the hardware structure
642 * @addr: I2C bus address to read from
643 * @reg: I2C device register to read from
644 * @val: pointer to location to receive read value
645 *
646 * Returns an error code on error.
647 **/
648 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
649 u16 reg, u16 *val)
650 {
651 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
652 }
653
654 /**
655 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
656 * @hw: pointer to the hardware structure
657 * @addr: I2C bus address to read from
658 * @reg: I2C device register to read from
659 * @val: pointer to location to receive read value
660 *
661 * Returns an error code on error.
662 **/
663 static s32
664 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
665 u16 reg, u16 *val)
666 {
667 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
668 }
669
670 /**
671 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
672 * @hw: pointer to the hardware structure
673 * @addr: I2C bus address to write to
674 * @reg: I2C device register to write to
675 * @val: value to write
676 *
677 * Returns an error code on error.
678 **/
679 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
680 u8 addr, u16 reg, u16 val)
681 {
682 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
683 }
684
685 /**
686 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
687 * @hw: pointer to the hardware structure
688 * @addr: I2C bus address to write to
689 * @reg: I2C device register to write to
690 * @val: value to write
691 *
692 * Returns an error code on error.
693 **/
694 static s32
695 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
696 u8 addr, u16 reg, u16 val)
697 {
698 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
699 }
700
701 /**
702 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
703 * @hw: pointer to hardware structure
704 *
705 * Initialize the function pointers and assign the MAC type for X550EM.
706 * Does not touch the hardware.
707 **/
708 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
709 {
710 struct ixgbe_mac_info *mac = &hw->mac;
711 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
712 struct ixgbe_phy_info *phy = &hw->phy;
713 s32 ret_val;
714
715 DEBUGFUNC("ixgbe_init_ops_X550EM");
716
717 /* Similar to X550 so start there. */
718 ret_val = ixgbe_init_ops_X550(hw);
719
720 /* Since this function eventually calls
721 * ixgbe_init_ops_X540 by design, we are setting
722 * the pointers to NULL explicitly here to overwrite
723 * the values being set in the x540 function.
724 */
725
726 /* Bypass not supported in x550EM */
727 mac->ops.bypass_rw = NULL;
728 mac->ops.bypass_valid_rd = NULL;
729 mac->ops.bypass_set = NULL;
730 mac->ops.bypass_rd_eep = NULL;
731
732 /* FCOE not supported in x550EM */
733 mac->ops.get_san_mac_addr = NULL;
734 mac->ops.set_san_mac_addr = NULL;
735 mac->ops.get_wwn_prefix = NULL;
736 mac->ops.get_fcoe_boot_status = NULL;
737
738 /* IPsec not supported in x550EM */
739 mac->ops.disable_sec_rx_path = NULL;
740 mac->ops.enable_sec_rx_path = NULL;
741
742 /* AUTOC register is not present in x550EM. */
743 mac->ops.prot_autoc_read = NULL;
744 mac->ops.prot_autoc_write = NULL;
745
746 /* X550EM bus type is internal */
747 hw->bus.type = ixgbe_bus_type_internal;
748 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
749
750
751 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
752 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
753 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
754 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
755 mac->ops.get_supported_physical_layer =
756 ixgbe_get_supported_physical_layer_X550em;
757
758 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
759 mac->ops.setup_fc = ixgbe_setup_fc_generic;
760 else
761 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
762
763 /* PHY */
764 phy->ops.init = ixgbe_init_phy_ops_X550em;
765 switch (hw->device_id) {
766 case IXGBE_DEV_ID_X550EM_A_1G_T:
767 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
768 mac->ops.setup_fc = NULL;
769 phy->ops.identify = ixgbe_identify_phy_fw;
770 phy->ops.set_phy_power = NULL;
771 phy->ops.get_firmware_version = NULL;
772 break;
773 case IXGBE_DEV_ID_X550EM_X_1G_T:
774 mac->ops.setup_fc = NULL;
775 phy->ops.identify = ixgbe_identify_phy_x550em;
776 phy->ops.set_phy_power = NULL;
777 break;
778 default:
779 phy->ops.identify = ixgbe_identify_phy_x550em;
780 }
781
782 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
783 phy->ops.set_phy_power = NULL;
784
785
786 /* EEPROM */
787 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
788 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
789 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
790 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
791 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
792 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
793 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
794 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
795
796 return ret_val;
797 }
798
799 #define IXGBE_DENVERTON_WA 1
800
801 /**
802 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
803 * @hw: pointer to hardware structure
804 */
805 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
806 {
807 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
808 s32 rc;
809 #ifdef IXGBE_DENVERTON_WA
810 s32 ret_val;
811 u16 phydata;
812 #endif
813 u16 i;
814
815 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
816 return 0;
817
818 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
819 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
820 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
821 return IXGBE_ERR_INVALID_LINK_SETTINGS;
822 }
823
824 switch (hw->fc.requested_mode) {
825 case ixgbe_fc_full:
826 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
827 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
828 break;
829 case ixgbe_fc_rx_pause:
830 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
831 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
832 break;
833 case ixgbe_fc_tx_pause:
834 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
835 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
836 break;
837 default:
838 break;
839 }
840
841 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
842 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
843 setup[0] |= ixgbe_fw_map[i].fw_speed;
844 }
845 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
846
847 if (hw->phy.eee_speeds_advertised)
848 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
849
850 #ifdef IXGBE_DENVERTON_WA
851 if ((hw->phy.force_10_100_autonego == false)
852 && ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
853 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL))) {
854 /* Don't use auto-nego for 10/100Mbps */
855 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
856 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
857 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
858 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
859 }
860 #endif
861
862 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
863 if (rc)
864 return rc;
865
866 #ifdef IXGBE_DENVERTON_WA
867 if (hw->phy.force_10_100_autonego == true)
868 goto out;
869
870 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
871 if (ret_val != 0)
872 goto out;
873
874 /*
875 * Broken firmware sets BMCR register incorrectly if
876 * FW_PHY_ACT_SETUP_LINK_AN isn't set.
877 * a) FDX may not be set.
878 * b) BMCR_SPEED1 (bit 6) is always cleared.
879 * +--------+------+-----------+------+---------------------------+
880 * |request | BMCR | BMCR spd  | BMCR |                           |
881 * |        | (HEX)| (in bits) | FDX  |                           |
882 * +--------+------+-----------+------+---------------------------+
883 * | 10M    | 0000 | 10M(00)   |  0   |                           |
884 * | 10M    | 2000 | 100M(01)  |  0   | (I've never observed this)|
885 * | 10M    | 2100 | 100M(01)  |  1   |                           |
886 * | 100M   | 0000 | 10M(00)   |  0   |                           |
887 * | 100M   | 0100 | 10M(00)   |  1   |                           |
888 * +--------+------+-----------+------+---------------------------+
889 */
890 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
891 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
892 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
893 && (((phydata & BMCR_FDX) == 0)
894 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
895 phydata = BMCR_FDX;
896 switch (hw->phy.autoneg_advertised) {
897 case IXGBE_LINK_SPEED_10_FULL:
898 phydata |= BMCR_S10;
899 break;
900 case IXGBE_LINK_SPEED_100_FULL:
901 phydata |= BMCR_S100;
902 break;
903 case IXGBE_LINK_SPEED_1GB_FULL:
904 panic("%s: 1GB_FULL is set", __func__);
905 break;
906 default:
907 break;
908 }
909 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
910 if (ret_val != 0)
911 return ret_val;
912 }
913 out:
914 #endif
915 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
916 return IXGBE_ERR_OVERTEMP;
917 return IXGBE_SUCCESS;
918 }
919
920 /**
921 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
922 * @hw: pointer to hardware structure
923 *
924 * Called at init time to set up flow control.
925 */
926 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
927 {
928 if (hw->fc.requested_mode == ixgbe_fc_default)
929 hw->fc.requested_mode = ixgbe_fc_full;
930
931 return ixgbe_setup_fw_link(hw);
932 }
933
934 /**
935 * ixgbe_setup_eee_fw - Enable/disable EEE support
936 * @hw: pointer to the HW structure
937 * @enable_eee: boolean flag to enable EEE
938 *
939 * Enable/disable EEE based on enable_eee flag.
940 * This function controls EEE for firmware-based PHY implementations.
941 */
942 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
943 {
944 if (!!hw->phy.eee_speeds_advertised == enable_eee)
945 return IXGBE_SUCCESS;
946 if (enable_eee)
947 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
948 else
949 hw->phy.eee_speeds_advertised = 0;
950 return hw->phy.ops.setup_link(hw);
951 }
952
953 /**
954 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
955 * @hw: pointer to hardware structure
956 *
957 * Initialize the function pointers and assign the MAC type for X550EM_a.
958 * Does not touch the hardware.
959 **/
960 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
961 {
962 struct ixgbe_mac_info *mac = &hw->mac;
963 s32 ret_val;
964
965 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
966
967 /* Start with generic X550EM init */
968 ret_val = ixgbe_init_ops_X550EM(hw);
969
970 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
971 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
972 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
973 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
974 } else {
975 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
976 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
977 }
978 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
979 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
980
981 switch (mac->ops.get_media_type(hw)) {
982 case ixgbe_media_type_fiber:
983 mac->ops.setup_fc = NULL;
984 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
985 break;
986 case ixgbe_media_type_backplane:
987 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
988 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
989 break;
990 default:
991 break;
992 }
993
994 switch (hw->device_id) {
995 case IXGBE_DEV_ID_X550EM_A_1G_T:
996 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
997 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
998 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
999 mac->ops.setup_eee = ixgbe_setup_eee_fw;
1000 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
1001 IXGBE_LINK_SPEED_1GB_FULL;
1002 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1003 break;
1004 default:
1005 break;
1006 }
1007
1008 return ret_val;
1009 }
1010
1011 /**
1012 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1013 * @hw: pointer to hardware structure
1014 *
1015 * Initialize the function pointers and assign the MAC type for X550EM_x.
1016 * Does not touch the hardware.
1017 **/
1018 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1019 {
1020 struct ixgbe_mac_info *mac = &hw->mac;
1021 struct ixgbe_link_info *link = &hw->link;
1022 s32 ret_val;
1023
1024 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1025
1026 /* Start with generic X550EM init */
1027 ret_val = ixgbe_init_ops_X550EM(hw);
1028
1029 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1030 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1031 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1032 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1033 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1034 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1035 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1036 link->ops.write_link_unlocked =
1037 ixgbe_write_i2c_combined_generic_unlocked;
1038 link->addr = IXGBE_CS4227;
1039
1040 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1041 mac->ops.setup_fc = NULL;
1042 mac->ops.setup_eee = NULL;
1043 mac->ops.init_led_link_act = NULL;
1044 }
1045
1046 return ret_val;
1047 }
1048
1049 /**
1050 * ixgbe_dmac_config_X550
1051 * @hw: pointer to hardware structure
1052 *
1053 * Configure DMA coalescing. If enabling dmac, dmac is activated.
1054 * When disabling dmac, the dmac enable bit is cleared.
1055 **/
1056 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1057 {
1058 u32 reg, high_pri_tc;
1059
1060 DEBUGFUNC("ixgbe_dmac_config_X550");
1061
1062 /* Disable DMA coalescing before configuring */
1063 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1064 reg &= ~IXGBE_DMACR_DMAC_EN;
1065 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1066
1067 /* Disable DMA Coalescing if the watchdog timer is 0 */
1068 if (!hw->mac.dmac_config.watchdog_timer)
1069 goto out;
1070
1071 ixgbe_dmac_config_tcs_X550(hw);
1072
1073 /* Configure DMA Coalescing Control Register */
1074 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1075
1076 /* Set the watchdog timer in units of 40.96 usec */
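/* For example, assuming watchdog_timer is given in usec, a 10000 usec
 * setting becomes (10000 * 100) / 4096 = 244 ticks of 40.96 usec (~10 ms).
 */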
1077 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1078 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1079
1080 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1081 /* If fcoe is enabled, set high priority traffic class */
1082 if (hw->mac.dmac_config.fcoe_en) {
1083 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1084 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1085 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1086 }
1087 reg |= IXGBE_DMACR_EN_MNG_IND;
1088
1089 /* Enable DMA coalescing after configuration */
1090 reg |= IXGBE_DMACR_DMAC_EN;
1091 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1092
1093 out:
1094 return IXGBE_SUCCESS;
1095 }
1096
1097 /**
1098 * ixgbe_dmac_config_tcs_X550
1099 * @hw: pointer to hardware structure
1100 *
1101 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1102 * be cleared before configuring.
1103 **/
1104 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1105 {
1106 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1107
1108 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1109
1110 /* Select the Rx packet buffer headroom based on link speed */
1111 switch (hw->mac.dmac_config.link_speed) {
1112 case IXGBE_LINK_SPEED_10_FULL:
1113 case IXGBE_LINK_SPEED_100_FULL:
1114 pb_headroom = IXGBE_DMACRXT_100M;
1115 break;
1116 case IXGBE_LINK_SPEED_1GB_FULL:
1117 pb_headroom = IXGBE_DMACRXT_1G;
1118 break;
1119 default:
1120 pb_headroom = IXGBE_DMACRXT_10G;
1121 break;
1122 }
1123
1124 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1125 IXGBE_MHADD_MFS_SHIFT) / 1024);
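/* e.g. a 9728 byte maximum frame size truncates to 9728 / 1024 = 9 KB */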
1126
1127 /* Set the per Rx packet buffer receive threshold */
1128 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1129 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1130 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1131
1132 if (tc < hw->mac.dmac_config.num_tcs) {
1133 /* Get Rx PB size */
1134 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1135 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1136 IXGBE_RXPBSIZE_SHIFT;
1137
1138 /* Calculate receive buffer threshold in kilobytes */
1139 if (rx_pb_size > pb_headroom)
1140 rx_pb_size = rx_pb_size - pb_headroom;
1141 else
1142 rx_pb_size = 0;
1143
1144 /* The DMCTH threshold must be at least the max frame size (MFS) */
1145 reg |= (rx_pb_size > maxframe_size_kb) ?
1146 rx_pb_size : maxframe_size_kb;
1147 }
1148 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1149 }
1150 return IXGBE_SUCCESS;
1151 }
1152
1153 /**
1154 * ixgbe_dmac_update_tcs_X550
1155 * @hw: pointer to hardware structure
1156 *
1157 * Disables dmac, updates per TC settings, and then enables dmac.
1158 **/
1159 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1160 {
1161 u32 reg;
1162
1163 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1164
1165 /* Disable DMA coalescing before configuring */
1166 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1167 reg &= ~IXGBE_DMACR_DMAC_EN;
1168 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1169
1170 ixgbe_dmac_config_tcs_X550(hw);
1171
1172 /* Enable DMA coalescing after configuration */
1173 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1174 reg |= IXGBE_DMACR_DMAC_EN;
1175 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1176
1177 return IXGBE_SUCCESS;
1178 }
1179
1180 /**
1181 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1182 * @hw: pointer to hardware structure
1183 *
1184 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1185 * ixgbe_hw struct in order to set up EEPROM access.
1186 **/
1187 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1188 {
1189 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1190 u32 eec;
1191 u16 eeprom_size;
1192
1193 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1194
1195 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1196 eeprom->semaphore_delay = 10;
1197 eeprom->type = ixgbe_flash;
1198
1199 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1200 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1201 IXGBE_EEC_SIZE_SHIFT);
1202 eeprom->word_size = 1 << (eeprom_size +
1203 IXGBE_EEPROM_WORD_SIZE_SHIFT);
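/* For example, assuming IXGBE_EEPROM_WORD_SIZE_SHIFT is 6, an EEC size
 * field of 4 gives a word_size of 1 << (4 + 6) = 1024 words.
 */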
1204
1205 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1206 eeprom->type, eeprom->word_size);
1207 }
1208
1209 return IXGBE_SUCCESS;
1210 }
1211
1212 /**
1213 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1214 * @hw: pointer to hardware structure
1215 * @enable: enable or disable source address pruning
1216 * @pool: Rx pool to set source address pruning for
1217 **/
1218 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1219 unsigned int pool)
1220 {
1221 u64 pfflp;
1222
1223 /* max rx pool is 63 */
1224 if (pool > 63)
1225 return;
1226
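/* PFFLPL holds pools 0-31 and PFFLPH pools 32-63; combine them into one
 * 64 bit mask before updating the requested pool's bit.
 */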
1227 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1228 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1229
1230 if (enable)
1231 pfflp |= (1ULL << pool);
1232 else
1233 pfflp &= ~(1ULL << pool);
1234
1235 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1236 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1237 }
1238
1239 /**
1240 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1241 * @hw: pointer to hardware structure
1242 * @enable: enable or disable switch for Ethertype anti-spoofing
1243 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1244 *
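 * For example, VF 10 selects PFVFSPOOF register 10 >> 3 = 1 and bit position
 * (10 % 8) + IXGBE_SPOOF_ETHERTYPEAS_SHIFT within that register.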
1245 **/
1246 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1247 bool enable, int vf)
1248 {
1249 int vf_target_reg = vf >> 3;
1250 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1251 u32 pfvfspoof;
1252
1253 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1254
1255 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1256 if (enable)
1257 pfvfspoof |= (1 << vf_target_shift);
1258 else
1259 pfvfspoof &= ~(1 << vf_target_shift);
1260
1261 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1262 }
1263
1264 /**
1265 * ixgbe_iosf_wait - Wait for IOSF command completion
1266 * @hw: pointer to hardware structure
1267 * @ctrl: pointer to location to receive final IOSF control value
1268 *
1269 * Returns failing status on timeout
1270 *
1271 * Note: ctrl can be NULL if the IOSF control register value is not needed
1272 **/
1273 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1274 {
1275 u32 i, command = 0;
1276
1277 /* Check every 10 usec to see if the address cycle completed.
1278 * The SB IOSF BUSY bit will clear when the operation is
1279 * complete
1280 */
1281 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1282 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1283 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1284 break;
1285 usec_delay(10);
1286 }
1287 if (ctrl)
1288 *ctrl = command;
1289 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1290 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1291 return IXGBE_ERR_PHY;
1292 }
1293
1294 return IXGBE_SUCCESS;
1295 }
1296
1297 /**
1298 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1299 * of the IOSF device
1300 * @hw: pointer to hardware structure
1301 * @reg_addr: 32 bit PHY register to write
1302 * @device_type: 3 bit device type
1303 * @data: Data to write to the register
1304 **/
1305 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1306 u32 device_type, u32 data)
1307 {
1308 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1309 u32 command, error __unused;
1310 s32 ret;
1311
1312 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1313 if (ret != IXGBE_SUCCESS)
1314 return ret;
1315
1316 ret = ixgbe_iosf_wait(hw, NULL);
1317 if (ret != IXGBE_SUCCESS)
1318 goto out;
1319
1320 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1321 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1322
1323 /* Write IOSF control register */
1324 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1325
1326 /* Write IOSF data register */
1327 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1328
1329 ret = ixgbe_iosf_wait(hw, &command);
1330
1331 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1332 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1333 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1334 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1335 "Failed to write, error %x\n", error);
1336 ret = IXGBE_ERR_PHY;
1337 }
1338
1339 out:
1340 ixgbe_release_swfw_semaphore(hw, gssr);
1341 return ret;
1342 }
1343
1344 /**
1345 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1346 * @hw: pointer to hardware structure
1347 * @reg_addr: 32 bit PHY register to write
1348 * @device_type: 3 bit device type
1349 * @data: Pointer to read data from the register
1350 **/
1351 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1352 u32 device_type, u32 *data)
1353 {
1354 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1355 u32 command, error __unused;
1356 s32 ret;
1357
1358 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1359 if (ret != IXGBE_SUCCESS)
1360 return ret;
1361
1362 ret = ixgbe_iosf_wait(hw, NULL);
1363 if (ret != IXGBE_SUCCESS)
1364 goto out;
1365
1366 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1367 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1368
1369 /* Write IOSF control register */
1370 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1371
1372 ret = ixgbe_iosf_wait(hw, &command);
1373
1374 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1375 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1376 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1377 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1378 "Failed to read, error %x\n", error);
1379 ret = IXGBE_ERR_PHY;
1380 }
1381
1382 if (ret == IXGBE_SUCCESS)
1383 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1384
1385 out:
1386 ixgbe_release_swfw_semaphore(hw, gssr);
1387 return ret;
1388 }
1389
1390 /**
1391 * ixgbe_get_phy_token - Get the token for shared phy access
1392 * @hw: Pointer to hardware structure
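 *
 * The token serializes access to a PHY that is shared between ports; callers
 * are expected to release it again with ixgbe_put_phy_token() when done.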
1393 */
1394
1395 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1396 {
1397 struct ixgbe_hic_phy_token_req token_cmd;
1398 s32 status;
1399
1400 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1401 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1402 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1403 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1404 token_cmd.port_number = hw->bus.lan_id;
1405 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1406 token_cmd.pad = 0;
1407 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1408 sizeof(token_cmd),
1409 IXGBE_HI_COMMAND_TIMEOUT,
1410 TRUE);
1411 if (status) {
1412 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1413 status);
1414 return status;
1415 }
1416 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1417 return IXGBE_SUCCESS;
1418 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1419 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1420 token_cmd.hdr.cmd_or_resp.ret_status);
1421 return IXGBE_ERR_FW_RESP_INVALID;
1422 }
1423
1424 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1425 return IXGBE_ERR_TOKEN_RETRY;
1426 }
1427
1428 /**
1429 * ixgbe_put_phy_token - Put the token for shared phy access
1430 * @hw: Pointer to hardware structure
1431 */
1432
1433 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1434 {
1435 struct ixgbe_hic_phy_token_req token_cmd;
1436 s32 status;
1437
1438 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1439 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1440 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1441 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1442 token_cmd.port_number = hw->bus.lan_id;
1443 token_cmd.command_type = FW_PHY_TOKEN_REL;
1444 token_cmd.pad = 0;
1445 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1446 sizeof(token_cmd),
1447 IXGBE_HI_COMMAND_TIMEOUT,
1448 TRUE);
1449 if (status)
1450 return status;
1451 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1452 return IXGBE_SUCCESS;
1453
1454 DEBUGOUT("Put PHY Token host interface command failed");
1455 return IXGBE_ERR_FW_RESP_INVALID;
1456 }
1457
1458 /**
1459 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1460 * of the IOSF device
1461 * @hw: pointer to hardware structure
1462 * @reg_addr: 32 bit PHY register to write
1463 * @device_type: 3 bit device type
1464 * @data: Data to write to the register
1465 **/
1466 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1467 u32 device_type, u32 data)
1468 {
1469 struct ixgbe_hic_internal_phy_req write_cmd;
1470 s32 status;
1471 UNREFERENCED_1PARAMETER(device_type);
1472
1473 memset(&write_cmd, 0, sizeof(write_cmd));
1474 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1475 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1476 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1477 write_cmd.port_number = hw->bus.lan_id;
1478 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1479 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1480 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1481
1482 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1483 sizeof(write_cmd),
1484 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1485
1486 return status;
1487 }
1488
1489 /**
1490 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1491 * @hw: pointer to hardware structure
1492 * @reg_addr: 32 bit PHY register to write
1493 * @device_type: 3 bit device type
1494 * @data: Pointer to read data from the register
1495 **/
1496 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1497 u32 device_type, u32 *data)
1498 {
1499 union {
1500 struct ixgbe_hic_internal_phy_req cmd;
1501 struct ixgbe_hic_internal_phy_resp rsp;
1502 } hic;
1503 s32 status;
1504 UNREFERENCED_1PARAMETER(device_type);
1505
1506 memset(&hic, 0, sizeof(hic));
1507 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1508 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1509 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1510 hic.cmd.port_number = hw->bus.lan_id;
1511 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1512 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1513
1514 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1515 sizeof(hic.cmd),
1516 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1517
1518 /* Extract the register value from the response. */
1519 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1520
1521 return status;
1522 }
1523
1524 /**
1525 * ixgbe_disable_mdd_X550
1526 * @hw: pointer to hardware structure
1527 *
1528 * Disable malicious driver detection
1529 **/
1530 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1531 {
1532 u32 reg;
1533
1534 DEBUGFUNC("ixgbe_disable_mdd_X550");
1535
1536 /* Disable MDD for TX DMA and interrupt */
1537 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1538 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1539 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1540
1541 /* Disable MDD for RX and interrupt */
1542 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1543 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1544 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1545 }
1546
1547 /**
1548 * ixgbe_enable_mdd_X550
1549 * @hw: pointer to hardware structure
1550 *
1551 * Enable malicious driver detection
1552 **/
1553 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1554 {
1555 u32 reg;
1556
1557 DEBUGFUNC("ixgbe_enable_mdd_X550");
1558
1559 /* Enable MDD for TX DMA and interrupt */
1560 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1561 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1562 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1563
1564 /* Enable MDD for RX and interrupt */
1565 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1566 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1567 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1568 }
1569
1570 /**
1571 * ixgbe_restore_mdd_vf_X550
1572 * @hw: pointer to hardware structure
1573 * @vf: vf index
1574 *
1575 * Restore VF that was disabled during malicious driver detection event
1576 **/
1577 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1578 {
1579 u32 idx, reg, num_qs, start_q, bitmask;
1580
1581 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1582
1583 /* Map VF to queues */
1584 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1585 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1586 case IXGBE_MRQC_VMDQRT8TCEN:
1587 num_qs = 8; /* 16 VFs / pools */
1588 bitmask = 0x000000FF;
1589 break;
1590 case IXGBE_MRQC_VMDQRSS32EN:
1591 case IXGBE_MRQC_VMDQRT4TCEN:
1592 num_qs = 4; /* 32 VFs / pools */
1593 bitmask = 0x0000000F;
1594 break;
1595 default: /* 64 VFs / pools */
1596 num_qs = 2;
1597 bitmask = 0x00000003;
1598 break;
1599 }
1600 start_q = vf * num_qs;
1601
1602 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1603 idx = start_q / 32;
1604 reg = 0;
1605 reg |= (bitmask << (start_q % 32));
1606 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1607 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1608 }
1609
1610 /**
1611 * ixgbe_mdd_event_X550
1612 * @hw: pointer to hardware structure
1613 * @vf_bitmap: vf bitmap of malicious vfs
1614 *
1615 * Handle malicious driver detection event.
1616 **/
1617 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1618 {
1619 u32 wqbr;
1620 u32 i, j, reg, q, shift, vf, idx;
1621
1622 DEBUGFUNC("ixgbe_mdd_event_X550");
1623
1624 /* figure out pool size for mapping to vf's */
1625 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1626 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1627 case IXGBE_MRQC_VMDQRT8TCEN:
1628 shift = 3; /* 16 VFs / pools */
1629 break;
1630 case IXGBE_MRQC_VMDQRSS32EN:
1631 case IXGBE_MRQC_VMDQRT4TCEN:
1632 shift = 2; /* 32 VFs / pools */
1633 break;
1634 default:
1635 shift = 1; /* 64 VFs / pools */
1636 break;
1637 }
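/* For example, with 32 pools (shift = 2) a flagged queue 37 maps back to
 * VF 37 >> 2 = 9 in the loop below.
 */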
1638
1639 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1640 for (i = 0; i < 4; i++) {
1641 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1642 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1643
1644 if (!wqbr)
1645 continue;
1646
1647 /* Get malicious queue */
1648 for (j = 0; j < 32 && wqbr; j++) {
1649
1650 if (!(wqbr & (1 << j)))
1651 continue;
1652
1653 /* Get queue from bitmask */
1654 q = j + (i * 32);
1655
1656 /* Map queue to vf */
1657 vf = (q >> shift);
1658
1659 /* Set vf bit in vf_bitmap */
1660 idx = vf / 32;
1661 vf_bitmap[idx] |= (1 << (vf % 32));
1662 wqbr &= ~(1 << j);
1663 }
1664 }
1665 }
1666
1667 /**
1668 * ixgbe_get_media_type_X550em - Get media type
1669 * @hw: pointer to hardware structure
1670 *
1671 * Returns the media type (fiber, copper, backplane)
1672 */
1673 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1674 {
1675 enum ixgbe_media_type media_type;
1676
1677 DEBUGFUNC("ixgbe_get_media_type_X550em");
1678
1679 /* Detect if there is a copper PHY attached. */
1680 switch (hw->device_id) {
1681 case IXGBE_DEV_ID_X550EM_X_KR:
1682 case IXGBE_DEV_ID_X550EM_X_KX4:
1683 case IXGBE_DEV_ID_X550EM_X_XFI:
1684 case IXGBE_DEV_ID_X550EM_A_KR:
1685 case IXGBE_DEV_ID_X550EM_A_KR_L:
1686 media_type = ixgbe_media_type_backplane;
1687 break;
1688 case IXGBE_DEV_ID_X550EM_X_SFP:
1689 case IXGBE_DEV_ID_X550EM_A_SFP:
1690 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1691 case IXGBE_DEV_ID_X550EM_A_QSFP:
1692 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1693 media_type = ixgbe_media_type_fiber;
1694 break;
1695 case IXGBE_DEV_ID_X550EM_X_1G_T:
1696 case IXGBE_DEV_ID_X550EM_X_10G_T:
1697 case IXGBE_DEV_ID_X550EM_A_10G_T:
1698 media_type = ixgbe_media_type_copper;
1699 break;
1700 case IXGBE_DEV_ID_X550EM_A_SGMII:
1701 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1702 media_type = ixgbe_media_type_backplane;
1703 hw->phy.type = ixgbe_phy_sgmii;
1704 break;
1705 case IXGBE_DEV_ID_X550EM_A_1G_T:
1706 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1707 media_type = ixgbe_media_type_copper;
1708 break;
1709 default:
1710 media_type = ixgbe_media_type_unknown;
1711 break;
1712 }
1713 return media_type;
1714 }
1715
1716 /**
1717 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1718 * @hw: pointer to hardware structure
1719 * @linear: TRUE if SFP module is linear
1720 */
1721 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1722 {
1723 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1724
1725 switch (hw->phy.sfp_type) {
1726 case ixgbe_sfp_type_not_present:
1727 return IXGBE_ERR_SFP_NOT_PRESENT;
1728 case ixgbe_sfp_type_da_cu_core0:
1729 case ixgbe_sfp_type_da_cu_core1:
1730 *linear = TRUE;
1731 break;
1732 case ixgbe_sfp_type_srlr_core0:
1733 case ixgbe_sfp_type_srlr_core1:
1734 case ixgbe_sfp_type_da_act_lmt_core0:
1735 case ixgbe_sfp_type_da_act_lmt_core1:
1736 case ixgbe_sfp_type_1g_sx_core0:
1737 case ixgbe_sfp_type_1g_sx_core1:
1738 case ixgbe_sfp_type_1g_lx_core0:
1739 case ixgbe_sfp_type_1g_lx_core1:
1740 *linear = FALSE;
1741 break;
1742 case ixgbe_sfp_type_unknown:
1743 case ixgbe_sfp_type_1g_cu_core0:
1744 case ixgbe_sfp_type_1g_cu_core1:
1745 default:
1746 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1747 }
1748
1749 return IXGBE_SUCCESS;
1750 }
1751
1752 /**
1753 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1754 * @hw: pointer to hardware structure
1755 *
1756 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1757 **/
1758 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1759 {
1760 s32 status;
1761 bool linear;
1762
1763 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1764
1765 status = ixgbe_identify_module_generic(hw);
1766
1767 if (status != IXGBE_SUCCESS)
1768 return status;
1769
1770 /* Check if SFP module is supported */
1771 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1772
1773 return status;
1774 }
1775
1776 /**
1777 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1778 * @hw: pointer to hardware structure
1779 */
1780 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1781 {
1782 s32 status;
1783 bool linear;
1784
1785 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1786
1787 /* Check if SFP module is supported */
1788 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1789
1790 if (status != IXGBE_SUCCESS)
1791 return status;
1792
1793 ixgbe_init_mac_link_ops_X550em(hw);
1794 hw->phy.ops.reset = NULL;
1795
1796 return IXGBE_SUCCESS;
1797 }
1798
1799 /**
1800 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1801 * internal PHY
1802 * @hw: pointer to hardware structure
1803 **/
1804 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1805 {
1806 s32 status;
1807 u32 link_ctrl;
1808
1809 /* Restart auto-negotiation. */
1810 status = hw->mac.ops.read_iosf_sb_reg(hw,
1811 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1812 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1813
1814 if (status) {
1815 DEBUGOUT("Auto-negotiation did not complete\n");
1816 return status;
1817 }
1818
1819 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1820 status = hw->mac.ops.write_iosf_sb_reg(hw,
1821 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1822 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1823
1824 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1825 u32 flx_mask_st20;
1826
1827 /* Indicate to FW that AN restart has been asserted */
1828 status = hw->mac.ops.read_iosf_sb_reg(hw,
1829 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1830 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1831
1832 if (status) {
1833 DEBUGOUT("Auto-negotiation did not complete\n");
1834 return status;
1835 }
1836
1837 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1838 status = hw->mac.ops.write_iosf_sb_reg(hw,
1839 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1840 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1841 }
1842
1843 return status;
1844 }
1845
1846 /**
1847 * ixgbe_setup_sgmii - Set up link for sgmii
1848 * @hw: pointer to hardware structure
1849 * @speed: new link speed
1850 * @autoneg_wait: TRUE when waiting for completion is needed
1851 */
1852 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1853 bool autoneg_wait)
1854 {
1855 struct ixgbe_mac_info *mac = &hw->mac;
1856 u32 lval, sval, flx_val;
1857 s32 rc;
1858
1859 rc = mac->ops.read_iosf_sb_reg(hw,
1860 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1861 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1862 if (rc)
1863 return rc;
1864
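/* Force the internal link to 1G SGMII: disable KR auto-negotiation,
 * enable SGMII clause-37 auto-negotiation and force the speed to 1G.
 */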
1865 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1866 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1867 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1868 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1869 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1870 rc = mac->ops.write_iosf_sb_reg(hw,
1871 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1872 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1873 if (rc)
1874 return rc;
1875
1876 rc = mac->ops.read_iosf_sb_reg(hw,
1877 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1878 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1879 if (rc)
1880 return rc;
1881
1882 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1883 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1884 rc = mac->ops.write_iosf_sb_reg(hw,
1885 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1886 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1887 if (rc)
1888 return rc;
1889
1890 rc = mac->ops.read_iosf_sb_reg(hw,
1891 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1892 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1893 if (rc)
1894 return rc;
1895
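/* Program the PMD flex mask to match: force 1G, keep the KR
 * auto-negotiation engine off, enable SGMII and clause-37 auto-negotiation.
 */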
1896 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1897 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1898 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1899 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1900 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1901
1902 rc = mac->ops.write_iosf_sb_reg(hw,
1903 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1904 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1905 if (rc)
1906 return rc;
1907
1908 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1909 if (rc)
1910 return rc;
1911
1912 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1913 }
1914
1915 /**
1916 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1917 * @hw: pointer to hardware structure
1918 * @speed: new link speed
1919 * @autoneg_wait: TRUE when waiting for completion is needed
1920 */
1921 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1922 bool autoneg_wait)
1923 {
1924 struct ixgbe_mac_info *mac = &hw->mac;
1925 u32 lval, sval, flx_val;
1926 s32 rc;
1927
1928 rc = mac->ops.read_iosf_sb_reg(hw,
1929 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1930 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1931 if (rc)
1932 return rc;
1933
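/* The 1G PHY is managed by firmware, so enable SGMII clause-37
 * auto-negotiation without forcing a 1G speed on the internal link.
 */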
1934 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1935 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1936 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1937 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1938 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1939 rc = mac->ops.write_iosf_sb_reg(hw,
1940 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1941 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1942 if (rc)
1943 return rc;
1944
1945 rc = mac->ops.read_iosf_sb_reg(hw,
1946 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1947 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1948 if (rc)
1949 return rc;
1950
1951 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1952 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1953 rc = mac->ops.write_iosf_sb_reg(hw,
1954 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1955 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1956 if (rc)
1957 return rc;
1958
1959 rc = mac->ops.write_iosf_sb_reg(hw,
1960 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1961 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1962 if (rc)
1963 return rc;
1964
1965 rc = mac->ops.read_iosf_sb_reg(hw,
1966 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1967 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1968 if (rc)
1969 return rc;
1970
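/* PMD flex mask: auto-negotiated speed, KR auto-negotiation engine off,
 * SGMII and clause-37 auto-negotiation enabled.
 */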
1971 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1972 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1973 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1974 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1975 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1976
1977 rc = mac->ops.write_iosf_sb_reg(hw,
1978 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1979 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1980 if (rc)
1981 return rc;
1982
1983 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1984
1985 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1986 }
1987
1988 /**
1989 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1990 * @hw: pointer to hardware structure
1991 */
1992 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1993 {
1994 struct ixgbe_mac_info *mac = &hw->mac;
1995
1996 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1997
1998 switch (hw->mac.ops.get_media_type(hw)) {
1999 case ixgbe_media_type_fiber:
2000 /* CS4227 does not support autoneg, so disable the laser control
2001 * functions for SFP+ fiber
2002 */
2003 mac->ops.disable_tx_laser = NULL;
2004 mac->ops.enable_tx_laser = NULL;
2005 mac->ops.flap_tx_laser = NULL;
2006 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2007 mac->ops.set_rate_select_speed =
2008 ixgbe_set_soft_rate_select_speed;
2009
2010 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2011 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2012 mac->ops.setup_mac_link =
2013 ixgbe_setup_mac_link_sfp_x550a;
2014 else
2015 mac->ops.setup_mac_link =
2016 ixgbe_setup_mac_link_sfp_x550em;
2017 break;
2018 case ixgbe_media_type_copper:
2019 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2020 break;
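/* X550EM_a 1G BASE-T ports use a firmware-managed PHY, so link setup
 * goes through the SGMII firmware path; the remaining copper ports use
 * the Base T link setup path.
 */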
2021 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2022 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2023 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2024 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2025 mac->ops.check_link =
2026 ixgbe_check_mac_link_generic;
2027 } else {
2028 mac->ops.setup_link =
2029 ixgbe_setup_mac_link_t_X550em;
2030 }
2031 } else {
2032 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2033 mac->ops.check_link = ixgbe_check_link_t_X550em;
2034 }
2035 break;
2036 case ixgbe_media_type_backplane:
2037 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2038 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2039 mac->ops.setup_link = ixgbe_setup_sgmii;
2040 break;
2041 default:
2042 break;
2043 }
2044 }
2045
2046 /**
2047 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2048 * @hw: pointer to hardware structure
2049 * @speed: pointer to link speed
2050 * @autoneg: TRUE when autoneg or autotry is enabled
2051 */
2052 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2053 ixgbe_link_speed *speed,
2054 bool *autoneg)
2055 {
2056 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2057
2058
2059 if (hw->phy.type == ixgbe_phy_fw) {
2060 *autoneg = TRUE;
2061 *speed = hw->phy.speeds_supported;
2062 return 0;
2063 }
2064
2065 /* SFP */
2066 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2067
2068 /* CS4227 SFP must not enable auto-negotiation */
2069 *autoneg = FALSE;
2070
2071 /* Check if 1G SFP module. */
2072 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2073 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2074 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2075 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2076 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2077 return IXGBE_SUCCESS;
2078 }
2079
2080 /* Link capabilities are based on SFP */
2081 if (hw->phy.multispeed_fiber)
2082 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2083 IXGBE_LINK_SPEED_1GB_FULL;
2084 else
2085 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2086 } else {
2087 *autoneg = TRUE;
2088
2089 switch (hw->phy.type) {
2090 case ixgbe_phy_x550em_xfi:
2091 *speed = IXGBE_LINK_SPEED_1GB_FULL |
2092 IXGBE_LINK_SPEED_10GB_FULL;
2093 *autoneg = FALSE;
2094 break;
2095 case ixgbe_phy_ext_1g_t:
2096 case ixgbe_phy_sgmii:
2097 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2098 break;
2099 case ixgbe_phy_x550em_kr:
2100 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2101 /* check different backplane modes */
2102 if (hw->phy.nw_mng_if_sel &
2103 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2104 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2105 break;
2106 } else if (hw->device_id ==
2107 IXGBE_DEV_ID_X550EM_A_KR_L) {
2108 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2109 break;
2110 }
2111 }
2112 /* fall through */
2113 default:
2114 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2115 IXGBE_LINK_SPEED_1GB_FULL;
2116 break;
2117 }
2118 }
2119
2120 return IXGBE_SUCCESS;
2121 }
2122
2123 /**
2124 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2125 * @hw: pointer to hardware structure
2126 * @lsc: pointer to boolean flag which indicates whether external Base T
2127 * PHY interrupt is lsc
2128 *
2129 * Determine if external Base T PHY interrupt cause is high temperature
2130 * failure alarm or link status change.
2131 *
2132 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2133 * failure alarm, else return PHY access status.
2134 */
2135 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2136 {
2137 u32 status;
2138 u16 reg;
2139
2140 *lsc = FALSE;
2141
2142 /* Vendor alarm triggered */
2143 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2144 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2145 &reg);
2146
2147 if (status != IXGBE_SUCCESS ||
2148 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2149 return status;
2150
2151 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2152 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2153 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2154 &reg);
2155
2156 if (status != IXGBE_SUCCESS ||
2157 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2158 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2159 return status;
2160
2161 /* Global alarm triggered */
2162 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2163 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2164 &reg);
2165
2166 if (status != IXGBE_SUCCESS)
2167 return status;
2168
2169 /* If high temperature failure, then return over temp error and exit */
2170 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2171 /* power down the PHY in case the PHY FW didn't already */
2172 ixgbe_set_copper_phy_power(hw, FALSE);
2173 return IXGBE_ERR_OVERTEMP;
2174 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2175 /* device fault alarm triggered */
2176 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2177 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2178 &reg);
2179
2180 if (status != IXGBE_SUCCESS)
2181 return status;
2182
2183 /* if device fault was due to high temp alarm handle and exit */
2184 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2185 /* power down the PHY in case the PHY FW didn't */
2186 ixgbe_set_copper_phy_power(hw, FALSE);
2187 return IXGBE_ERR_OVERTEMP;
2188 }
2189 }
2190
2191 /* Vendor alarm 2 triggered */
2192 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2193 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2194
2195 if (status != IXGBE_SUCCESS ||
2196 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2197 return status;
2198
2199 /* link connect/disconnect event occurred */
2200 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2201 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2202
2203 if (status != IXGBE_SUCCESS)
2204 return status;
2205
2206 /* Indicate LSC */
2207 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2208 *lsc = TRUE;
2209
2210 return IXGBE_SUCCESS;
2211 }
2212
2213 /**
2214 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2215 * @hw: pointer to hardware structure
2216 *
2217 * Enable link status change and temperature failure alarm for the external
2218 * Base T PHY
2219 *
2220 * Returns PHY access status
2221 */
2222 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2223 {
2224 u32 status;
2225 u16 reg;
2226 bool lsc;
2227
2228 /* Clear interrupt flags */
2229 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2230
2231 /* Enable link status change alarm */
2232
2233 /* Enable the LASI interrupts on X552 devices to receive notifications
2234 * of the link configurations of the external PHY and correspondingly
2235 * support the configuration of the internal iXFI link, since iXFI does
2236 * not support auto-negotiation. This is not required for X553 devices,
2237 * whose KR interface performs auto-negotiation and is used as the
2238 * internal link to the external PHY. Hence the check below avoids
2239 * enabling LASI interrupts for X553 devices.
2240 */
2241 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2242 status = hw->phy.ops.read_reg(hw,
2243 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2244 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2245
2246 if (status != IXGBE_SUCCESS)
2247 return status;
2248
2249 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2250
2251 status = hw->phy.ops.write_reg(hw,
2252 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2253 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2254
2255 if (status != IXGBE_SUCCESS)
2256 return status;
2257 }
2258
2259 /* Enable high temperature failure and global fault alarms */
2260 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2261 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2262 &reg);
2263
2264 if (status != IXGBE_SUCCESS)
2265 return status;
2266
2267 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2268 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2269
2270 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2271 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2272 reg);
2273
2274 if (status != IXGBE_SUCCESS)
2275 return status;
2276
2277 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2278 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2279 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2280 &reg);
2281
2282 if (status != IXGBE_SUCCESS)
2283 return status;
2284
2285 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2286 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2287
2288 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2289 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2290 reg);
2291
2292 if (status != IXGBE_SUCCESS)
2293 return status;
2294
2295 /* Enable chip-wide vendor alarm */
2296 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2297 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2298 &reg);
2299
2300 if (status != IXGBE_SUCCESS)
2301 return status;
2302
2303 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2304
2305 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2306 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2307 reg);
2308
2309 return status;
2310 }
2311
2312 /**
2313 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2314 * @hw: pointer to hardware structure
2315 * @speed: link speed
2316 *
2317 * Configures the integrated KR PHY.
2318 **/
2319 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2320 ixgbe_link_speed speed)
2321 {
2322 s32 status;
2323 u32 reg_val;
2324
2325 status = hw->mac.ops.read_iosf_sb_reg(hw,
2326 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2327 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2328 if (status)
2329 return status;
2330
2331 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2332 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2333 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2334
2335 /* Advertise 10G support. */
2336 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2337 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2338
2339 /* Advertise 1G support. */
2340 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2341 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2342
2343 status = hw->mac.ops.write_iosf_sb_reg(hw,
2344 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2345 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2346
2347 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2348 /* Set lane mode to KR auto negotiation */
2349 status = hw->mac.ops.read_iosf_sb_reg(hw,
2350 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2351 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2352
2353 if (status)
2354 return status;
2355
2356 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2357 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2358 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2359 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2360 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2361
2362 status = hw->mac.ops.write_iosf_sb_reg(hw,
2363 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2364 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2365 }
2366
2367 return ixgbe_restart_an_internal_phy_x550em(hw);
2368 }
2369
2370 /**
2371 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2372 * @hw: pointer to hardware structure
2373 */
2374 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2375 {
2376 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2377 s32 rc;
2378
2379 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2380 return IXGBE_SUCCESS;
2381
2382 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2383 if (rc)
2384 return rc;
2385 memset(store, 0, sizeof(store));
2386
2387 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2388 if (rc)
2389 return rc;
2390
2391 return ixgbe_setup_fw_link(hw);
2392 }
2393
2394 /**
2395 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2396 * @hw: pointer to hardware structure
2397 */
2398 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2399 {
2400 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2401 s32 rc;
2402
2403 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2404 if (rc)
2405 return rc;
2406
2407 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2408 ixgbe_shutdown_fw_phy(hw);
2409 return IXGBE_ERR_OVERTEMP;
2410 }
2411 return IXGBE_SUCCESS;
2412 }
2413
2414 /**
2415 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2416 * @hw: pointer to hardware structure
2417 *
2418 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2419 * values.
2420 **/
2421 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2422 {
2423 /* Save NW management interface connected on board. This is used
2424 * to determine internal PHY mode.
2425 */
2426 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2427
2428 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2429 * PHY address. This register field has only been used for X552.
2430 */
2431 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2432 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2433 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2434 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2435 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2436 }
2437
2438 return IXGBE_SUCCESS;
2439 }
2440
2441 /**
2442 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2443 * @hw: pointer to hardware structure
2444 *
2445 * Initialize any function pointers that were not able to be
2446 * set during init_shared_code because the PHY/SFP type was
2447 * not known. Perform the SFP init if necessary.
2448 */
2449 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2450 {
2451 struct ixgbe_phy_info *phy = &hw->phy;
2452 s32 ret_val;
2453
2454 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2455
2456 hw->mac.ops.set_lan_id(hw);
2457 ixgbe_read_mng_if_sel_x550em(hw);
2458
2459 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2460 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2461 ixgbe_setup_mux_ctl(hw);
2462 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2463 }
2464
2465 switch (hw->device_id) {
2466 case IXGBE_DEV_ID_X550EM_A_1G_T:
2467 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2468 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2469 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2470 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2471 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2472 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2473 if (hw->bus.lan_id)
2474 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2475 else
2476 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2477
2478 break;
2479 case IXGBE_DEV_ID_X550EM_A_10G_T:
2480 case IXGBE_DEV_ID_X550EM_A_SFP:
2481 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2482 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2483 if (hw->bus.lan_id)
2484 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2485 else
2486 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2487 break;
2488 case IXGBE_DEV_ID_X550EM_X_SFP:
2489 /* set up for CS4227 usage */
2490 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2491 break;
2492 case IXGBE_DEV_ID_X550EM_X_1G_T:
2493 phy->ops.read_reg_mdi = NULL;
2494 phy->ops.write_reg_mdi = NULL;
2495 break;
2496 default:
2497 break;
2498 }
2499
2500 /* Identify the PHY or SFP module */
2501 ret_val = phy->ops.identify(hw);
2502 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2503 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2504 return ret_val;
2505
2506 /* Setup function pointers based on detected hardware */
2507 ixgbe_init_mac_link_ops_X550em(hw);
2508 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2509 phy->ops.reset = NULL;
2510
2511 /* Set functions pointers based on phy type */
2512 switch (hw->phy.type) {
2513 case ixgbe_phy_x550em_kx4:
2514 phy->ops.setup_link = NULL;
2515 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2516 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2517 break;
2518 case ixgbe_phy_x550em_kr:
2519 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2520 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2521 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2522 break;
2523 case ixgbe_phy_ext_1g_t:
2524 /* link is managed by FW */
2525 phy->ops.setup_link = NULL;
2526 phy->ops.reset = NULL;
2527 break;
2528 case ixgbe_phy_x550em_xfi:
2529 /* link is managed by HW */
2530 phy->ops.setup_link = NULL;
2531 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2532 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2533 break;
2534 case ixgbe_phy_x550em_ext_t:
2535 /* If internal link mode is XFI, then setup iXFI internal link,
2536 * else setup KR now.
2537 */
2538 phy->ops.setup_internal_link =
2539 ixgbe_setup_internal_phy_t_x550em;
2540
2541 /* setup SW LPLU only for first revision of X550EM_x */
2542 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2543 !(IXGBE_FUSES0_REV_MASK &
2544 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2545 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2546
2547 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2548 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2549 break;
2550 case ixgbe_phy_sgmii:
2551 phy->ops.setup_link = NULL;
2552 break;
2553 case ixgbe_phy_fw:
2554 phy->ops.setup_link = ixgbe_setup_fw_link;
2555 phy->ops.reset = ixgbe_reset_phy_fw;
2556 break;
2557 default:
2558 break;
2559 }
2560 return ret_val;
2561 }
2562
2563 /**
2564 * ixgbe_set_mdio_speed - Set MDIO clock speed
2565 * @hw: pointer to hardware structure
2566 */
2567 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2568 {
2569 u32 hlreg0;
2570
2571 switch (hw->device_id) {
2572 case IXGBE_DEV_ID_X550EM_X_10G_T:
2573 case IXGBE_DEV_ID_X550EM_A_SGMII:
2574 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2575 case IXGBE_DEV_ID_X550EM_A_10G_T:
2576 case IXGBE_DEV_ID_X550EM_A_SFP:
2577 case IXGBE_DEV_ID_X550EM_A_QSFP:
2578 /* Config MDIO clock speed before the first MDIO PHY access */
2579 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2580 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2581 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2582 break;
2583 case IXGBE_DEV_ID_X550EM_A_1G_T:
2584 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2585 /* Select fast MDIO clock speed for these devices */
2586 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2587 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2588 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2589 break;
2590 default:
2591 break;
2592 }
2593 }
2594
2595 /**
2596 * ixgbe_reset_hw_X550em - Perform hardware reset
2597 * @hw: pointer to hardware structure
2598 *
2599 * Resets the hardware by resetting the transmit and receive units, masks
2600 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2601 * reset.
2602 */
2603 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2604 {
2605 ixgbe_link_speed link_speed;
2606 s32 status;
2607 s32 phy_status = IXGBE_SUCCESS;
2608 u32 ctrl = 0;
2609 u32 i;
2610 bool link_up = FALSE;
2611 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2612
2613 DEBUGFUNC("ixgbe_reset_hw_X550em");
2614
2615 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2616 status = hw->mac.ops.stop_adapter(hw);
2617 if (status != IXGBE_SUCCESS) {
2618 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2619 return status;
2620 }
2621 /* flush pending Tx transactions */
2622 ixgbe_clear_tx_pending(hw);
2623
2624 ixgbe_set_mdio_speed(hw);
2625
2626 /* PHY ops must be identified and initialized prior to reset */
2627 phy_status = hw->phy.ops.init(hw);
2628
2629 if (phy_status)
2630 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2631 phy_status);
2632
2633 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2634 phy_status == IXGBE_ERR_PHY_ADDR_INVALID) {
2635 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2636 goto mac_reset_top;
2637 }
2638
2639 /* start the external PHY */
2640 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2641 status = ixgbe_init_ext_t_x550em(hw);
2642 if (status) {
2643 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2644 status);
2645 return status;
2646 }
2647 }
2648
2649 /* Setup SFP module if there is one present. */
2650 if (hw->phy.sfp_setup_needed) {
2651 phy_status = hw->mac.ops.setup_sfp(hw);
2652 hw->phy.sfp_setup_needed = FALSE;
2653 }
2654
2655 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2656 goto mac_reset_top;
2657
2658 /* Reset PHY */
2659 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2660 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2661 return IXGBE_ERR_OVERTEMP;
2662 }
2663
2664 mac_reset_top:
2665 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2666 * If link reset is used when link is up, it might reset the PHY when
2667 * mng is using it. If link is down or the flag to force full link
2668 * reset is set, then perform link reset.
2669 */
2670 ctrl = IXGBE_CTRL_LNK_RST;
2671 if (!hw->force_full_reset) {
2672 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2673 if (link_up)
2674 ctrl = IXGBE_CTRL_RST;
2675 }
2676
2677 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2678 if (status != IXGBE_SUCCESS) {
2679 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2680 "semaphore failed with %d", status);
2681 return IXGBE_ERR_SWFW_SYNC;
2682 }
2683 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2684 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2685 IXGBE_WRITE_FLUSH(hw);
2686 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2687
2688 /* Poll for reset bit to self-clear meaning reset is complete */
2689 for (i = 0; i < 10; i++) {
2690 usec_delay(1);
2691 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2692 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2693 break;
2694 }
2695
2696 if (ctrl & IXGBE_CTRL_RST_MASK) {
2697 status = IXGBE_ERR_RESET_FAILED;
2698 DEBUGOUT("Reset polling failed to complete.\n");
2699 }
2700
2701 msec_delay(50);
2702
2703 /* Double resets are required for recovery from certain error
2704 * conditions. Between resets, it is necessary to stall to
2705 * allow time for any pending HW events to complete.
2706 */
2707 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2708 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2709 goto mac_reset_top;
2710 }
2711
2712 /* Store the permanent mac address */
2713 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2714
2715 /* Store MAC address from RAR0, clear receive address registers, and
2716 * clear the multicast table. Also reset num_rar_entries to 128,
2717 * since we modify this value when programming the SAN MAC address.
2718 */
2719 hw->mac.num_rar_entries = 128;
2720 hw->mac.ops.init_rx_addrs(hw);
2721
2722 ixgbe_set_mdio_speed(hw);
2723
2724 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2725 ixgbe_setup_mux_ctl(hw);
2726
2727 if (status != IXGBE_SUCCESS)
2728 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2729
2730 if (phy_status != IXGBE_SUCCESS)
2731 status = phy_status;
2732
2733 return status;
2734 }
2735
2736 /**
2737 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2738 * @hw: pointer to hardware structure
2739 */
2740 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2741 {
2742 u32 status;
2743 u16 reg;
2744
2745 status = hw->phy.ops.read_reg(hw,
2746 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2747 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2748 &reg);
2749
2750 if (status != IXGBE_SUCCESS)
2751 return status;
2752
2753 /* If PHY FW reset completed bit is set then this is the first
2754 * SW instance after a power on so the PHY FW must be un-stalled.
2755 */
2756 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2757 status = hw->phy.ops.read_reg(hw,
2758 IXGBE_MDIO_GLOBAL_RES_PR_10,
2759 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2760 &reg);
2761
2762 if (status != IXGBE_SUCCESS)
2763 return status;
2764
2765 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2766
2767 status = hw->phy.ops.write_reg(hw,
2768 IXGBE_MDIO_GLOBAL_RES_PR_10,
2769 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2770 reg);
2771
2772 if (status != IXGBE_SUCCESS)
2773 return status;
2774 }
2775
2776 return status;
2777 }
2778
2779 /**
2780 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2781 * @hw: pointer to hardware structure
2782 **/
2783 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2784 {
2785 /* leave link alone for 2.5G */
2786 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2787 return IXGBE_SUCCESS;
2788
2789 if (ixgbe_check_reset_blocked(hw))
2790 return 0;
2791
2792 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2793 }
2794
2795 /**
2796 * ixgbe_setup_mac_link_sfp_x550em - Setup the internal/external PHY for SFP
2797 * @hw: pointer to hardware structure
2798 * @speed: new link speed
2799 * @autoneg_wait_to_complete: unused
2800 *
2801 * Configure the external PHY and the integrated KR PHY for SFP support.
2802 **/
2803 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2804 ixgbe_link_speed speed,
2805 bool autoneg_wait_to_complete)
2806 {
2807 s32 ret_val;
2808 u16 reg_slice, reg_val;
2809 bool setup_linear = FALSE;
2810 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2811
2812 /* Check if SFP module is supported and linear */
2813 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2814
2815 /* If no SFP module is present, return success: there is no reason to
2816 * configure the CS4227, and an SFP-not-present error is not treated as
2817 * fatal in the setup MAC link flow.
2818 */
2819 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2820 return IXGBE_SUCCESS;
2821
2822 if (ret_val != IXGBE_SUCCESS)
2823 return ret_val;
2824
2825 /* Configure internal PHY for KR/KX. */
2826 ixgbe_setup_kr_speed_x550em(hw, speed);
2827
2828 /* Configure CS4227 LINE side to proper mode. */
2829 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2830 (hw->bus.lan_id << 12);
2831 if (setup_linear)
2832 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2833 else
2834 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2835 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2836 reg_val);
2837 return ret_val;
2838 }
2839
2840 /**
2841 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2842 * @hw: pointer to hardware structure
2843 * @speed: the link speed to force
2844 *
2845 * Configures the integrated PHY for native SFI mode. Used to connect the
2846 * internal PHY directly to an SFP cage, without autonegotiation.
2847 **/
2848 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2849 {
2850 struct ixgbe_mac_info *mac = &hw->mac;
2851 s32 status;
2852 u32 reg_val;
2853
2854 /* Disable all AN and force speed to 10G Serial. */
2855 status = mac->ops.read_iosf_sb_reg(hw,
2856 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2857 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2858 if (status != IXGBE_SUCCESS)
2859 return status;
2860
2861 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2862 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2863 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2864 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2865
2866 /* Select forced link speed for internal PHY. */
2867 switch (*speed) {
2868 case IXGBE_LINK_SPEED_10GB_FULL:
2869 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2870 break;
2871 case IXGBE_LINK_SPEED_1GB_FULL:
2872 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2873 break;
2874 case 0:
2875 /* media none (linkdown) */
2876 break;
2877 default:
2878 /* Other link speeds are not supported by internal PHY. */
2879 return IXGBE_ERR_LINK_SETUP;
2880 }
2881
2882 status = mac->ops.write_iosf_sb_reg(hw,
2883 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2884 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2885
2886 /* Toggle port SW reset by AN reset. */
2887 status = ixgbe_restart_an_internal_phy_x550em(hw);
2888
2889 return status;
2890 }
2891
2892 /**
2893 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2894 * @hw: pointer to hardware structure
2895 * @speed: new link speed
2896 * @autoneg_wait_to_complete: unused
2897 *
2898 * Configure the integrated PHY for SFP support.
2899 **/
2900 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2901 ixgbe_link_speed speed,
2902 bool autoneg_wait_to_complete)
2903 {
2904 s32 ret_val;
2905 u16 reg_phy_ext;
2906 bool setup_linear = FALSE;
2907 u32 reg_slice, reg_phy_int, slice_offset;
2908
2909 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2910
2911 /* Check if SFP module is supported and linear */
2912 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2913
2914 /* If no SFP module is present, return success: an SFP-not-present error
2915 * is not treated as fatal in the setup MAC link flow.
2916 */
2917 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2918 return IXGBE_SUCCESS;
2919
2920 if (ret_val != IXGBE_SUCCESS)
2921 return ret_val;
2922
2923 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2924 /* Configure internal PHY for native SFI based on module type */
2925 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2926 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2927 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2928
2929 if (ret_val != IXGBE_SUCCESS)
2930 return ret_val;
2931
2932 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2933 if (!setup_linear)
2934 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2935
2936 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2937 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2938 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2939
2940 if (ret_val != IXGBE_SUCCESS)
2941 return ret_val;
2942
2943 /* Setup SFI internal link. */
2944 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2945 } else {
2946 /* Configure internal PHY for KR/KX. */
2947 ixgbe_setup_kr_speed_x550em(hw, speed);
2948
2949 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2950 /* Find Address */
2951 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2952 return IXGBE_ERR_PHY_ADDR_INVALID;
2953 }
2954
2955 /* Get external PHY SKU id */
2956 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2957 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2958
2959 if (ret_val != IXGBE_SUCCESS)
2960 return ret_val;
2961
2962 /* When configuring quad port CS4223, the MAC instance is part
2963 * of the slice offset.
2964 */
2965 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2966 slice_offset = (hw->bus.lan_id +
2967 (hw->bus.instance_id << 1)) << 12;
2968 else
2969 slice_offset = hw->bus.lan_id << 12;
2970
2971 /* Configure CS4227/CS4223 LINE side to proper mode. */
2972 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2973
2974 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2975 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2976
2977 if (ret_val != IXGBE_SUCCESS)
2978 return ret_val;
2979
2980 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2981 (IXGBE_CS4227_EDC_MODE_SR << 1));
2982
2983 if (setup_linear)
2984 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2985 else
2986 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2987 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2988 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2989
2990 /* Flush previous write with a read */
2991 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2992 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2993 }
2994 return ret_val;
2995 }
2996
2997 /**
2998 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2999 * @hw: pointer to hardware structure
3000 *
3001 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
3002 **/
3003 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
3004 {
3005 struct ixgbe_mac_info *mac = &hw->mac;
3006 s32 status;
3007 u32 reg_val;
3008
3009 /* Disable training protocol FSM. */
3010 status = mac->ops.read_iosf_sb_reg(hw,
3011 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3012 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3013 if (status != IXGBE_SUCCESS)
3014 return status;
3015 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3016 status = mac->ops.write_iosf_sb_reg(hw,
3017 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3018 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3019 if (status != IXGBE_SUCCESS)
3020 return status;
3021
3022 /* Disable Flex from training TXFFE. */
3023 status = mac->ops.read_iosf_sb_reg(hw,
3024 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3025 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3026 if (status != IXGBE_SUCCESS)
3027 return status;
3028 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3029 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3030 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3031 status = mac->ops.write_iosf_sb_reg(hw,
3032 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3033 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3034 if (status != IXGBE_SUCCESS)
3035 return status;
3036 status = mac->ops.read_iosf_sb_reg(hw,
3037 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3038 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3039 if (status != IXGBE_SUCCESS)
3040 return status;
3041 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3042 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3043 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3044 status = mac->ops.write_iosf_sb_reg(hw,
3045 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3046 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3047 if (status != IXGBE_SUCCESS)
3048 return status;
3049
3050 /* Enable override for coefficients. */
3051 status = mac->ops.read_iosf_sb_reg(hw,
3052 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3053 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3054 if (status != IXGBE_SUCCESS)
3055 return status;
3056 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3057 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3058 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3059 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3060 status = mac->ops.write_iosf_sb_reg(hw,
3061 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3062 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3063 return status;
3064 }
3065
3066 /**
3067 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3068 * @hw: pointer to hardware structure
3069 * @speed: the link speed to force
3070 *
3071 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3072 * internal and external PHY at a specific speed, without autonegotiation.
3073 **/
3074 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3075 {
3076 struct ixgbe_mac_info *mac = &hw->mac;
3077 s32 status;
3078 u32 reg_val;
3079
3080 /* iXFI is only supported with X552 */
3081 if (mac->type != ixgbe_mac_X550EM_x)
3082 return IXGBE_ERR_LINK_SETUP;
3083
3084 /* Disable AN and force speed to 10G Serial. */
3085 status = mac->ops.read_iosf_sb_reg(hw,
3086 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3087 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3088 if (status != IXGBE_SUCCESS)
3089 return status;
3090
3091 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3092 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3093
3094 /* Select forced link speed for internal PHY. */
3095 switch (*speed) {
3096 case IXGBE_LINK_SPEED_10GB_FULL:
3097 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3098 break;
3099 case IXGBE_LINK_SPEED_1GB_FULL:
3100 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3101 break;
3102 default:
3103 /* Other link speeds are not supported by internal KR PHY. */
3104 return IXGBE_ERR_LINK_SETUP;
3105 }
3106
3107 status = mac->ops.write_iosf_sb_reg(hw,
3108 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3109 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3110 if (status != IXGBE_SUCCESS)
3111 return status;
3112
3113 /* Additional configuration needed for x550em_x */
3114 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3115 status = ixgbe_setup_ixfi_x550em_x(hw);
3116 if (status != IXGBE_SUCCESS)
3117 return status;
3118 }
3119
3120 /* Toggle port SW reset by AN reset. */
3121 status = ixgbe_restart_an_internal_phy_x550em(hw);
3122
3123 return status;
3124 }
3125
3126 /**
3127 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3128 * @hw: address of hardware structure
3129 * @link_up: address of boolean to indicate link status
3130 *
3131 * Returns error code if unable to get link status.
3132 */
3133 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3134 {
3135 u32 ret;
3136 u16 autoneg_status;
3137
3138 *link_up = FALSE;
3139
3140 /* read this twice back to back to indicate current status */
3141 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3142 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3143 &autoneg_status);
3144 if (ret != IXGBE_SUCCESS)
3145 return ret;
3146
3147 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3148 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3149 &autoneg_status);
3150 if (ret != IXGBE_SUCCESS)
3151 return ret;
3152
3153 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3154
3155 return IXGBE_SUCCESS;
3156 }
3157
3158 /**
3159 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3160 * @hw: pointer to hardware structure
3161 *
3162 * Configures the link between the integrated KR PHY and the external X557 PHY
3163 * The driver will call this function when it gets a link status change
3164 * interrupt from the X557 PHY. This function configures the link speed
3165 * between the PHYs to match the link speed of the BASE-T link.
3166 *
3167 * A return of a non-zero value indicates an error, and the base driver should
3168 * not report link up.
3169 */
3170 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3171 {
3172 ixgbe_link_speed force_speed;
3173 bool link_up;
3174 u32 status;
3175 u16 speed;
3176
3177 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3178 return IXGBE_ERR_CONFIG;
3179
3180 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3181 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3182 /* If link is down, there is no setup necessary so return */
3183 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3184 if (status != IXGBE_SUCCESS)
3185 return status;
3186
3187 if (!link_up)
3188 return IXGBE_SUCCESS;
3189
3190 status = hw->phy.ops.read_reg(hw,
3191 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3192 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3193 &speed);
3194 if (status != IXGBE_SUCCESS)
3195 return status;
3196
3197 /* If link is still down - no setup is required so return */
3198 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3199 if (status != IXGBE_SUCCESS)
3200 return status;
3201 if (!link_up)
3202 return IXGBE_SUCCESS;
3203
3204 /* clear everything but the speed and duplex bits */
3205 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3206
3207 switch (speed) {
3208 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3209 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3210 break;
3211 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3212 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3213 break;
3214 default:
3215 /* Internal PHY does not support anything else */
3216 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3217 }
3218
3219 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3220 } else {
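/* KR-based internal link: advertise both 1G and 10G and let
 * auto-negotiation select the speed.
 */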
3221 speed = IXGBE_LINK_SPEED_10GB_FULL |
3222 IXGBE_LINK_SPEED_1GB_FULL;
3223 return ixgbe_setup_kr_speed_x550em(hw, speed);
3224 }
3225 }
3226
3227 /**
3228 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3229 * @hw: pointer to hardware structure
3230 *
3231 * Configures the integrated KR PHY to use internal loopback mode.
3232 **/
3233 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3234 {
3235 s32 status;
3236 u32 reg_val;
3237
3238 /* Disable AN and force speed to 10G Serial. */
3239 status = hw->mac.ops.read_iosf_sb_reg(hw,
3240 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3241 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3242 if (status != IXGBE_SUCCESS)
3243 return status;
3244 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3245 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3246 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3247 status = hw->mac.ops.write_iosf_sb_reg(hw,
3248 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3249 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3250 if (status != IXGBE_SUCCESS)
3251 return status;
3252
3253 /* Set near-end loopback clocks. */
3254 status = hw->mac.ops.read_iosf_sb_reg(hw,
3255 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3256 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3257 if (status != IXGBE_SUCCESS)
3258 return status;
3259 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3260 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3261 status = hw->mac.ops.write_iosf_sb_reg(hw,
3262 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3263 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3264 if (status != IXGBE_SUCCESS)
3265 return status;
3266
3267 /* Set loopback enable. */
3268 status = hw->mac.ops.read_iosf_sb_reg(hw,
3269 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3270 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3271 if (status != IXGBE_SUCCESS)
3272 return status;
3273 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3274 status = hw->mac.ops.write_iosf_sb_reg(hw,
3275 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3276 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3277 if (status != IXGBE_SUCCESS)
3278 return status;
3279
3280 /* Training bypass. */
3281 status = hw->mac.ops.read_iosf_sb_reg(hw,
3282 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3283 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3284 if (status != IXGBE_SUCCESS)
3285 return status;
3286 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3287 status = hw->mac.ops.write_iosf_sb_reg(hw,
3288 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3289 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3290
3291 return status;
3292 }
3293
3294 /**
3295 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface
3296 * command. The required semaphores are acquired and released internally.
3297 * @hw: pointer to hardware structure
3298 * @offset: offset of word in the EEPROM to read
3299 * @data: word read from the EEPROM
3300 *
3301 * Reads a 16 bit word from the EEPROM using the hostif.
3302 **/
3303 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3304 {
3305 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3306 struct ixgbe_hic_read_shadow_ram buffer;
3307 s32 status;
3308
3309 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3310 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3311 buffer.hdr.req.buf_lenh = 0;
3312 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3313 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3314
3315 /* convert offset from words to bytes */
3316 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3317 /* one word */
3318 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3319 buffer.pad2 = 0;
3320 buffer.pad3 = 0;
3321
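/* Hold both the SW/FW management and EEPROM semaphores for the duration
 * of the unlocked host interface command.
 */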
3322 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3323 if (status)
3324 return status;
3325
3326 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3327 IXGBE_HI_COMMAND_TIMEOUT);
3328 if (!status) {
3329 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3330 FW_NVM_DATA_OFFSET);
3331 }
3332
3333 hw->mac.ops.release_swfw_sync(hw, mask);
3334 return status;
3335 }
3336
3337 /**
3338 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3339 * @hw: pointer to hardware structure
3340 * @offset: offset of word in the EEPROM to read
3341 * @words: number of words
3342 * @data: word(s) read from the EEPROM
3343 *
3344 * Reads 16 bit word(s) from the EEPROM using the hostif.
3345 **/
3346 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3347 u16 offset, u16 words, u16 *data)
3348 {
3349 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3350 struct ixgbe_hic_read_shadow_ram buffer;
3351 u32 current_word = 0;
3352 u16 words_to_read;
3353 s32 status;
3354 u32 i;
3355
3356 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3357
3358 /* Take semaphore for the entire operation. */
3359 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3360 if (status) {
3361 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3362 return status;
3363 }
3364
3365 while (words) {
3366 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3367 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3368 else
3369 words_to_read = words;
3370
3371 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3372 buffer.hdr.req.buf_lenh = 0;
3373 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3374 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3375
3376 /* convert offset from words to bytes */
3377 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3378 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3379 buffer.pad2 = 0;
3380 buffer.pad3 = 0;
3381
3382 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3383 IXGBE_HI_COMMAND_TIMEOUT);
3384
3385 if (status) {
3386 DEBUGOUT("Host interface command failed\n");
3387 goto out;
3388 }
3389
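/* Each 32-bit FLEX_MNG register holds two 16-bit EEPROM words; unpack
 * the low word first and the high word if one is still needed.
 */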
3390 for (i = 0; i < words_to_read; i++) {
3391 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3392 2 * i;
3393 u32 value = IXGBE_READ_REG(hw, reg);
3394
3395 data[current_word] = (u16)(value & 0xffff);
3396 current_word++;
3397 i++;
3398 if (i < words_to_read) {
3399 value >>= 16;
3400 data[current_word] = (u16)(value & 0xffff);
3401 current_word++;
3402 }
3403 }
3404 words -= words_to_read;
3405 }
3406
3407 out:
3408 hw->mac.ops.release_swfw_sync(hw, mask);
3409 return status;
3410 }
3411
3412 /**
3413 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3414 * @hw: pointer to hardware structure
3415 * @offset: offset of word in the EEPROM to write
3416 * @data: word to write to the EEPROM
3417 *
3418 * Write a 16 bit word to the EEPROM using the hostif.
3419 **/
3420 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3421 u16 data)
3422 {
3423 s32 status;
3424 struct ixgbe_hic_write_shadow_ram buffer;
3425
3426 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3427
3428 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3429 buffer.hdr.req.buf_lenh = 0;
3430 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3431 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3432
3433 /* one word */
3434 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3435 buffer.data = data;
3436 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3437
3438 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3439 sizeof(buffer),
3440 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3441
3442 return status;
3443 }
3444
3445 /**
3446 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3447 * @hw: pointer to hardware structure
3448 * @offset: offset of word in the EEPROM to write
3449 * @data: word to write to the EEPROM
3450 *
3451 * Write a 16 bit word to the EEPROM using the hostif.
3452 **/
3453 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3454 u16 data)
3455 {
3456 s32 status = IXGBE_SUCCESS;
3457
3458 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3459
3460 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3461 IXGBE_SUCCESS) {
3462 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3463 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3464 } else {
3465 DEBUGOUT("write ee hostif failed to get semaphore");
3466 status = IXGBE_ERR_SWFW_SYNC;
3467 }
3468
3469 return status;
3470 }
3471
3472 /**
3473 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3474 * @hw: pointer to hardware structure
3475 * @offset: offset of word in the EEPROM to write
3476 * @words: number of words
3477 * @data: word(s) to write to the EEPROM
3478 *
3479 * Write 16 bit word(s) to the EEPROM using the hostif.
3480 **/
3481 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3482 u16 offset, u16 words, u16 *data)
3483 {
3484 s32 status = IXGBE_SUCCESS;
3485 u32 i = 0;
3486
3487 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3488
3489 /* Take semaphore for the entire operation. */
3490 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3491 if (status != IXGBE_SUCCESS) {
3492 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3493 goto out;
3494 }
3495
3496 for (i = 0; i < words; i++) {
3497 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3498 data[i]);
3499
3500 if (status != IXGBE_SUCCESS) {
3501 DEBUGOUT("Eeprom buffered write failed\n");
3502 break;
3503 }
3504 }
3505
3506 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3507 out:
3508
3509 return status;
3510 }
3511
3512 /**
3513 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3514 * @hw: pointer to hardware structure
3515 * @ptr: pointer offset in eeprom
3516 * @size: size of section pointed to by ptr; if 0, the first word is used as the size
3517 * @csum: address of checksum to update
3518 * @buffer: pointer to buffer containing calculated checksum
3519 * @buffer_size: size of buffer
3520 *
3521 * Returns error status for any failure
3522 */
3523 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3524 u16 size, u16 *csum, u16 *buffer,
3525 u32 buffer_size)
3526 {
3527 u16 buf[256];
3528 s32 status;
3529 u16 length, bufsz, i, start;
3530 u16 *local_buffer;
3531
3532 bufsz = sizeof(buf) / sizeof(buf[0]);
3533
3534 /* Read a chunk at the pointer location */
3535 if (!buffer) {
3536 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3537 if (status) {
3538 DEBUGOUT("Failed to read EEPROM image\n");
3539 return status;
3540 }
3541 local_buffer = buf;
3542 } else {
3543 if (buffer_size < ptr)
3544 return IXGBE_ERR_PARAM;
3545 local_buffer = &buffer[ptr];
3546 }
3547
3548 if (size) {
3549 start = 0;
3550 length = size;
3551 } else {
3552 start = 1;
3553 length = local_buffer[0];
3554
3555 /* Skip pointer section if length is invalid. */
3556 if (length == 0xFFFF || length == 0 ||
3557 (ptr + length) >= hw->eeprom.word_size)
3558 return IXGBE_SUCCESS;
3559 }
3560
3561 if (buffer && ((u32)start + (u32)length > buffer_size))
3562 return IXGBE_ERR_PARAM;
3563
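/* Sum the section word by word; when reading directly from the EEPROM,
 * refill the local chunk each time the index runs past the end of the
 * buffer.
 */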
3564 for (i = start; length; i++, length--) {
3565 if (i == bufsz && !buffer) {
3566 ptr += bufsz;
3567 i = 0;
3568 if (length < bufsz)
3569 bufsz = length;
3570
3571 /* Read a chunk at the pointer location */
3572 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3573 bufsz, buf);
3574 if (status) {
3575 DEBUGOUT("Failed to read EEPROM image\n");
3576 return status;
3577 }
3578 }
3579 *csum += local_buffer[i];
3580 }
3581 return IXGBE_SUCCESS;
3582 }
3583
3584 /**
3585 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3586 * @hw: pointer to hardware structure
3587 * @buffer: pointer to buffer containing calculated checksum
3588 * @buffer_size: size of buffer
3589 *
3590 * Returns a negative error code on error, or the 16-bit checksum
3591 **/
3592 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3593 {
3594 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3595 u16 *local_buffer;
3596 s32 status;
3597 u16 checksum = 0;
3598 u16 pointer, i, size;
3599
3600 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3601
3602 hw->eeprom.ops.init_params(hw);
3603
3604 if (!buffer) {
3605 /* Read pointer area */
3606 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3607 IXGBE_EEPROM_LAST_WORD + 1,
3608 eeprom_ptrs);
3609 if (status) {
3610 DEBUGOUT("Failed to read EEPROM image\n");
3611 return status;
3612 }
3613 local_buffer = eeprom_ptrs;
3614 } else {
3615 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3616 return IXGBE_ERR_PARAM;
3617 local_buffer = buffer;
3618 }
3619
3620 /*
3621 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3622 * checksum word itself
3623 */
3624 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3625 if (i != IXGBE_EEPROM_CHECKSUM)
3626 checksum += local_buffer[i];
3627
3628 /*
3629 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3630 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3631 */
3632 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3633 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3634 continue;
3635
3636 pointer = local_buffer[i];
3637
3638 /* Skip pointer section if the pointer is invalid. */
3639 if (pointer == 0xFFFF || pointer == 0 ||
3640 pointer >= hw->eeprom.word_size)
3641 continue;
3642
3643 switch (i) {
3644 case IXGBE_PCIE_GENERAL_PTR:
3645 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3646 break;
3647 case IXGBE_PCIE_CONFIG0_PTR:
3648 case IXGBE_PCIE_CONFIG1_PTR:
3649 size = IXGBE_PCIE_CONFIG_SIZE;
3650 break;
3651 default:
3652 size = 0;
3653 break;
3654 }
3655
3656 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3657 buffer, buffer_size);
3658 if (status)
3659 return status;
3660 }
3661
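	/*
	 * Store the complement so that summing every EEPROM word, including
	 * the checksum word itself, yields IXGBE_EEPROM_SUM.
	 */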
3662 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3663
3664 return (s32)checksum;
3665 }
3666
3667 /**
3668 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3669 * @hw: pointer to hardware structure
3670 *
3671 * Returns a negative error code on error, or the 16-bit checksum
3672 **/
3673 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3674 {
3675 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3676 }
3677
3678 /**
3679 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3680 * @hw: pointer to hardware structure
3681 * @checksum_val: calculated checksum
3682 *
3683 * Performs checksum calculation and validates the EEPROM checksum. If the
3684 * caller does not need checksum_val, the value can be NULL.
3685 **/
3686 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3687 {
3688 s32 status;
3689 u16 checksum;
3690 u16 read_checksum = 0;
3691
3692 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3693
3694 /* Read the first word from the EEPROM. If this times out or fails, do
3695 * not continue or we could be in for a very long wait while every
3696 * EEPROM read fails
3697 */
3698 status = hw->eeprom.ops.read(hw, 0, &checksum);
3699 if (status) {
3700 DEBUGOUT("EEPROM read failed\n");
3701 return status;
3702 }
3703
3704 status = hw->eeprom.ops.calc_checksum(hw);
3705 if (status < 0)
3706 return status;
3707
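	/* calc_checksum returns the 16-bit checksum in the low word on success. */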
3708 checksum = (u16)(status & 0xffff);
3709
3710 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3711 &read_checksum);
3712 if (status)
3713 return status;
3714
3715 /* Verify read checksum from EEPROM is the same as
3716 * calculated checksum
3717 */
3718 if (read_checksum != checksum) {
3719 status = IXGBE_ERR_EEPROM_CHECKSUM;
3720 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3721 "Invalid EEPROM checksum");
3722 }
3723
3724 /* If the user cares, return the calculated checksum */
3725 if (checksum_val)
3726 *checksum_val = checksum;
3727
3728 return status;
3729 }
3730
3731 /**
3732 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3733 * @hw: pointer to hardware structure
3734 *
3735  * After writing the EEPROM to shadow RAM using the EEWR register, software
3736  * calculates the checksum, updates it in the EEPROM, and instructs the
3737  * hardware to update the flash.
3738 **/
3739 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3740 {
3741 s32 status;
3742 u16 checksum = 0;
3743
3744 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3745
3746 /* Read the first word from the EEPROM. If this times out or fails, do
3747 * not continue or we could be in for a very long wait while every
3748 * EEPROM read fails
3749 */
3750 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3751 if (status) {
3752 DEBUGOUT("EEPROM read failed\n");
3753 return status;
3754 }
3755
3756 status = ixgbe_calc_eeprom_checksum_X550(hw);
3757 if (status < 0)
3758 return status;
3759
3760 checksum = (u16)(status & 0xffff);
3761
3762 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3763 checksum);
3764 if (status)
3765 return status;
3766
3767 status = ixgbe_update_flash_X550(hw);
3768
3769 return status;
3770 }
3771
3772 /**
3773 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3774 * @hw: pointer to hardware structure
3775 *
3776 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3777 **/
3778 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3779 {
3780 s32 status = IXGBE_SUCCESS;
3781 union ixgbe_hic_hdr2 buffer;
3782
3783 DEBUGFUNC("ixgbe_update_flash_X550");
3784
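	/* Build the shadow RAM dump request; firmware copies shadow RAM to flash. */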
3785 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3786 buffer.req.buf_lenh = 0;
3787 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3788 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3789
3790 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3791 sizeof(buffer),
3792 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3793
3794 return status;
3795 }
3796
3797 /**
3798 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3799 * @hw: pointer to hardware structure
3800 *
3801 * Determines physical layer capabilities of the current configuration.
3802 **/
3803 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3804 {
3805 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3806 u16 ext_ability = 0;
3807
3808 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3809
3810 hw->phy.ops.identify(hw);
3811
3812 switch (hw->phy.type) {
3813 case ixgbe_phy_x550em_kr:
3814 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3815 if (hw->phy.nw_mng_if_sel &
3816 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3817 physical_layer =
3818 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3819 break;
3820 } else if (hw->device_id ==
3821 IXGBE_DEV_ID_X550EM_A_KR_L) {
3822 physical_layer =
3823 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3824 break;
3825 }
3826 }
3827 /* fall through */
3828 case ixgbe_phy_x550em_xfi:
3829 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3830 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3831 break;
3832 case ixgbe_phy_x550em_kx4:
3833 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3834 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3835 break;
3836 case ixgbe_phy_x550em_ext_t:
3837 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3838 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3839 &ext_ability);
3840 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3841 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3842 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3843 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3844 break;
3845 case ixgbe_phy_fw:
3846 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3847 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3848 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3849 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3850 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3851 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3852 break;
3853 case ixgbe_phy_sgmii:
3854 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3855 break;
3856 case ixgbe_phy_ext_1g_t:
3857 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3858 break;
3859 default:
3860 break;
3861 }
3862
3863 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3864 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3865
3866 return physical_layer;
3867 }
3868
3869 /**
3870 * ixgbe_get_bus_info_x550em - Set PCI bus info
3871 * @hw: pointer to hardware structure
3872 *
3873 * Sets bus link width and speed to unknown because X550em is
3874 * not a PCI device.
3875 **/
3876 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3877 {
3878
3879 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3880
3881 hw->bus.width = ixgbe_bus_width_unknown;
3882 hw->bus.speed = ixgbe_bus_speed_unknown;
3883
3884 hw->mac.ops.set_lan_id(hw);
3885
3886 return IXGBE_SUCCESS;
3887 }
3888
3889 /**
3890 * ixgbe_disable_rx_x550 - Disable RX unit
3891 * @hw: pointer to hardware structure
3892 *
3893  * Disables the Rx DMA unit for x550
3894 **/
3895 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3896 {
3897 u32 rxctrl, pfdtxgswc;
3898 s32 status;
3899 struct ixgbe_hic_disable_rxen fw_cmd;
3900
3901 	DEBUGFUNC("ixgbe_disable_rx_x550");
3902
3903 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3904 if (rxctrl & IXGBE_RXCTRL_RXEN) {
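		/*
		 * Remember whether VT loopback was enabled and clear it while
		 * Rx is disabled, so it can be restored when Rx is enabled again.
		 */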
3905 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3906 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3907 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3908 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3909 hw->mac.set_lben = TRUE;
3910 } else {
3911 hw->mac.set_lben = FALSE;
3912 }
3913
3914 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3915 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3916 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3917 fw_cmd.port_number = (u8)hw->bus.lan_id;
3918
3919 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3920 sizeof(struct ixgbe_hic_disable_rxen),
3921 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3922
3923 /* If we fail - disable RX using register write */
3924 if (status) {
3925 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3926 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3927 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3928 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3929 }
3930 }
3931 }
3932 }
3933
3934 /**
3935  * ixgbe_enter_lplu_t_x550em - Transition to low power states
3936 * @hw: pointer to hardware structure
3937 *
3938 * Configures Low Power Link Up on transition to low power states
3939 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3940 * X557 PHY immediately prior to entering LPLU.
3941 **/
3942 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3943 {
3944 u16 an_10g_cntl_reg, autoneg_reg, speed;
3945 s32 status;
3946 ixgbe_link_speed lcd_speed;
3947 u32 save_autoneg;
3948 bool link_up;
3949
3950 /* SW LPLU not required on later HW revisions. */
3951 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3952 (IXGBE_FUSES0_REV_MASK &
3953 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3954 return IXGBE_SUCCESS;
3955
3956 /* If blocked by MNG FW, then don't restart AN */
3957 if (ixgbe_check_reset_blocked(hw))
3958 return IXGBE_SUCCESS;
3959
3960 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3961 if (status != IXGBE_SUCCESS)
3962 return status;
3963
3964 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3965
3966 if (status != IXGBE_SUCCESS)
3967 return status;
3968
3969 	/* If link is down, LPLU is disabled in the NVM, or both WoL and
3970 	 * manageability are disabled, then force link down by entering low power mode.
3971 	 */
3972 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3973 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3974 return ixgbe_set_copper_phy_power(hw, FALSE);
3975
3976 /* Determine LCD */
3977 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3978
3979 if (status != IXGBE_SUCCESS)
3980 return status;
3981
3982 /* If no valid LCD link speed, then force link down and exit. */
3983 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3984 return ixgbe_set_copper_phy_power(hw, FALSE);
3985
3986 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3987 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3988 &speed);
3989
3990 if (status != IXGBE_SUCCESS)
3991 return status;
3992
3993 /* If no link now, speed is invalid so take link down */
3994 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3995 if (status != IXGBE_SUCCESS)
3996 return ixgbe_set_copper_phy_power(hw, FALSE);
3997
3998 /* clear everything but the speed bits */
3999 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
4000
4001 /* If current speed is already LCD, then exit. */
4002 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
4003 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
4004 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
4005 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
4006 return status;
4007
4008 /* Clear AN completed indication */
4009 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
4010 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4011 &autoneg_reg);
4012
4013 if (status != IXGBE_SUCCESS)
4014 return status;
4015
4016 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4017 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4018 &an_10g_cntl_reg);
4019
4020 if (status != IXGBE_SUCCESS)
4021 return status;
4022
4023 status = hw->phy.ops.read_reg(hw,
4024 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4025 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4026 &autoneg_reg);
4027
4028 if (status != IXGBE_SUCCESS)
4029 return status;
4030
4031 save_autoneg = hw->phy.autoneg_advertised;
4032
4033 	/* Set up link at the lowest common link speed */
4034 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4035
4036 /* restore autoneg from before setting lplu speed */
4037 hw->phy.autoneg_advertised = save_autoneg;
4038
4039 return status;
4040 }
4041
4042 /**
4043  * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4044 * @hw: pointer to hardware structure
4045 * @lcd_speed: pointer to lowest common link speed
4046 *
4047 * Determine lowest common link speed with link partner.
4048 **/
4049 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4050 {
4051 u16 an_lp_status;
4052 s32 status;
4053 u16 word = hw->eeprom.ctrl_word_3;
4054
4055 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4056
4057 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4058 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4059 &an_lp_status);
4060
4061 if (status != IXGBE_SUCCESS)
4062 return status;
4063
4064 /* If link partner advertised 1G, return 1G */
4065 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4066 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4067 return status;
4068 }
4069
4070 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4071 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4072 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4073 return status;
4074
4075 /* Link partner not capable of lower speeds, return 10G */
4076 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4077 return status;
4078 }
4079
4080 /**
4081 * ixgbe_setup_fc_X550em - Set up flow control
4082 * @hw: pointer to hardware structure
4083 *
4084 * Called at init time to set up flow control.
4085 **/
4086 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4087 {
4088 s32 ret_val = IXGBE_SUCCESS;
4089 u32 pause, asm_dir, reg_val;
4090
4091 DEBUGFUNC("ixgbe_setup_fc_X550em");
4092
4093 /* Validate the requested mode */
4094 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4095 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4096 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4097 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4098 goto out;
4099 }
4100
4101 /* 10gig parts do not have a word in the EEPROM to determine the
4102 * default flow control setting, so we explicitly set it to full.
4103 */
4104 if (hw->fc.requested_mode == ixgbe_fc_default)
4105 hw->fc.requested_mode = ixgbe_fc_full;
4106
4107 /* Determine PAUSE and ASM_DIR bits. */
4108 switch (hw->fc.requested_mode) {
4109 case ixgbe_fc_none:
4110 pause = 0;
4111 asm_dir = 0;
4112 break;
4113 case ixgbe_fc_tx_pause:
4114 pause = 0;
4115 asm_dir = 1;
4116 break;
4117 case ixgbe_fc_rx_pause:
4118 /* Rx Flow control is enabled and Tx Flow control is
4119 * disabled by software override. Since there really
4120 * isn't a way to advertise that we are capable of RX
4121 * Pause ONLY, we will advertise that we support both
4122 * symmetric and asymmetric Rx PAUSE, as such we fall
4123 * through to the fc_full statement. Later, we will
4124 * disable the adapter's ability to send PAUSE frames.
4125 */
4126 case ixgbe_fc_full:
4127 pause = 1;
4128 asm_dir = 1;
4129 break;
4130 default:
4131 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4132 "Flow control param set incorrectly\n");
4133 ret_val = IXGBE_ERR_CONFIG;
4134 goto out;
4135 }
4136
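	/*
	 * For backplane (KR) devices the PAUSE/ASM_DIR advertisement is
	 * programmed directly into the KR PHY AN control register.
	 */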
4137 switch (hw->device_id) {
4138 case IXGBE_DEV_ID_X550EM_X_KR:
4139 case IXGBE_DEV_ID_X550EM_A_KR:
4140 case IXGBE_DEV_ID_X550EM_A_KR_L:
4141 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4142 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4143 			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4144 if (ret_val != IXGBE_SUCCESS)
4145 goto out;
4146 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4147 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4148 if (pause)
4149 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4150 if (asm_dir)
4151 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4152 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4153 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4154 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4155
4156 /* This device does not fully support AN. */
4157 hw->fc.disable_fc_autoneg = TRUE;
4158 break;
4159 case IXGBE_DEV_ID_X550EM_X_XFI:
4160 hw->fc.disable_fc_autoneg = TRUE;
4161 break;
4162 default:
4163 break;
4164 }
4165
4166 out:
4167 return ret_val;
4168 }
4169
4170 /**
4171 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4172 * @hw: pointer to hardware structure
4173 *
4174 * Enable flow control according to IEEE clause 37.
4175 **/
4176 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4177 {
4178 u32 link_s1, lp_an_page_low, an_cntl_1;
4179 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4180 ixgbe_link_speed speed;
4181 bool link_up;
4182
4183 /* AN should have completed when the cable was plugged in.
4184 * Look for reasons to bail out. Bail out if:
4185 * - FC autoneg is disabled, or if
4186 * - link is not up.
4187 */
4188 if (hw->fc.disable_fc_autoneg) {
4189 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4190 "Flow control autoneg is disabled");
4191 goto out;
4192 }
4193
4194 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4195 if (!link_up) {
4196 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4197 goto out;
4198 }
4199
4200 	/* Check that auto-negotiation has completed */
4201 status = hw->mac.ops.read_iosf_sb_reg(hw,
4202 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4203 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4204
4205 if (status != IXGBE_SUCCESS ||
4206 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4207 DEBUGOUT("Auto-Negotiation did not complete\n");
4208 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4209 goto out;
4210 }
4211
4212 /* Read the 10g AN autoc and LP ability registers and resolve
4213 * local flow control settings accordingly
4214 */
4215 status = hw->mac.ops.read_iosf_sb_reg(hw,
4216 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4217 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4218
4219 if (status != IXGBE_SUCCESS) {
4220 DEBUGOUT("Auto-Negotiation did not complete\n");
4221 goto out;
4222 }
4223
4224 status = hw->mac.ops.read_iosf_sb_reg(hw,
4225 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4226 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4227
4228 if (status != IXGBE_SUCCESS) {
4229 DEBUGOUT("Auto-Negotiation did not complete\n");
4230 goto out;
4231 }
4232
4233 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4234 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4235 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4236 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4237 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4238
4239 out:
4240 if (status == IXGBE_SUCCESS) {
4241 hw->fc.fc_was_autonegged = TRUE;
4242 } else {
4243 hw->fc.fc_was_autonegged = FALSE;
4244 hw->fc.current_mode = hw->fc.requested_mode;
4245 }
4246 }
4247
4248 /**
4249 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4250 * @hw: pointer to hardware structure
4251  * Fiber flow control is not negotiated; the requested mode is passed through
4251  * as the current mode.
4252 **/
4253 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4254 {
4255 hw->fc.fc_was_autonegged = FALSE;
4256 hw->fc.current_mode = hw->fc.requested_mode;
4257 }
4258
4259 /**
4260 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4261 * @hw: pointer to hardware structure
4262 *
4263 * Enable flow control according to IEEE clause 37.
4264 **/
4265 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4266 {
4267 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4268 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4269 ixgbe_link_speed speed;
4270 bool link_up;
4271
4272 /* AN should have completed when the cable was plugged in.
4273 * Look for reasons to bail out. Bail out if:
4274 * - FC autoneg is disabled, or if
4275 * - link is not up.
4276 */
4277 if (hw->fc.disable_fc_autoneg) {
4278 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4279 "Flow control autoneg is disabled");
4280 goto out;
4281 }
4282
4283 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4284 if (!link_up) {
4285 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4286 goto out;
4287 }
4288
4289 /* Check if auto-negotiation has completed */
4290 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4291 if (status != IXGBE_SUCCESS ||
4292 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4293 DEBUGOUT("Auto-Negotiation did not complete\n");
4294 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4295 goto out;
4296 }
4297
4298 /* Negotiate the flow control */
4299 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4300 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4301 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4302 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4303 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4304
4305 out:
4306 if (status == IXGBE_SUCCESS) {
4307 hw->fc.fc_was_autonegged = TRUE;
4308 } else {
4309 hw->fc.fc_was_autonegged = FALSE;
4310 hw->fc.current_mode = hw->fc.requested_mode;
4311 }
4312 }
4313
4314 /**
4315 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4316 * @hw: pointer to hardware structure
4317 *
4318 * Called at init time to set up flow control.
4319 **/
4320 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4321 {
4322 s32 status = IXGBE_SUCCESS;
4323 u32 an_cntl = 0;
4324
4325 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4326
4327 /* Validate the requested mode */
4328 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4329 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4330 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4331 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4332 }
4333
4334 if (hw->fc.requested_mode == ixgbe_fc_default)
4335 hw->fc.requested_mode = ixgbe_fc_full;
4336
4337 /* Set up the 1G and 10G flow control advertisement registers so the
4338 * HW will be able to do FC autoneg once the cable is plugged in. If
4339 * we link at 10G, the 1G advertisement is harmless and vice versa.
4340 */
4341 status = hw->mac.ops.read_iosf_sb_reg(hw,
4342 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4343 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4344
4345 if (status != IXGBE_SUCCESS) {
4346 DEBUGOUT("Auto-Negotiation did not complete\n");
4347 return status;
4348 }
4349
4350 /* The possible values of fc.requested_mode are:
4351 * 0: Flow control is completely disabled
4352 * 1: Rx flow control is enabled (we can receive pause frames,
4353 * but not send pause frames).
4354 * 2: Tx flow control is enabled (we can send pause frames but
4355 * we do not support receiving pause frames).
4356 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4357 * other: Invalid.
4358 */
4359 switch (hw->fc.requested_mode) {
4360 case ixgbe_fc_none:
4361 /* Flow control completely disabled by software override. */
4362 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4363 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4364 break;
4365 case ixgbe_fc_tx_pause:
4366 /* Tx Flow control is enabled, and Rx Flow control is
4367 * disabled by software override.
4368 */
4369 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4370 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4371 break;
4372 case ixgbe_fc_rx_pause:
4373 /* Rx Flow control is enabled and Tx Flow control is
4374 * disabled by software override. Since there really
4375 * isn't a way to advertise that we are capable of RX
4376 * Pause ONLY, we will advertise that we support both
4377 * symmetric and asymmetric Rx PAUSE, as such we fall
4378 * through to the fc_full statement. Later, we will
4379 * disable the adapter's ability to send PAUSE frames.
4380 */
4381 case ixgbe_fc_full:
4382 /* Flow control (both Rx and Tx) is enabled by SW override. */
4383 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4384 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4385 break;
4386 default:
4387 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4388 "Flow control param set incorrectly\n");
4389 return IXGBE_ERR_CONFIG;
4390 }
4391
4392 status = hw->mac.ops.write_iosf_sb_reg(hw,
4393 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4394 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4395
4396 /* Restart auto-negotiation. */
4397 status = ixgbe_restart_an_internal_phy_x550em(hw);
4398
4399 return status;
4400 }
4401
4402 /**
4403 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4404 * @hw: pointer to hardware structure
4405 * @state: set mux if 1, clear if 0
4406 */
4407 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4408 {
4409 u32 esdp;
4410
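	/* Only port 1 (lan_id != 0) reaches the CS4227 through the mux. */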
4411 if (!hw->bus.lan_id)
4412 return;
4413 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4414 if (state)
4415 esdp |= IXGBE_ESDP_SDP1;
4416 else
4417 esdp &= ~IXGBE_ESDP_SDP1;
4418 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4419 IXGBE_WRITE_FLUSH(hw);
4420 }
4421
4422 /**
4423 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4424 * @hw: pointer to hardware structure
4425 * @mask: Mask to specify which semaphore to acquire
4426 *
4427 * Acquires the SWFW semaphore and sets the I2C MUX
4428 **/
4429 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4430 {
4431 s32 status;
4432
4433 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4434
4435 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4436 if (status)
4437 return status;
4438
4439 if (mask & IXGBE_GSSR_I2C_MASK)
4440 ixgbe_set_mux(hw, 1);
4441
4442 return IXGBE_SUCCESS;
4443 }
4444
4445 /**
4446 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4447 * @hw: pointer to hardware structure
4448 * @mask: Mask to specify which semaphore to release
4449 *
4450  * Releases the SWFW semaphore and clears the I2C MUX
4451 **/
4452 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4453 {
4454 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4455
4456 if (mask & IXGBE_GSSR_I2C_MASK)
4457 ixgbe_set_mux(hw, 0);
4458
4459 ixgbe_release_swfw_sync_X540(hw, mask);
4460 }
4461
4462 /**
4463 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4464 * @hw: pointer to hardware structure
4465 * @mask: Mask to specify which semaphore to acquire
4466 *
4467  * Acquires the SWFW semaphore and gets the shared PHY token as needed
4468 */
4469 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4470 {
4471 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4472 int retries = FW_PHY_TOKEN_RETRIES;
4473 s32 status = IXGBE_SUCCESS;
4474
4475 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4476
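	/*
	 * Acquire the host semaphore first, then try to take the PHY token.
	 * On IXGBE_ERR_TOKEN_RETRY the semaphore is dropped and the whole
	 * sequence is retried up to FW_PHY_TOKEN_RETRIES times.
	 */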
4477 while (--retries) {
4478 status = IXGBE_SUCCESS;
4479 if (hmask)
4480 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4481 if (status) {
4482 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4483 status);
4484 return status;
4485 }
4486 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4487 return IXGBE_SUCCESS;
4488
4489 status = ixgbe_get_phy_token(hw);
4490 if (status == IXGBE_ERR_TOKEN_RETRY)
4491 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4492 status);
4493
4494 if (status == IXGBE_SUCCESS)
4495 return IXGBE_SUCCESS;
4496
4497 if (hmask)
4498 ixgbe_release_swfw_sync_X540(hw, hmask);
4499
4500 if (status != IXGBE_ERR_TOKEN_RETRY) {
4501 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4502 status);
4503 return status;
4504 }
4505 }
4506
4507 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4508 hw->phy.id);
4509 return status;
4510 }
4511
4512 /**
4513 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4514 * @hw: pointer to hardware structure
4515 * @mask: Mask to specify which semaphore to release
4516 *
4517  * Releases the SWFW semaphore and puts back the shared PHY token as needed
4518 */
4519 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4520 {
4521 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4522
4523 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4524
4525 if (mask & IXGBE_GSSR_TOKEN_SM)
4526 ixgbe_put_phy_token(hw);
4527
4528 if (hmask)
4529 ixgbe_release_swfw_sync_X540(hw, hmask);
4530 }
4531
4532 /**
4533 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4534 * @hw: pointer to hardware structure
4535 * @reg_addr: 32 bit address of PHY register to read
4536 * @device_type: 5 bit device type
4537 * @phy_data: Pointer to read data from PHY register
4538 *
4539 * Reads a value from a specified PHY register using the SWFW lock and PHY
4540  * Token. The PHY Token is needed since the MDIO is shared between two MAC
4541 * instances.
4542 **/
4543 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4544 u32 device_type, u16 *phy_data)
4545 {
4546 s32 status;
4547 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4548
4549 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4550
4551 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4552 return IXGBE_ERR_SWFW_SYNC;
4553
4554 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4555
4556 hw->mac.ops.release_swfw_sync(hw, mask);
4557
4558 return status;
4559 }
4560
4561 /**
4562 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4563 * @hw: pointer to hardware structure
4564 * @reg_addr: 32 bit PHY register to write
4565 * @device_type: 5 bit device type
4566 * @phy_data: Data to write to the PHY register
4567 *
4568 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4569  * The PHY Token is needed since the MDIO is shared between two MAC instances.
4570 **/
4571 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4572 u32 device_type, u16 phy_data)
4573 {
4574 s32 status;
4575 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4576
4577 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4578
4579 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4580 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4581 phy_data);
4582 hw->mac.ops.release_swfw_sync(hw, mask);
4583 } else {
4584 status = IXGBE_ERR_SWFW_SYNC;
4585 }
4586
4587 return status;
4588 }
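/*
 * Illustrative sketch (assumptions noted): a read-modify-write of an external
 * PHY register through the token-aware helpers might look like the following;
 * reg_addr, dev_type, and some_mask are placeholders, not driver symbols.
 *
 *	u16 val;
 *
 *	if (ixgbe_read_phy_reg_x550a(hw, reg_addr, dev_type, &val) ==
 *	    IXGBE_SUCCESS) {
 *		val |= some_mask;
 *		ixgbe_write_phy_reg_x550a(hw, reg_addr, dev_type, val);
 *	}
 */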
4589
4590 /**
4591 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4592 * @hw: pointer to hardware structure
4593 *
4594  * Handle external Base T PHY interrupt. If a high temperature failure
4595  * alarm is indicated, return an error; if a link status change is
4596  * indicated, set up the internal/external PHY link.
4597  *
4598  * Returns IXGBE_ERR_OVERTEMP if the interrupt is a high temperature
4599  * failure alarm, else returns the PHY access status.
4600 */
4601 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4602 {
4603 bool lsc;
4604 u32 status;
4605
4606 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4607
4608 if (status != IXGBE_SUCCESS)
4609 return status;
4610
4611 if (lsc)
4612 return ixgbe_setup_internal_phy(hw);
4613
4614 return IXGBE_SUCCESS;
4615 }
4616
4617 /**
4618 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4619 * @hw: pointer to hardware structure
4620 * @speed: new link speed
4621 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4622 *
4623 * Setup internal/external PHY link speed based on link speed, then set
4624 * external PHY auto advertised link speed.
4625 *
4626 * Returns error status for any failure
4627 **/
4628 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4629 ixgbe_link_speed speed,
4630 bool autoneg_wait_to_complete)
4631 {
4632 s32 status;
4633 ixgbe_link_speed force_speed;
4634
4635 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4636
4637 	/* Setup internal/external PHY link speed to iXFI (10G), unless only
4638 	 * 1G is auto advertised, in which case set up a KX link.
4639 	 */
4640 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4641 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4642 else
4643 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4644
4645 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4646 */
4647 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4648 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4649 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4650
4651 if (status != IXGBE_SUCCESS)
4652 return status;
4653 }
4654
4655 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4656 }
4657
4658 /**
4659 * ixgbe_check_link_t_X550em - Determine link and speed status
4660 * @hw: pointer to hardware structure
4661 * @speed: pointer to link speed
4662 * @link_up: TRUE when link is up
4663 * @link_up_wait_to_complete: bool used to wait for link up or not
4664 *
4665 * Check that both the MAC and X557 external PHY have link.
4666 **/
4667 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4668 bool *link_up, bool link_up_wait_to_complete)
4669 {
4670 u32 status;
4671 u16 i, autoneg_status = 0;
4672
4673 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4674 return IXGBE_ERR_CONFIG;
4675
4676 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4677 link_up_wait_to_complete);
4678
4679 /* If check link fails or MAC link is not up, then return */
4680 if (status != IXGBE_SUCCESS || !(*link_up))
4681 return status;
4682
4683 	/* MAC link is up, so check the external X557 PHY link.
4684 	 * The PHY link status is latching low; a single read can only detect
4685 	 * a link drop, not the current state of the link, so perform
4686 	 * back-to-back reads.
4687 	 */
4688 for (i = 0; i < 2; i++) {
4689 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4690 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4691 &autoneg_status);
4692
4693 if (status != IXGBE_SUCCESS)
4694 return status;
4695 }
4696
4697 /* If external PHY link is not up, then indicate link not up */
4698 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4699 *link_up = FALSE;
4700
4701 return IXGBE_SUCCESS;
4702 }
4703
4704 /**
4705 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4706 * @hw: pointer to hardware structure
4707 **/
4708 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4709 {
4710 s32 status;
4711
4712 status = ixgbe_reset_phy_generic(hw);
4713
4714 if (status != IXGBE_SUCCESS)
4715 return status;
4716
4717 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4718 return ixgbe_enable_lasi_ext_t_x550em(hw);
4719 }
4720
4721 /**
4722 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4723 * @hw: pointer to hardware structure
4724 * @led_idx: led number to turn on
4725 **/
4726 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4727 {
4728 u16 phy_data;
4729
4730 DEBUGFUNC("ixgbe_led_on_t_X550em");
4731
4732 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4733 return IXGBE_ERR_PARAM;
4734
4735 /* To turn on the LED, set mode to ON. */
4736 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4737 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4738 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4739 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4740 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4741
4742 /* Some designs have the LEDs wired to the MAC */
4743 return ixgbe_led_on_generic(hw, led_idx);
4744 }
4745
4746 /**
4747 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4748 * @hw: pointer to hardware structure
4749 * @led_idx: led number to turn off
4750 **/
4751 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4752 {
4753 u16 phy_data;
4754
4755 DEBUGFUNC("ixgbe_led_off_t_X550em");
4756
4757 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4758 return IXGBE_ERR_PARAM;
4759
4760 	/* To turn off the LED, set mode to OFF. */
4761 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4762 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4763 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4764 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4765 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4766
4767 /* Some designs have the LEDs wired to the MAC */
4768 return ixgbe_led_off_generic(hw, led_idx);
4769 }
4770
4771 /**
4772 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4773 * @hw: pointer to the HW structure
4774 * @maj: driver version major number
4775 * @min: driver version minor number
4776 * @build: driver version build number
4777 * @sub: driver version sub build number
4778 * @len: length of driver_ver string
4779 * @driver_ver: driver string
4780 *
4781 * Sends driver version number to firmware through the manageability
4782  * block. On success returns IXGBE_SUCCESS, else returns
4783  * IXGBE_ERR_SWFW_SYNC when encountering an error acquiring the
4784  * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails.
4785 **/
4786 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4787 u8 build, u8 sub, u16 len, const char *driver_ver)
4788 {
4789 struct ixgbe_hic_drv_info2 fw_cmd;
4790 s32 ret_val = IXGBE_SUCCESS;
4791 int i;
4792
4793 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4794
4795 if ((len == 0) || (driver_ver == NULL) ||
4796 (len > sizeof(fw_cmd.driver_string)))
4797 return IXGBE_ERR_INVALID_ARGUMENT;
4798
4799 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4800 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4801 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4802 fw_cmd.port_num = (u8)hw->bus.func;
4803 fw_cmd.ver_maj = maj;
4804 fw_cmd.ver_min = min;
4805 fw_cmd.ver_build = build;
4806 fw_cmd.ver_sub = sub;
4807 fw_cmd.hdr.checksum = 0;
4808 memcpy(fw_cmd.driver_string, driver_ver, len);
4809 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4810 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4811
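	/*
	 * Retry the host interface command up to FW_CEM_MAX_RETRIES times on
	 * failure; once it succeeds, any response status other than
	 * FW_CEM_RESP_STATUS_SUCCESS is reported as a host interface error.
	 */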
4812 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4813 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4814 sizeof(fw_cmd),
4815 IXGBE_HI_COMMAND_TIMEOUT,
4816 TRUE);
4817 if (ret_val != IXGBE_SUCCESS)
4818 continue;
4819
4820 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4821 FW_CEM_RESP_STATUS_SUCCESS)
4822 ret_val = IXGBE_SUCCESS;
4823 else
4824 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4825
4826 break;
4827 }
4828
4829 return ret_val;
4830 }
4831
4832 /**
4833 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4834  * @hw: pointer to hardware structure
4835 *
4836 * Returns TRUE if in FW NVM recovery mode.
4837 **/
4838 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4839 {
4840 u32 fwsm;
4841
4842 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4843
4844 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4845 }
4846
4847