1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: ixgbe_x550.c,v 1.18.10.1 2021/05/13 00:47:31 thorpej Exp $");
37
38 #include "ixgbe_x550.h"
39 #include "ixgbe_x540.h"
40 #include "ixgbe_type.h"
41 #include "ixgbe_api.h"
42 #include "ixgbe_common.h"
43 #include "ixgbe_phy.h"
44 #include <dev/mii/mii.h>
45
46 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
47 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
48 ixgbe_link_speed speed,
49 bool autoneg_wait_to_complete);
50 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
51 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
52 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
53
54 /**
55 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
56 * @hw: pointer to hardware structure
57 *
58 * Initialize the function pointers and assign the MAC type for X550.
59 * Does not touch the hardware.
60 **/
61 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
62 {
63 struct ixgbe_mac_info *mac = &hw->mac;
64 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
65 s32 ret_val;
66
67 DEBUGFUNC("ixgbe_init_ops_X550");
68
69 ret_val = ixgbe_init_ops_X540(hw);
70 mac->ops.dmac_config = ixgbe_dmac_config_X550;
71 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
72 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
73 mac->ops.setup_eee = NULL;
74 mac->ops.set_source_address_pruning =
75 ixgbe_set_source_address_pruning_X550;
76 mac->ops.set_ethertype_anti_spoofing =
77 ixgbe_set_ethertype_anti_spoofing_X550;
78
79 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
80 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
81 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
82 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
83 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
84 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
85 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
86 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
87 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
88
89 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
90 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
91 mac->ops.mdd_event = ixgbe_mdd_event_X550;
92 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
93 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
94 mac->ops.disable_rx = ixgbe_disable_rx_x550;
95 /* Manageability interface */
96 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
97 switch (hw->device_id) {
98 case IXGBE_DEV_ID_X550EM_X_1G_T:
99 hw->mac.ops.led_on = NULL;
100 hw->mac.ops.led_off = NULL;
101 break;
102 case IXGBE_DEV_ID_X550EM_X_10G_T:
103 case IXGBE_DEV_ID_X550EM_A_10G_T:
104 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
105 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
106 break;
107 default:
108 break;
109 }
110 return ret_val;
111 }
112
113 /**
114 * ixgbe_read_cs4227 - Read CS4227 register
115 * @hw: pointer to hardware structure
116 * @reg: register number to read
117 * @value: pointer to receive value read
118 *
119 * Returns status code
120 **/
121 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
122 {
123 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
124 }
125
126 /**
127 * ixgbe_write_cs4227 - Write CS4227 register
128 * @hw: pointer to hardware structure
129 * @reg: register number to write
130 * @value: value to write to register
131 *
132 * Returns status code
133 **/
134 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
135 {
136 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
137 }
138
139 /**
140 * ixgbe_read_pe - Read register from port expander
141 * @hw: pointer to hardware structure
142 * @reg: register number to read
143 * @value: pointer to receive read value
144 *
145 * Returns status code
146 **/
147 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
148 {
149 s32 status;
150
151 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
152 if (status != IXGBE_SUCCESS)
153 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
154 "port expander access failed with %d\n", status);
155 return status;
156 }
157
158 /**
159 * ixgbe_write_pe - Write register to port expander
160 * @hw: pointer to hardware structure
161 * @reg: register number to write
162 * @value: value to write
163 *
164 * Returns status code
165 **/
166 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
167 {
168 s32 status;
169
170 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
171 if (status != IXGBE_SUCCESS)
172 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
173 "port expander access failed with %d\n", status);
174 return status;
175 }
176
177 /**
178 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
179 * @hw: pointer to hardware structure
180 *
181 * This function assumes that the caller has acquired the proper semaphore.
182 * Returns error code
183 **/
184 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
185 {
186 s32 status;
187 u32 retry;
188 u16 value;
189 u8 reg;
190
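/*
 * Reset sequencing via the port expander: latch the output bit high,
 * configure the pin as an output (clearing its config bit, assuming a
 * PCA95xx-style expander), drive it low for IXGBE_CS4227_RESET_HOLD
 * microseconds, then release it high again.
 */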
191 /* Trigger hard reset. */
192 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
193 if (status != IXGBE_SUCCESS)
194 return status;
195 reg |= IXGBE_PE_BIT1;
196 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
197 if (status != IXGBE_SUCCESS)
198 return status;
199
200 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
201 if (status != IXGBE_SUCCESS)
202 return status;
203 reg &= ~IXGBE_PE_BIT1;
204 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
205 if (status != IXGBE_SUCCESS)
206 return status;
207
208 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
209 if (status != IXGBE_SUCCESS)
210 return status;
211 reg &= ~IXGBE_PE_BIT1;
212 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
213 if (status != IXGBE_SUCCESS)
214 return status;
215
216 usec_delay(IXGBE_CS4227_RESET_HOLD);
217
218 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
219 if (status != IXGBE_SUCCESS)
220 return status;
221 reg |= IXGBE_PE_BIT1;
222 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
223 if (status != IXGBE_SUCCESS)
224 return status;
225
226 /* Wait for the reset to complete. */
227 msec_delay(IXGBE_CS4227_RESET_DELAY);
228 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
229 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
230 &value);
231 if (status == IXGBE_SUCCESS &&
232 value == IXGBE_CS4227_EEPROM_LOAD_OK)
233 break;
234 msec_delay(IXGBE_CS4227_CHECK_DELAY);
235 }
236 if (retry == IXGBE_CS4227_RETRIES) {
237 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
238 "CS4227 reset did not complete.");
239 return IXGBE_ERR_PHY;
240 }
241
242 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
243 if (status != IXGBE_SUCCESS ||
244 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
245 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
246 "CS4227 EEPROM did not load successfully.");
247 return IXGBE_ERR_PHY;
248 }
249
250 return IXGBE_SUCCESS;
251 }
252
253 /**
254 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
255 * @hw: pointer to hardware structure
256 **/
257 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
258 {
259 s32 status = IXGBE_SUCCESS;
260 u32 swfw_mask = hw->phy.phy_semaphore_mask;
261 u16 value = 0;
262 u8 retry;
263
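/*
 * The CS4227 is shared between both LAN ports.  Its scratch register is
 * used as a cross-port flag: RESET_COMPLETE means the other port already
 * reset the device, RESET_PENDING means a reset is in progress there,
 * and anything else means this port must perform the reset itself.
 */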
264 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
265 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
266 if (status != IXGBE_SUCCESS) {
267 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
268 "semaphore failed with %d", status);
269 msec_delay(IXGBE_CS4227_CHECK_DELAY);
270 continue;
271 }
272
273 /* Get status of reset flow. */
274 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
275
276 if (status == IXGBE_SUCCESS &&
277 value == IXGBE_CS4227_RESET_COMPLETE)
278 goto out;
279
280 if (status != IXGBE_SUCCESS ||
281 value != IXGBE_CS4227_RESET_PENDING)
282 break;
283
284 /* Reset is pending. Wait and check again. */
285 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
286 msec_delay(IXGBE_CS4227_CHECK_DELAY);
287 }
288
289 /* If still pending, assume other instance failed. */
290 if (retry == IXGBE_CS4227_RETRIES) {
291 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
292 if (status != IXGBE_SUCCESS) {
293 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
294 "semaphore failed with %d", status);
295 return;
296 }
297 }
298
299 /* Reset the CS4227. */
300 status = ixgbe_reset_cs4227(hw);
301 if (status != IXGBE_SUCCESS) {
302 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
303 "CS4227 reset failed: %d", status);
304 goto out;
305 }
306
307 /* Reset takes so long, temporarily release semaphore in case the
308 * other driver instance is waiting for the reset indication.
309 */
310 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
311 IXGBE_CS4227_RESET_PENDING);
312 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
313 msec_delay(10);
314 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
315 if (status != IXGBE_SUCCESS) {
316 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
317 "semaphore failed with %d", status);
318 return;
319 }
320
321 /* Record completion for next time. */
322 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
323 IXGBE_CS4227_RESET_COMPLETE);
324
325 out:
326 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
327 msec_delay(hw->eeprom.semaphore_delay);
328 }
329
330 /**
331 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
332 * @hw: pointer to hardware structure
333 **/
334 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
335 {
336 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
337
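/*
 * Take SDP0/SDP1 out of native mode so they can steer the I2C mux in
 * front of the CS4227: the second port drives SDP1 as an output, and
 * SDP0 is left as an input (direction bit cleared) on both ports.
 */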
338 if (hw->bus.lan_id) {
339 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
340 esdp |= IXGBE_ESDP_SDP1_DIR;
341 }
342 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
343 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
344 IXGBE_WRITE_FLUSH(hw);
345 }
346
347 /**
348 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
349 * @hw: pointer to hardware structure
350 * @reg_addr: 32 bit address of PHY register to read
351 * @dev_type: always unused
352 * @phy_data: Pointer to read data from PHY register
353 */
354 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
355 u32 dev_type, u16 *phy_data)
356 {
357 u32 i, data, command;
358 UNREFERENCED_1PARAMETER(dev_type);
359
360 /* Setup and write the read command */
361 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
362 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
363 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
364 IXGBE_MSCA_MDI_COMMAND;
365
366 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
367
368 /* Check every 10 usec to see if the access completed.
369 * The MDI Command bit will clear when the operation is
370 * complete
371 */
372 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
373 usec_delay(10);
374
375 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
376 if (!(command & IXGBE_MSCA_MDI_COMMAND))
377 break;
378 }
379
380 if (command & IXGBE_MSCA_MDI_COMMAND) {
381 ERROR_REPORT1(IXGBE_ERROR_POLLING,
382 "PHY read command did not complete.\n");
383 return IXGBE_ERR_PHY;
384 }
385
386 /* Read operation is complete. Get the data from MSRWD */
387 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
388 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
389 *phy_data = (u16)data;
390
391 return IXGBE_SUCCESS;
392 }
393
394 /**
395 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
396 * @hw: pointer to hardware structure
397 * @reg_addr: 32 bit PHY register to write
398 * @dev_type: always unused
399 * @phy_data: Data to write to the PHY register
400 */
401 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
402 u32 dev_type, u16 phy_data)
403 {
404 u32 i, command;
405 UNREFERENCED_1PARAMETER(dev_type);
406
407 /* Put the data in the MDI single read and write data register*/
408 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
409
410 /* Setup and write the write command */
411 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
412 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
413 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
414 IXGBE_MSCA_MDI_COMMAND;
415
416 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
417
418 /* Check every 10 usec to see if the access completed.
419 * The MDI Command bit will clear when the operation is
420 * complete
421 */
422 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
423 usec_delay(10);
424
425 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
426 if (!(command & IXGBE_MSCA_MDI_COMMAND))
427 break;
428 }
429
430 if (command & IXGBE_MSCA_MDI_COMMAND) {
431 ERROR_REPORT1(IXGBE_ERROR_POLLING,
432 "PHY write cmd didn't complete\n");
433 return IXGBE_ERR_PHY;
434 }
435
436 return IXGBE_SUCCESS;
437 }
438
439 /**
440 * ixgbe_identify_phy_x550em - Get PHY type based on device id
441 * @hw: pointer to hardware structure
442 *
443 * Returns error code
444 */
445 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
446 {
447 hw->mac.ops.set_lan_id(hw);
448
449 ixgbe_read_mng_if_sel_x550em(hw);
450
451 switch (hw->device_id) {
452 case IXGBE_DEV_ID_X550EM_A_SFP:
453 return ixgbe_identify_sfp_module_X550em(hw);
454 case IXGBE_DEV_ID_X550EM_X_SFP:
455 /* set up for CS4227 usage */
456 ixgbe_setup_mux_ctl(hw);
457 ixgbe_check_cs4227(hw);
458 /* Fallthrough */
459
460 case IXGBE_DEV_ID_X550EM_A_SFP_N:
461 return ixgbe_identify_sfp_module_X550em(hw);
462 break;
463 case IXGBE_DEV_ID_X550EM_X_KX4:
464 hw->phy.type = ixgbe_phy_x550em_kx4;
465 break;
466 case IXGBE_DEV_ID_X550EM_X_XFI:
467 hw->phy.type = ixgbe_phy_x550em_xfi;
468 break;
469 case IXGBE_DEV_ID_X550EM_X_KR:
470 case IXGBE_DEV_ID_X550EM_A_KR:
471 case IXGBE_DEV_ID_X550EM_A_KR_L:
472 hw->phy.type = ixgbe_phy_x550em_kr;
473 break;
474 case IXGBE_DEV_ID_X550EM_A_10G_T:
475 case IXGBE_DEV_ID_X550EM_X_10G_T:
476 return ixgbe_identify_phy_generic(hw);
477 case IXGBE_DEV_ID_X550EM_X_1G_T:
478 hw->phy.type = ixgbe_phy_ext_1g_t;
479 break;
480 case IXGBE_DEV_ID_X550EM_A_1G_T:
481 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
482 hw->phy.type = ixgbe_phy_fw;
483 if (hw->bus.lan_id)
484 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
485 else
486 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
487 break;
488 default:
489 break;
490 }
491 return IXGBE_SUCCESS;
492 }
493
494 /**
495 * ixgbe_fw_phy_activity - Perform an activity on a PHY
496 * @hw: pointer to hardware structure
497 * @activity: activity to perform
498 * @data: Pointer to 4 32-bit words of data
499 */
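/*
 * Usage sketch (see ixgbe_get_phy_id_fw below): the caller passes a
 * four-word buffer that carries the request payload in and, on success,
 * the response payload back out:
 *
 *	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
 *	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
 */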
500 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
501 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
502 {
503 union {
504 struct ixgbe_hic_phy_activity_req cmd;
505 struct ixgbe_hic_phy_activity_resp rsp;
506 } hic;
507 u16 retries = FW_PHY_ACT_RETRIES;
508 s32 rc;
509 u16 i;
510
511 do {
512 memset(&hic, 0, sizeof(hic));
513 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
514 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
515 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
516 hic.cmd.port_number = hw->bus.lan_id;
517 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
518 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
519 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
520
521 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
522 sizeof(hic.cmd),
523 IXGBE_HI_COMMAND_TIMEOUT,
524 TRUE);
525 if (rc != IXGBE_SUCCESS)
526 return rc;
527 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
528 FW_CEM_RESP_STATUS_SUCCESS) {
529 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
530 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
531 return IXGBE_SUCCESS;
532 }
533 usec_delay(20);
534 --retries;
535 } while (retries > 0);
536
537 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
538 }
539
540 static const struct {
541 u16 fw_speed;
542 ixgbe_link_speed phy_speed;
543 } ixgbe_fw_map[] = {
544 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
545 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
546 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
547 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
548 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
549 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
550 };
551
552 /**
553 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
554 * @hw: pointer to hardware structure
555 *
556 * Returns error code
557 */
558 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
559 {
560 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
561 u16 phy_speeds;
562 u16 phy_id_lo;
563 s32 rc;
564 u16 i;
565
566 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
567 if (rc)
568 return rc;
569
570 hw->phy.speeds_supported = 0;
571 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
572 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
573 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
574 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
575 }
576
577 #if 0
578 /*
579 * Don't set autoneg_advertised here, so as not to be inconsistent
580 * with the if_media value.
581 */
582 if (!hw->phy.autoneg_advertised)
583 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
584 #endif
585
586 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
587 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
588 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
589 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
590 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
591 return IXGBE_ERR_PHY_ADDR_INVALID;
592 return IXGBE_SUCCESS;
593 }
594
595 /**
596 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
597 * @hw: pointer to hardware structure
598 *
599 * Returns error code
600 */
601 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
602 {
603 if (hw->bus.lan_id)
604 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
605 else
606 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
607
608 hw->phy.type = ixgbe_phy_fw;
609 hw->phy.ops.read_reg = NULL;
610 hw->phy.ops.write_reg = NULL;
611 return ixgbe_get_phy_id_fw(hw);
612 }
613
614 /**
615 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
616 * @hw: pointer to hardware structure
617 *
618 * Returns error code
619 */
620 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
621 {
622 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
623
624 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
625 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
626 }
627
628 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
629 u32 device_type, u16 *phy_data)
630 {
631 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
632 return IXGBE_NOT_IMPLEMENTED;
633 }
634
635 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
636 u32 device_type, u16 phy_data)
637 {
638 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
639 return IXGBE_NOT_IMPLEMENTED;
640 }
641
642 /**
643 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
644 * @hw: pointer to the hardware structure
645 * @addr: I2C bus address to read from
646 * @reg: I2C device register to read from
647 * @val: pointer to location to receive read value
648 *
649 * Returns an error code on error.
650 **/
651 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
652 u16 reg, u16 *val)
653 {
654 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
655 }
656
657 /**
658 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
659 * @hw: pointer to the hardware structure
660 * @addr: I2C bus address to read from
661 * @reg: I2C device register to read from
662 * @val: pointer to location to receive read value
663 *
664 * Returns an error code on error.
665 **/
666 static s32
667 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
668 u16 reg, u16 *val)
669 {
670 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
671 }
672
673 /**
674 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
675 * @hw: pointer to the hardware structure
676 * @addr: I2C bus address to write to
677 * @reg: I2C device register to write to
678 * @val: value to write
679 *
680 * Returns an error code on error.
681 **/
682 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
683 u8 addr, u16 reg, u16 val)
684 {
685 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
686 }
687
688 /**
689 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
690 * @hw: pointer to the hardware structure
691 * @addr: I2C bus address to write to
692 * @reg: I2C device register to write to
693 * @val: value to write
694 *
695 * Returns an error code on error.
696 **/
697 static s32
698 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
699 u8 addr, u16 reg, u16 val)
700 {
701 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
702 }
703
704 /**
705 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
706 * @hw: pointer to hardware structure
707 *
708 * Initialize the function pointers and assign the MAC type for X550EM.
709 * Does not touch the hardware.
710 **/
711 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
712 {
713 struct ixgbe_mac_info *mac = &hw->mac;
714 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
715 struct ixgbe_phy_info *phy = &hw->phy;
716 s32 ret_val;
717
718 DEBUGFUNC("ixgbe_init_ops_X550EM");
719
720 /* Similar to X550 so start there. */
721 ret_val = ixgbe_init_ops_X550(hw);
722
723 /* Since this function eventually calls
724 * ixgbe_init_ops_X540 by design, we are setting
725 * the pointers to NULL explicitly here to overwrite
726 * the values being set in the x540 function.
727 */
728
729 /* Bypass not supported in x550EM */
730 mac->ops.bypass_rw = NULL;
731 mac->ops.bypass_valid_rd = NULL;
732 mac->ops.bypass_set = NULL;
733 mac->ops.bypass_rd_eep = NULL;
734
735 /* FCOE not supported in x550EM */
736 mac->ops.get_san_mac_addr = NULL;
737 mac->ops.set_san_mac_addr = NULL;
738 mac->ops.get_wwn_prefix = NULL;
739 mac->ops.get_fcoe_boot_status = NULL;
740
741 /* IPsec not supported in x550EM */
742 mac->ops.disable_sec_rx_path = NULL;
743 mac->ops.enable_sec_rx_path = NULL;
744
745 /* AUTOC register is not present in x550EM. */
746 mac->ops.prot_autoc_read = NULL;
747 mac->ops.prot_autoc_write = NULL;
748
749 /* X550EM bus type is internal*/
750 hw->bus.type = ixgbe_bus_type_internal;
751 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
752
753
754 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
755 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
756 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
757 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
758 mac->ops.get_supported_physical_layer =
759 ixgbe_get_supported_physical_layer_X550em;
760
761 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
762 mac->ops.setup_fc = ixgbe_setup_fc_generic;
763 else
764 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
765
766 /* PHY */
767 phy->ops.init = ixgbe_init_phy_ops_X550em;
768 switch (hw->device_id) {
769 case IXGBE_DEV_ID_X550EM_A_1G_T:
770 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
771 mac->ops.setup_fc = NULL;
772 phy->ops.identify = ixgbe_identify_phy_fw;
773 phy->ops.set_phy_power = NULL;
774 phy->ops.get_firmware_version = NULL;
775 break;
776 case IXGBE_DEV_ID_X550EM_X_1G_T:
777 mac->ops.setup_fc = NULL;
778 phy->ops.identify = ixgbe_identify_phy_x550em;
779 phy->ops.set_phy_power = NULL;
780 break;
781 default:
782 phy->ops.identify = ixgbe_identify_phy_x550em;
783 }
784
785 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
786 phy->ops.set_phy_power = NULL;
787
788
789 /* EEPROM */
790 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
791 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
792 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
793 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
794 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
795 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
796 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
797 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
798
799 return ret_val;
800 }
801
802 #define IXGBE_DENVERTON_WA 1
803
804 /**
805 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
806 * @hw: pointer to hardware structure
807 */
808 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
809 {
810 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
811 s32 rc;
812 #ifdef IXGBE_DENVERTON_WA
813 s32 ret_val;
814 u16 phydata;
815 #endif
816 u16 i;
817
818 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
819 return 0;
820
821 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
822 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
823 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
824 return IXGBE_ERR_INVALID_LINK_SETTINGS;
825 }
826
827 switch (hw->fc.requested_mode) {
828 case ixgbe_fc_full:
829 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
830 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
831 break;
832 case ixgbe_fc_rx_pause:
833 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
834 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
835 break;
836 case ixgbe_fc_tx_pause:
837 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
838 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
839 break;
840 default:
841 break;
842 }
843
844 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
845 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
846 setup[0] |= ixgbe_fw_map[i].fw_speed;
847 }
848 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
849
850 if (hw->phy.eee_speeds_advertised)
851 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
852
853 #ifdef IXGBE_DENVERTON_WA
854 if ((hw->phy.force_10_100_autonego == false)
855 && ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
856 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL))) {
857 /* Don't use auto-nego for 10/100Mbps */
858 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
859 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
860 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
861 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
862 }
863 #endif
864
865 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
866 if (rc)
867 return rc;
868
869 #ifdef IXGBE_DENVERTON_WA
870 if (hw->phy.force_10_100_autonego == true)
871 goto out;
872
873 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
874 if (ret_val != 0)
875 goto out;
876
877 /*
878 * Broken firmware sets BMCR register incorrectly if
879 * FW_PHY_ACT_SETUP_LINK_AN isn't set.
880 * a) FDX may not be set.
881 * b) BMCR_SPEED1 (bit 6) is always cleared.
882 * +--------+------+----------+------+---------------------------+
883 * |request | BMCR | BMCR spd | BMCR |                           |
884 * |        | (HEX)| (in bits)| FDX  |                           |
885 * +--------+------+----------+------+---------------------------+
886 * | 10M    | 0000 | 10M(00)  |  0   |                           |
887 * | 10M    | 2000 | 100M(01) |  0   | (I've never observed this)|
888 * | 10M    | 2100 | 100M(01) |  1   |                           |
889 * | 100M   | 0000 | 10M(00)  |  0   |                           |
890 * | 100M   | 0100 | 10M(00)  |  1   |                           |
891 * +--------+------+----------+------+---------------------------+
892 */
893 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
894 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
895 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
896 && (((phydata & BMCR_FDX) == 0)
897 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
898 phydata = BMCR_FDX;
899 switch (hw->phy.autoneg_advertised) {
900 case IXGBE_LINK_SPEED_10_FULL:
901 phydata |= BMCR_S10;
902 break;
903 case IXGBE_LINK_SPEED_100_FULL:
904 phydata |= BMCR_S100;
905 break;
906 case IXGBE_LINK_SPEED_1GB_FULL:
907 panic("%s: 1GB_FULL is set", __func__);
908 break;
909 default:
910 break;
911 }
912 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
913 if (ret_val != 0)
914 return ret_val;
915 }
916 out:
917 #endif
918 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
919 return IXGBE_ERR_OVERTEMP;
920 return IXGBE_SUCCESS;
921 }
922
923 /**
924 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
925 * @hw: pointer to hardware structure
926 *
927 * Called at init time to set up flow control.
928 */
929 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
930 {
931 if (hw->fc.requested_mode == ixgbe_fc_default)
932 hw->fc.requested_mode = ixgbe_fc_full;
933
934 return ixgbe_setup_fw_link(hw);
935 }
936
937 /**
938 * ixgbe_setup_eee_fw - Enable/disable EEE support
939 * @hw: pointer to the HW structure
940 * @enable_eee: boolean flag to enable EEE
941 *
942 * Enable/disable EEE based on enable_eee flag.
943 * This function controls EEE for firmware-based PHY implementations.
944 */
945 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
946 {
947 if (!!hw->phy.eee_speeds_advertised == enable_eee)
948 return IXGBE_SUCCESS;
949 if (enable_eee)
950 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
951 else
952 hw->phy.eee_speeds_advertised = 0;
953 return hw->phy.ops.setup_link(hw);
954 }
955
956 /**
957 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
958 * @hw: pointer to hardware structure
959 *
960 * Initialize the function pointers and assign the MAC type for X550EM_a.
961 * Does not touch the hardware.
962 **/
963 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
964 {
965 struct ixgbe_mac_info *mac = &hw->mac;
966 s32 ret_val;
967
968 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
969
970 /* Start with generic X550EM init */
971 ret_val = ixgbe_init_ops_X550EM(hw);
972
973 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
974 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
975 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
976 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
977 } else {
978 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
979 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
980 }
981 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
982 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
983
984 switch (mac->ops.get_media_type(hw)) {
985 case ixgbe_media_type_fiber:
986 mac->ops.setup_fc = NULL;
987 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
988 break;
989 case ixgbe_media_type_backplane:
990 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
991 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
992 break;
993 default:
994 break;
995 }
996
997 switch (hw->device_id) {
998 case IXGBE_DEV_ID_X550EM_A_1G_T:
999 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1000 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
1001 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
1002 mac->ops.setup_eee = ixgbe_setup_eee_fw;
1003 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
1004 IXGBE_LINK_SPEED_1GB_FULL;
1005 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1006 break;
1007 default:
1008 break;
1009 }
1010
1011 return ret_val;
1012 }
1013
1014 /**
1015 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1016 * @hw: pointer to hardware structure
1017 *
1018 * Initialize the function pointers and assign the MAC type for X550EM_x.
1019 * Does not touch the hardware.
1020 **/
1021 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1022 {
1023 struct ixgbe_mac_info *mac = &hw->mac;
1024 struct ixgbe_link_info *link = &hw->link;
1025 s32 ret_val;
1026
1027 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1028
1029 /* Start with generic X550EM init */
1030 ret_val = ixgbe_init_ops_X550EM(hw);
1031
1032 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1033 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1034 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1035 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1036 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1037 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1038 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1039 link->ops.write_link_unlocked =
1040 ixgbe_write_i2c_combined_generic_unlocked;
1041 link->addr = IXGBE_CS4227;
1042
1043 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1044 mac->ops.setup_fc = NULL;
1045 mac->ops.setup_eee = NULL;
1046 mac->ops.init_led_link_act = NULL;
1047 }
1048
1049 return ret_val;
1050 }
1051
1052 /**
1053 * ixgbe_dmac_config_X550
1054 * @hw: pointer to hardware structure
1055 *
1056 * Configure DMA coalescing. If enabling dmac, dmac is activated.
1057 * When disabling dmac, the dmac enable bit is cleared.
1058 **/
1059 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1060 {
1061 u32 reg, high_pri_tc;
1062
1063 DEBUGFUNC("ixgbe_dmac_config_X550");
1064
1065 /* Disable DMA coalescing before configuring */
1066 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1067 reg &= ~IXGBE_DMACR_DMAC_EN;
1068 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1069
1070 /* Disable DMA Coalescing if the watchdog timer is 0 */
1071 if (!hw->mac.dmac_config.watchdog_timer)
1072 goto out;
1073
1074 ixgbe_dmac_config_tcs_X550(hw);
1075
1076 /* Configure DMA Coalescing Control Register */
1077 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1078
1079 /* Set the watchdog timer in units of 40.96 usec */
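/* watchdog_timer is given in usec; x * 100 / 4096 converts it to 40.96 usec units */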
1080 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1081 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1082
1083 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1084 /* If fcoe is enabled, set high priority traffic class */
1085 if (hw->mac.dmac_config.fcoe_en) {
1086 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1087 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1088 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1089 }
1090 reg |= IXGBE_DMACR_EN_MNG_IND;
1091
1092 /* Enable DMA coalescing after configuration */
1093 reg |= IXGBE_DMACR_DMAC_EN;
1094 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1095
1096 out:
1097 return IXGBE_SUCCESS;
1098 }
1099
1100 /**
1101 * ixgbe_dmac_config_tcs_X550
1102 * @hw: pointer to hardware structure
1103 *
1104 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1105 * be cleared before configuring.
1106 **/
1107 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1108 {
1109 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1110
1111 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1112
1113 /* Select packet buffer headroom based on the current link speed */
1114 switch (hw->mac.dmac_config.link_speed) {
1115 case IXGBE_LINK_SPEED_10_FULL:
1116 case IXGBE_LINK_SPEED_100_FULL:
1117 pb_headroom = IXGBE_DMACRXT_100M;
1118 break;
1119 case IXGBE_LINK_SPEED_1GB_FULL:
1120 pb_headroom = IXGBE_DMACRXT_1G;
1121 break;
1122 default:
1123 pb_headroom = IXGBE_DMACRXT_10G;
1124 break;
1125 }
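/*
 * pb_headroom is the link-speed dependent reserve (in KB) subtracted
 * from each TC's Rx packet buffer size below to form the per-TC
 * receive threshold written to DMCTH.
 */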
1126
1127 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1128 IXGBE_MHADD_MFS_SHIFT) / 1024);
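/*
 * MAXFRS holds the maximum frame size in bytes in its upper 16 bits;
 * dividing by 1024 yields KB for comparison with the RXPBSIZE-based
 * threshold computed below.
 */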
1129
1130 /* Set the per Rx packet buffer receive threshold */
1131 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1132 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1133 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1134
1135 if (tc < hw->mac.dmac_config.num_tcs) {
1136 /* Get Rx PB size */
1137 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1138 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1139 IXGBE_RXPBSIZE_SHIFT;
1140
1141 /* Calculate receive buffer threshold in kilobytes */
1142 if (rx_pb_size > pb_headroom)
1143 rx_pb_size = rx_pb_size - pb_headroom;
1144 else
1145 rx_pb_size = 0;
1146
1147 /* Minimum of MFS shall be set for DMCTH */
1148 reg |= (rx_pb_size > maxframe_size_kb) ?
1149 rx_pb_size : maxframe_size_kb;
1150 }
1151 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1152 }
1153 return IXGBE_SUCCESS;
1154 }
1155
1156 /**
1157 * ixgbe_dmac_update_tcs_X550
1158 * @hw: pointer to hardware structure
1159 *
1160 * Disables dmac, updates per TC settings, and then enables dmac.
1161 **/
1162 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1163 {
1164 u32 reg;
1165
1166 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1167
1168 /* Disable DMA coalescing before configuring */
1169 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1170 reg &= ~IXGBE_DMACR_DMAC_EN;
1171 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1172
1173 ixgbe_dmac_config_tcs_X550(hw);
1174
1175 /* Enable DMA coalescing after configuration */
1176 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1177 reg |= IXGBE_DMACR_DMAC_EN;
1178 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1179
1180 return IXGBE_SUCCESS;
1181 }
1182
1183 /**
1184 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1185 * @hw: pointer to hardware structure
1186 *
1187 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1188 * ixgbe_hw struct in order to set up EEPROM access.
1189 **/
1190 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1191 {
1192 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1193 u32 eec;
1194 u16 eeprom_size;
1195
1196 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1197
1198 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1199 eeprom->semaphore_delay = 10;
1200 eeprom->type = ixgbe_flash;
1201
1202 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1203 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1204 IXGBE_EEC_SIZE_SHIFT);
1205 eeprom->word_size = 1 << (eeprom_size +
1206 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1207
1208 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1209 eeprom->type, eeprom->word_size);
1210 }
1211
1212 return IXGBE_SUCCESS;
1213 }
1214
1215 /**
1216 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1217 * @hw: pointer to hardware structure
1218 * @enable: enable or disable source address pruning
1219 * @pool: Rx pool to set source address pruning for
1220 **/
1221 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1222 unsigned int pool)
1223 {
1224 u64 pfflp;
1225
1226 /* max rx pool is 63 */
1227 if (pool > 63)
1228 return;
1229
1230 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1231 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
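/* PFFLPL/PFFLPH form a 64-bit bitmap with one enable bit per Rx pool */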
1232
1233 if (enable)
1234 pfflp |= (1ULL << pool);
1235 else
1236 pfflp &= ~(1ULL << pool);
1237
1238 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1239 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1240 }
1241
1242 /**
1243 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1244 * @hw: pointer to hardware structure
1245 * @enable: enable or disable switch for Ethertype anti-spoofing
1246 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1247 *
1248 **/
1249 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1250 bool enable, int vf)
1251 {
1252 int vf_target_reg = vf >> 3;
1253 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1254 u32 pfvfspoof;
1255
1256 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1257
1258 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1259 if (enable)
1260 pfvfspoof |= (1 << vf_target_shift);
1261 else
1262 pfvfspoof &= ~(1 << vf_target_shift);
1263
1264 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1265 }
1266
1267 /**
1268 * ixgbe_iosf_wait - Wait for IOSF command completion
1269 * @hw: pointer to hardware structure
1270 * @ctrl: pointer to location to receive final IOSF control value
1271 *
1272 * Returns failing status on timeout
1273 *
1274 * Note: ctrl can be NULL if the IOSF control register value is not needed
1275 **/
1276 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1277 {
1278 u32 i, command = 0;
1279
1280 /* Check every 10 usec to see if the address cycle completed.
1281 * The SB IOSF BUSY bit will clear when the operation is
1282 * complete
1283 */
1284 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1285 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1286 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1287 break;
1288 usec_delay(10);
1289 }
1290 if (ctrl)
1291 *ctrl = command;
1292 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1293 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1294 return IXGBE_ERR_PHY;
1295 }
1296
1297 return IXGBE_SUCCESS;
1298 }
1299
1300 /**
1301 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1302 * of the IOSF device
1303 * @hw: pointer to hardware structure
1304 * @reg_addr: 32 bit PHY register to write
1305 * @device_type: 3 bit device type
1306 * @data: Data to write to the register
1307 **/
1308 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1309 u32 device_type, u32 data)
1310 {
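/*
 * The IOSF side-band interface is shared by both LAN ports, so both
 * PHY semaphores are held for the duration of the access.
 */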
1311 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1312 u32 command, error __unused;
1313 s32 ret;
1314
1315 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1316 if (ret != IXGBE_SUCCESS)
1317 return ret;
1318
1319 ret = ixgbe_iosf_wait(hw, NULL);
1320 if (ret != IXGBE_SUCCESS)
1321 goto out;
1322
1323 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1324 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1325
1326 /* Write IOSF control register */
1327 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1328
1329 /* Write IOSF data register */
1330 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1331
1332 ret = ixgbe_iosf_wait(hw, &command);
1333
1334 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1335 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1336 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1337 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1338 "Failed to write, error %x\n", error);
1339 ret = IXGBE_ERR_PHY;
1340 }
1341
1342 out:
1343 ixgbe_release_swfw_semaphore(hw, gssr);
1344 return ret;
1345 }
1346
1347 /**
1348 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1349 * @hw: pointer to hardware structure
1350 * @reg_addr: 32 bit PHY register to write
1351 * @device_type: 3 bit device type
1352 * @data: Pointer to read data from the register
1353 **/
1354 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1355 u32 device_type, u32 *data)
1356 {
1357 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1358 u32 command, error __unused;
1359 s32 ret;
1360
1361 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1362 if (ret != IXGBE_SUCCESS)
1363 return ret;
1364
1365 ret = ixgbe_iosf_wait(hw, NULL);
1366 if (ret != IXGBE_SUCCESS)
1367 goto out;
1368
1369 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1370 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1371
1372 /* Write IOSF control register */
1373 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1374
1375 ret = ixgbe_iosf_wait(hw, &command);
1376
1377 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1378 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1379 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1380 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1381 "Failed to read, error %x\n", error);
1382 ret = IXGBE_ERR_PHY;
1383 }
1384
1385 if (ret == IXGBE_SUCCESS)
1386 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1387
1388 out:
1389 ixgbe_release_swfw_semaphore(hw, gssr);
1390 return ret;
1391 }
1392
1393 /**
1394 * ixgbe_get_phy_token - Get the token for shared phy access
1395 * @hw: Pointer to hardware structure
1396 */
1397
1398 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1399 {
1400 struct ixgbe_hic_phy_token_req token_cmd;
1401 s32 status;
1402
1403 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1404 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1405 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1406 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1407 token_cmd.port_number = hw->bus.lan_id;
1408 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1409 token_cmd.pad = 0;
1410 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1411 sizeof(token_cmd),
1412 IXGBE_HI_COMMAND_TIMEOUT,
1413 TRUE);
1414 if (status) {
1415 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1416 status);
1417 return status;
1418 }
1419 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1420 return IXGBE_SUCCESS;
1421 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1422 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1423 token_cmd.hdr.cmd_or_resp.ret_status);
1424 return IXGBE_ERR_FW_RESP_INVALID;
1425 }
1426
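/*
 * FW_PHY_TOKEN_RETRY means the other port currently holds the PHY
 * token; the caller is expected to back off and retry the request.
 */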
1427 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1428 return IXGBE_ERR_TOKEN_RETRY;
1429 }
1430
1431 /**
1432 * ixgbe_put_phy_token - Put the token for shared phy access
1433 * @hw: Pointer to hardware structure
1434 */
1435
1436 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1437 {
1438 struct ixgbe_hic_phy_token_req token_cmd;
1439 s32 status;
1440
1441 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1442 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1443 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1444 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1445 token_cmd.port_number = hw->bus.lan_id;
1446 token_cmd.command_type = FW_PHY_TOKEN_REL;
1447 token_cmd.pad = 0;
1448 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1449 sizeof(token_cmd),
1450 IXGBE_HI_COMMAND_TIMEOUT,
1451 TRUE);
1452 if (status)
1453 return status;
1454 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1455 return IXGBE_SUCCESS;
1456
1457 DEBUGOUT("Put PHY Token host interface command failed");
1458 return IXGBE_ERR_FW_RESP_INVALID;
1459 }
1460
1461 /**
1462 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1463 * of the IOSF device
1464 * @hw: pointer to hardware structure
1465 * @reg_addr: 32 bit PHY register to write
1466 * @device_type: 3 bit device type
1467 * @data: Data to write to the register
1468 **/
1469 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1470 u32 device_type, u32 data)
1471 {
1472 struct ixgbe_hic_internal_phy_req write_cmd;
1473 s32 status;
1474 UNREFERENCED_1PARAMETER(device_type);
1475
1476 memset(&write_cmd, 0, sizeof(write_cmd));
1477 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1478 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1479 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1480 write_cmd.port_number = hw->bus.lan_id;
1481 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1482 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1483 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1484
1485 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1486 sizeof(write_cmd),
1487 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1488
1489 return status;
1490 }
1491
1492 /**
1493 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1494 * @hw: pointer to hardware structure
1495 * @reg_addr: 32 bit PHY register to write
1496 * @device_type: 3 bit device type
1497 * @data: Pointer to read data from the register
1498 **/
1499 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1500 u32 device_type, u32 *data)
1501 {
1502 union {
1503 struct ixgbe_hic_internal_phy_req cmd;
1504 struct ixgbe_hic_internal_phy_resp rsp;
1505 } hic;
1506 s32 status;
1507 UNREFERENCED_1PARAMETER(device_type);
1508
1509 memset(&hic, 0, sizeof(hic));
1510 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1511 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1512 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1513 hic.cmd.port_number = hw->bus.lan_id;
1514 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1515 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1516
1517 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1518 sizeof(hic.cmd),
1519 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1520
1521 /* Extract the register value from the response. */
1522 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1523
1524 return status;
1525 }
1526
1527 /**
1528 * ixgbe_disable_mdd_X550
1529 * @hw: pointer to hardware structure
1530 *
1531 * Disable malicious driver detection
1532 **/
1533 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1534 {
1535 u32 reg;
1536
1537 DEBUGFUNC("ixgbe_disable_mdd_X550");
1538
1539 /* Disable MDD for TX DMA and interrupt */
1540 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1541 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1542 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1543
1544 /* Disable MDD for RX and interrupt */
1545 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1546 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1547 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1548 }
1549
1550 /**
1551 * ixgbe_enable_mdd_X550
1552 * @hw: pointer to hardware structure
1553 *
1554 * Enable malicious driver detection
1555 **/
1556 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1557 {
1558 u32 reg;
1559
1560 DEBUGFUNC("ixgbe_enable_mdd_X550");
1561
1562 /* Enable MDD for TX DMA and interrupt */
1563 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1564 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1565 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1566
1567 /* Enable MDD for RX and interrupt */
1568 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1569 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1570 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1571 }
1572
1573 /**
1574 * ixgbe_restore_mdd_vf_X550
1575 * @hw: pointer to hardware structure
1576 * @vf: vf index
1577 *
1578 * Restore VF that was disabled during malicious driver detection event
1579 **/
1580 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1581 {
1582 u32 idx, reg, num_qs, start_q, bitmask;
1583
1584 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1585
1586 /* Map VF to queues */
1587 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1588 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1589 case IXGBE_MRQC_VMDQRT8TCEN:
1590 num_qs = 8; /* 16 VFs / pools */
1591 bitmask = 0x000000FF;
1592 break;
1593 case IXGBE_MRQC_VMDQRSS32EN:
1594 case IXGBE_MRQC_VMDQRT4TCEN:
1595 num_qs = 4; /* 32 VFs / pools */
1596 bitmask = 0x0000000F;
1597 break;
1598 default: /* 64 VFs / pools */
1599 num_qs = 2;
1600 bitmask = 0x00000003;
1601 break;
1602 }
1603 start_q = vf * num_qs;
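/*
 * Each pool owns num_qs consecutive queues, so this VF's first queue is
 * vf * num_qs; bitmask has num_qs bits set and is shifted to that queue
 * position within the 32-bit WQBR register below.
 */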
1604
1605 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1606 idx = start_q / 32;
1607 reg = 0;
1608 reg |= (bitmask << (start_q % 32));
1609 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1610 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1611 }
1612
1613 /**
1614 * ixgbe_mdd_event_X550
1615 * @hw: pointer to hardware structure
1616 * @vf_bitmap: vf bitmap of malicious vfs
1617 *
1618 * Handle malicious driver detection event.
1619 **/
1620 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1621 {
1622 u32 wqbr;
1623 u32 i, j, reg, q, shift, vf, idx;
1624
1625 DEBUGFUNC("ixgbe_mdd_event_X550");
1626
1627 /* figure out pool size for mapping to vf's */
1628 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1629 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1630 case IXGBE_MRQC_VMDQRT8TCEN:
1631 shift = 3; /* 16 VFs / pools */
1632 break;
1633 case IXGBE_MRQC_VMDQRSS32EN:
1634 case IXGBE_MRQC_VMDQRT4TCEN:
1635 shift = 2; /* 32 VFs / pools */
1636 break;
1637 default:
1638 shift = 1; /* 64 VFs / pools */
1639 break;
1640 }
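/* With 2^shift queues per pool, queue q below maps to VF/pool (q >> shift) */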
1641
1642 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1643 for (i = 0; i < 4; i++) {
1644 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1645 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1646
1647 if (!wqbr)
1648 continue;
1649
1650 /* Get malicious queue */
1651 for (j = 0; j < 32 && wqbr; j++) {
1652
1653 if (!(wqbr & (1 << j)))
1654 continue;
1655
1656 /* Get queue from bitmask */
1657 q = j + (i * 32);
1658
1659 /* Map queue to vf */
1660 vf = (q >> shift);
1661
1662 /* Set vf bit in vf_bitmap */
1663 idx = vf / 32;
1664 vf_bitmap[idx] |= (1 << (vf % 32));
1665 wqbr &= ~(1 << j);
1666 }
1667 }
1668 }
1669
1670 /**
1671 * ixgbe_get_media_type_X550em - Get media type
1672 * @hw: pointer to hardware structure
1673 *
1674 * Returns the media type (fiber, copper, backplane)
1675 */
1676 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1677 {
1678 enum ixgbe_media_type media_type;
1679
1680 DEBUGFUNC("ixgbe_get_media_type_X550em");
1681
1682 /* Detect if there is a copper PHY attached. */
1683 switch (hw->device_id) {
1684 case IXGBE_DEV_ID_X550EM_X_KR:
1685 case IXGBE_DEV_ID_X550EM_X_KX4:
1686 case IXGBE_DEV_ID_X550EM_X_XFI:
1687 case IXGBE_DEV_ID_X550EM_A_KR:
1688 case IXGBE_DEV_ID_X550EM_A_KR_L:
1689 media_type = ixgbe_media_type_backplane;
1690 break;
1691 case IXGBE_DEV_ID_X550EM_X_SFP:
1692 case IXGBE_DEV_ID_X550EM_A_SFP:
1693 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1694 case IXGBE_DEV_ID_X550EM_A_QSFP:
1695 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1696 media_type = ixgbe_media_type_fiber;
1697 break;
1698 case IXGBE_DEV_ID_X550EM_X_1G_T:
1699 case IXGBE_DEV_ID_X550EM_X_10G_T:
1700 case IXGBE_DEV_ID_X550EM_A_10G_T:
1701 media_type = ixgbe_media_type_copper;
1702 break;
1703 case IXGBE_DEV_ID_X550EM_A_SGMII:
1704 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1705 media_type = ixgbe_media_type_backplane;
1706 hw->phy.type = ixgbe_phy_sgmii;
1707 break;
1708 case IXGBE_DEV_ID_X550EM_A_1G_T:
1709 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1710 media_type = ixgbe_media_type_copper;
1711 break;
1712 default:
1713 media_type = ixgbe_media_type_unknown;
1714 break;
1715 }
1716 return media_type;
1717 }
1718
1719 /**
1720 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1721 * @hw: pointer to hardware structure
1722 * @linear: TRUE if SFP module is linear
1723 */
1724 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1725 {
1726 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1727
1728 switch (hw->phy.sfp_type) {
1729 case ixgbe_sfp_type_not_present:
1730 return IXGBE_ERR_SFP_NOT_PRESENT;
1731 case ixgbe_sfp_type_da_cu_core0:
1732 case ixgbe_sfp_type_da_cu_core1:
1733 *linear = TRUE;
1734 break;
1735 case ixgbe_sfp_type_srlr_core0:
1736 case ixgbe_sfp_type_srlr_core1:
1737 case ixgbe_sfp_type_da_act_lmt_core0:
1738 case ixgbe_sfp_type_da_act_lmt_core1:
1739 case ixgbe_sfp_type_1g_sx_core0:
1740 case ixgbe_sfp_type_1g_sx_core1:
1741 case ixgbe_sfp_type_1g_lx_core0:
1742 case ixgbe_sfp_type_1g_lx_core1:
1743 *linear = FALSE;
1744 break;
1745 case ixgbe_sfp_type_unknown:
1746 case ixgbe_sfp_type_1g_cu_core0:
1747 case ixgbe_sfp_type_1g_cu_core1:
1748 default:
1749 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1750 }
1751
1752 return IXGBE_SUCCESS;
1753 }
1754
1755 /**
1756 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1757 * @hw: pointer to hardware structure
1758 *
1759 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1760 **/
1761 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1762 {
1763 s32 status;
1764 bool linear;
1765
1766 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1767
1768 status = ixgbe_identify_module_generic(hw);
1769
1770 if (status != IXGBE_SUCCESS)
1771 return status;
1772
1773 /* Check if SFP module is supported */
1774 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1775
1776 return status;
1777 }
1778
1779 /**
1780 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1781 * @hw: pointer to hardware structure
1782 */
1783 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1784 {
1785 s32 status;
1786 bool linear;
1787
1788 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1789
1790 /* Check if SFP module is supported */
1791 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1792
1793 if (status != IXGBE_SUCCESS)
1794 return status;
1795
1796 ixgbe_init_mac_link_ops_X550em(hw);
1797 hw->phy.ops.reset = NULL;
1798
1799 return IXGBE_SUCCESS;
1800 }
1801
1802 /**
1803 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1804 * internal PHY
1805 * @hw: pointer to hardware structure
1806 **/
1807 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1808 {
1809 s32 status;
1810 u32 link_ctrl;
1811
1812 /* Restart auto-negotiation. */
1813 status = hw->mac.ops.read_iosf_sb_reg(hw,
1814 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1815 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1816
1817 if (status) {
1818 DEBUGOUT("Auto-negotiation did not complete\n");
1819 return status;
1820 }
1821
1822 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1823 status = hw->mac.ops.write_iosf_sb_reg(hw,
1824 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1825 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1826
1827 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1828 u32 flx_mask_st20;
1829
1830 /* Indicate to FW that AN restart has been asserted */
1831 status = hw->mac.ops.read_iosf_sb_reg(hw,
1832 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1833 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1834
1835 if (status) {
1836 DEBUGOUT("Auto-negotiation did not complete\n");
1837 return status;
1838 }
1839
1840 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1841 status = hw->mac.ops.write_iosf_sb_reg(hw,
1842 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1843 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1844 }
1845
1846 return status;
1847 }
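
/*
 * Illustrative note (REG and SOME_BIT below are placeholders, not real
 * macros): each step above is a read-modify-write over the IOSF sideband,
 *
 *	mac->ops.read_iosf_sb_reg(hw, REG, IXGBE_SB_IOSF_TARGET_KR_PHY, &val);
 *	val |= SOME_BIT;
 *	mac->ops.write_iosf_sb_reg(hw, REG, IXGBE_SB_IOSF_TARGET_KR_PHY, val);
 *
 * with the extra FW_AN_RESTART write done only on X550EM_a, so that the
 * firmware is told an AN restart was asserted.
 */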
1848
1849 /**
1850 * ixgbe_setup_sgmii - Set up link for sgmii
1851 * @hw: pointer to hardware structure
1852 * @speed: new link speed
1853 * @autoneg_wait: TRUE when waiting for completion is needed
1854 */
1855 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1856 bool autoneg_wait)
1857 {
1858 struct ixgbe_mac_info *mac = &hw->mac;
1859 u32 lval, sval, flx_val;
1860 s32 rc;
1861
1862 rc = mac->ops.read_iosf_sb_reg(hw,
1863 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1864 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1865 if (rc)
1866 return rc;
1867
1868 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1869 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1870 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1871 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1872 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1873 rc = mac->ops.write_iosf_sb_reg(hw,
1874 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1875 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1876 if (rc)
1877 return rc;
1878
1879 rc = mac->ops.read_iosf_sb_reg(hw,
1880 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1881 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1882 if (rc)
1883 return rc;
1884
1885 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1886 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1887 rc = mac->ops.write_iosf_sb_reg(hw,
1888 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1889 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1890 if (rc)
1891 return rc;
1892
1893 rc = mac->ops.read_iosf_sb_reg(hw,
1894 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1895 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1896 if (rc)
1897 return rc;
1898
1899 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1900 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1901 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1902 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1903 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1904
1905 rc = mac->ops.write_iosf_sb_reg(hw,
1906 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1907 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1908 if (rc)
1909 return rc;
1910
1911 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1912 if (rc)
1913 return rc;
1914
1915 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1916 }
1917
1918 /**
1919 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1920 * @hw: pointer to hardware structure
1921 * @speed: new link speed
1922 * @autoneg_wait: TRUE when waiting for completion is needed
1923 */
1924 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1925 bool autoneg_wait)
1926 {
1927 struct ixgbe_mac_info *mac = &hw->mac;
1928 u32 lval, sval, flx_val;
1929 s32 rc;
1930
1931 rc = mac->ops.read_iosf_sb_reg(hw,
1932 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1933 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1934 if (rc)
1935 return rc;
1936
1937 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1938 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1939 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1940 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1941 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1942 rc = mac->ops.write_iosf_sb_reg(hw,
1943 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1944 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1945 if (rc)
1946 return rc;
1947
1948 rc = mac->ops.read_iosf_sb_reg(hw,
1949 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1950 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1951 if (rc)
1952 return rc;
1953
1954 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1955 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1956 rc = mac->ops.write_iosf_sb_reg(hw,
1957 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1958 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1959 if (rc)
1960 return rc;
1961
1962 rc = mac->ops.write_iosf_sb_reg(hw,
1963 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1964 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1965 if (rc)
1966 return rc;
1967
1968 rc = mac->ops.read_iosf_sb_reg(hw,
1969 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1970 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1971 if (rc)
1972 return rc;
1973
1974 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1975 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1976 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1977 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1978 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1979
1980 rc = mac->ops.write_iosf_sb_reg(hw,
1981 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1982 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1983 if (rc)
1984 return rc;
1985
1986 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1987
1988 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1989 }
1990
1991 /**
1992 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1993 * @hw: pointer to hardware structure
1994 */
1995 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1996 {
1997 struct ixgbe_mac_info *mac = &hw->mac;
1998
1999 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
2000
2001 switch (hw->mac.ops.get_media_type(hw)) {
2002 case ixgbe_media_type_fiber:
2003 /* CS4227 does not support autoneg, so disable the laser control
2004 * functions for SFP+ fiber
2005 */
2006 mac->ops.disable_tx_laser = NULL;
2007 mac->ops.enable_tx_laser = NULL;
2008 mac->ops.flap_tx_laser = NULL;
2009 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2010 mac->ops.set_rate_select_speed =
2011 ixgbe_set_soft_rate_select_speed;
2012
2013 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2014 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2015 mac->ops.setup_mac_link =
2016 ixgbe_setup_mac_link_sfp_x550a;
2017 else
2018 mac->ops.setup_mac_link =
2019 ixgbe_setup_mac_link_sfp_x550em;
2020 break;
2021 case ixgbe_media_type_copper:
2022 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2023 break;
2024 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2025 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2026 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2027 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2028 mac->ops.check_link =
2029 ixgbe_check_mac_link_generic;
2030 } else {
2031 mac->ops.setup_link =
2032 ixgbe_setup_mac_link_t_X550em;
2033 }
2034 } else {
2035 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2036 mac->ops.check_link = ixgbe_check_link_t_X550em;
2037 }
2038 break;
2039 case ixgbe_media_type_backplane:
2040 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2041 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2042 mac->ops.setup_link = ixgbe_setup_sgmii;
2043 break;
2044 default:
2045 break;
2046 }
2047 }
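
/*
 * Illustrative usage sketch (hypothetical caller): once the media-specific
 * handlers are installed, callers drive the link through the ops table so
 * the same call site serves the SGMII, SFP and copper variants, e.g.
 *
 *	ixgbe_init_mac_link_ops_X550em(hw);
 *	if (hw->mac.ops.setup_link != NULL)
 *		status = hw->mac.ops.setup_link(hw,
 *		    IXGBE_LINK_SPEED_10GB_FULL, FALSE);
 */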
2048
2049 /**
2050 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2051 * @hw: pointer to hardware structure
2052 * @speed: pointer to link speed
2053 * @autoneg: TRUE when autoneg or autotry is enabled
2054 */
2055 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2056 ixgbe_link_speed *speed,
2057 bool *autoneg)
2058 {
2059 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2060
2061
2062 if (hw->phy.type == ixgbe_phy_fw) {
2063 *autoneg = TRUE;
2064 *speed = hw->phy.speeds_supported;
2065 return 0;
2066 }
2067
2068 /* SFP */
2069 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2070
2071 /* CS4227 SFP must not enable auto-negotiation */
2072 *autoneg = FALSE;
2073
2074 /* Check if 1G SFP module. */
2075 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2076 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2077 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2078 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2079 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2080 return IXGBE_SUCCESS;
2081 }
2082
2083 /* Link capabilities are based on SFP */
2084 if (hw->phy.multispeed_fiber)
2085 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2086 IXGBE_LINK_SPEED_1GB_FULL;
2087 else
2088 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2089 } else {
2090 *autoneg = TRUE;
2091
2092 switch (hw->phy.type) {
2093 case ixgbe_phy_x550em_xfi:
2094 *speed = IXGBE_LINK_SPEED_1GB_FULL |
2095 IXGBE_LINK_SPEED_10GB_FULL;
2096 *autoneg = FALSE;
2097 break;
2098 case ixgbe_phy_ext_1g_t:
2099 case ixgbe_phy_sgmii:
2100 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2101 break;
2102 case ixgbe_phy_x550em_kr:
2103 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2104 /* check different backplane modes */
2105 if (hw->phy.nw_mng_if_sel &
2106 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2107 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2108 break;
2109 } else if (hw->device_id ==
2110 IXGBE_DEV_ID_X550EM_A_KR_L) {
2111 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2112 break;
2113 }
2114 }
2115 /* fall through */
2116 default:
2117 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2118 IXGBE_LINK_SPEED_1GB_FULL;
2119 break;
2120 }
2121 }
2122
2123 return IXGBE_SUCCESS;
2124 }
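
/*
 * Illustrative usage sketch (hypothetical caller): the returned mask and
 * autoneg flag are meant to be consumed together, e.g.
 *
 *	ixgbe_link_speed speed;
 *	bool autoneg;
 *
 *	if (ixgbe_get_link_capabilities_X550em(hw, &speed, &autoneg) ==
 *	    IXGBE_SUCCESS && (speed & IXGBE_LINK_SPEED_10GB_FULL)) {
 *		... advertise 10G, or force it when autoneg is FALSE ...
 *	}
 */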
2125
2126 /**
2127 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2128 * @hw: pointer to hardware structure
2129 * @lsc: pointer to boolean flag which indicates whether external Base T
2130 * PHY interrupt is lsc
2131 *
2132 * Determine if external Base T PHY interrupt cause is high temperature
2133 * failure alarm or link status change.
2134 *
2135 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2136 * failure alarm, else return PHY access status.
2137 */
2138 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2139 {
2140 u32 status;
2141 u16 reg;
2142
2143 *lsc = FALSE;
2144
2145 /* Vendor alarm triggered */
2146 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2147 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2148 &reg);
2149
2150 if (status != IXGBE_SUCCESS ||
2151 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2152 return status;
2153
2154 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2155 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2156 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2157 &reg);
2158
2159 if (status != IXGBE_SUCCESS ||
2160 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2161 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2162 return status;
2163
2164 /* Global alarm triggered */
2165 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2166 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2167 &reg);
2168
2169 if (status != IXGBE_SUCCESS)
2170 return status;
2171
2172 /* If high temperature failure, then return over temp error and exit */
2173 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2174 /* power down the PHY in case the PHY FW didn't already */
2175 ixgbe_set_copper_phy_power(hw, FALSE);
2176 return IXGBE_ERR_OVERTEMP;
2177 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2178 /* device fault alarm triggered */
2179 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2180 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2181 &reg);
2182
2183 if (status != IXGBE_SUCCESS)
2184 return status;
2185
2186 /* if the device fault was due to a high temp alarm, handle it and exit */
2187 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2188 /* power down the PHY in case the PHY FW didn't */
2189 ixgbe_set_copper_phy_power(hw, FALSE);
2190 return IXGBE_ERR_OVERTEMP;
2191 }
2192 }
2193
2194 /* Vendor alarm 2 triggered */
2195 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2196 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2197
2198 if (status != IXGBE_SUCCESS ||
2199 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2200 return status;
2201
2202 /* link connect/disconnect event occurred */
2203 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2204 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2205
2206 if (status != IXGBE_SUCCESS)
2207 return status;
2208
2209 /* Indicate LSC */
2210 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2211 *lsc = TRUE;
2212
2213 return IXGBE_SUCCESS;
2214 }
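
/*
 * Illustrative usage sketch (hypothetical handler): callers are expected
 * to branch on the two meaningful outcomes of this helper,
 *
 *	bool lsc;
 *	s32 err = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
 *
 *	if (err == IXGBE_ERR_OVERTEMP)
 *		... report overtemp; the PHY has already been powered down ...
 *	else if (err == IXGBE_SUCCESS && lsc)
 *		... link changed; reconfigure the internal PHY link ...
 */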
2215
2216 /**
2217 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2218 * @hw: pointer to hardware structure
2219 *
2220 * Enable link status change and temperature failure alarm for the external
2221 * Base T PHY
2222 *
2223 * Returns PHY access status
2224 */
2225 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2226 {
2227 u32 status;
2228 u16 reg;
2229 bool lsc;
2230
2231 /* Clear interrupt flags */
2232 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2233
2234 /* Enable link status change alarm */
2235
2236 /* Enable the LASI interrupts on X552 devices to receive notifications
2237 * of the link configurations of the external PHY and correspondingly
2238 * support the configuration of the internal iXFI link, since iXFI does
2239 * not support auto-negotiation. This is not required for X553 devices
2240 * having KR support, since KR performs auto-negotiation and is used
2241 * as the internal link to the external PHY. Hence the check here avoids
2242 * enabling LASI interrupts for X553 devices.
2243 */
2244 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2245 status = hw->phy.ops.read_reg(hw,
2246 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2247 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2248
2249 if (status != IXGBE_SUCCESS)
2250 return status;
2251
2252 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2253
2254 status = hw->phy.ops.write_reg(hw,
2255 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2256 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2257
2258 if (status != IXGBE_SUCCESS)
2259 return status;
2260 }
2261
2262 /* Enable high temperature failure and global fault alarms */
2263 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2264 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2265 &reg);
2266
2267 if (status != IXGBE_SUCCESS)
2268 return status;
2269
2270 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2271 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2272
2273 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2274 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2275 reg);
2276
2277 if (status != IXGBE_SUCCESS)
2278 return status;
2279
2280 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2281 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2282 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2283 &reg);
2284
2285 if (status != IXGBE_SUCCESS)
2286 return status;
2287
2288 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2289 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2290
2291 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2292 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2293 reg);
2294
2295 if (status != IXGBE_SUCCESS)
2296 return status;
2297
2298 /* Enable chip-wide vendor alarm */
2299 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2300 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2301 &reg);
2302
2303 if (status != IXGBE_SUCCESS)
2304 return status;
2305
2306 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2307
2308 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2309 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2310 reg);
2311
2312 return status;
2313 }
2314
2315 /**
2316 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2317 * @hw: pointer to hardware structure
2318 * @speed: link speed
2319 *
2320 * Configures the integrated KR PHY.
2321 **/
2322 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2323 ixgbe_link_speed speed)
2324 {
2325 s32 status;
2326 u32 reg_val;
2327
2328 status = hw->mac.ops.read_iosf_sb_reg(hw,
2329 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2330 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2331 if (status)
2332 return status;
2333
2334 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2335 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2336 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2337
2338 /* Advertise 10G support. */
2339 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2340 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2341
2342 /* Advertise 1G support. */
2343 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2344 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2345
2346 status = hw->mac.ops.write_iosf_sb_reg(hw,
2347 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2348 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2349
2350 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2351 /* Set lane mode to KR auto negotiation */
2352 status = hw->mac.ops.read_iosf_sb_reg(hw,
2353 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2354 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2355
2356 if (status)
2357 return status;
2358
2359 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2360 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2361 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2362 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2363 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2364
2365 status = hw->mac.ops.write_iosf_sb_reg(hw,
2366 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2367 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2368 }
2369
2370 return ixgbe_restart_an_internal_phy_x550em(hw);
2371 }
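
/*
 * Illustrative note: the speed argument is a bit mask, so a caller that
 * wants both rates advertised passes both bits, exactly as the internal
 * PHY setup path further below does:
 *
 *	ixgbe_setup_kr_speed_x550em(hw, IXGBE_LINK_SPEED_10GB_FULL |
 *					IXGBE_LINK_SPEED_1GB_FULL);
 *
 * which sets both the AN_CAP_KR and AN_CAP_KX bits above.
 */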
2372
2373 /**
2374 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2375 * @hw: pointer to hardware structure
2376 */
2377 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2378 {
2379 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2380 s32 rc;
2381
2382 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2383 return IXGBE_SUCCESS;
2384
2385 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2386 if (rc)
2387 return rc;
2388 memset(store, 0, sizeof(store));
2389
2390 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2391 if (rc)
2392 return rc;
2393
2394 return ixgbe_setup_fw_link(hw);
2395 }
2396
2397 /**
2398 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2399 * @hw: pointer to hardware structure
2400 */
2401 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2402 {
2403 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2404 s32 rc;
2405
2406 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2407 if (rc)
2408 return rc;
2409
2410 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2411 ixgbe_shutdown_fw_phy(hw);
2412 return IXGBE_ERR_OVERTEMP;
2413 }
2414 return IXGBE_SUCCESS;
2415 }
2416
2417 /**
2418 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2419 * @hw: pointer to hardware structure
2420 *
2421 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2422 * values.
2423 **/
2424 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2425 {
2426 /* Save NW management interface connected on board. This is used
2427 * to determine internal PHY mode.
2428 */
2429 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2430
2431 /* If X553 (X550EM_a) and MDIO is connected to external PHY, then set
2432 * PHY address. This register field has only been used on X550EM_a devices.
2433 */
2434 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2435 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2436 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2437 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2438 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2439 }
2440
2441 return IXGBE_SUCCESS;
2442 }
2443
2444 /**
2445 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2446 * @hw: pointer to hardware structure
2447 *
2448 * Initialize any function pointers that were not able to be
2449 * set during init_shared_code because the PHY/SFP type was
2450 * not known. Perform the SFP init if necessary.
2451 */
2452 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2453 {
2454 struct ixgbe_phy_info *phy = &hw->phy;
2455 s32 ret_val;
2456
2457 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2458
2459 hw->mac.ops.set_lan_id(hw);
2460 ixgbe_read_mng_if_sel_x550em(hw);
2461
2462 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2463 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2464 ixgbe_setup_mux_ctl(hw);
2465 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2466 }
2467
2468 switch (hw->device_id) {
2469 case IXGBE_DEV_ID_X550EM_A_1G_T:
2470 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2471 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2472 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2473 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2474 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2475 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2476 if (hw->bus.lan_id)
2477 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2478 else
2479 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2480
2481 break;
2482 case IXGBE_DEV_ID_X550EM_A_10G_T:
2483 case IXGBE_DEV_ID_X550EM_A_SFP:
2484 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2485 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2486 if (hw->bus.lan_id)
2487 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2488 else
2489 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2490 break;
2491 case IXGBE_DEV_ID_X550EM_X_SFP:
2492 /* set up for CS4227 usage */
2493 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2494 break;
2495 case IXGBE_DEV_ID_X550EM_X_1G_T:
2496 phy->ops.read_reg_mdi = NULL;
2497 phy->ops.write_reg_mdi = NULL;
2498 break;
2499 default:
2500 break;
2501 }
2502
2503 /* Identify the PHY or SFP module */
2504 ret_val = phy->ops.identify(hw);
2505 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2506 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2507 return ret_val;
2508
2509 /* Setup function pointers based on detected hardware */
2510 ixgbe_init_mac_link_ops_X550em(hw);
2511 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2512 phy->ops.reset = NULL;
2513
2514 /* Set functions pointers based on phy type */
2515 switch (hw->phy.type) {
2516 case ixgbe_phy_x550em_kx4:
2517 phy->ops.setup_link = NULL;
2518 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2519 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2520 break;
2521 case ixgbe_phy_x550em_kr:
2522 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2523 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2524 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2525 break;
2526 case ixgbe_phy_ext_1g_t:
2527 /* link is managed by FW */
2528 phy->ops.setup_link = NULL;
2529 phy->ops.reset = NULL;
2530 break;
2531 case ixgbe_phy_x550em_xfi:
2532 /* link is managed by HW */
2533 phy->ops.setup_link = NULL;
2534 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2535 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2536 break;
2537 case ixgbe_phy_x550em_ext_t:
2538 /* If internal link mode is XFI, then setup iXFI internal link,
2539 * else setup KR now.
2540 */
2541 phy->ops.setup_internal_link =
2542 ixgbe_setup_internal_phy_t_x550em;
2543
2544 /* setup SW LPLU only for first revision of X550EM_x */
2545 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2546 !(IXGBE_FUSES0_REV_MASK &
2547 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2548 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2549
2550 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2551 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2552 break;
2553 case ixgbe_phy_sgmii:
2554 phy->ops.setup_link = NULL;
2555 break;
2556 case ixgbe_phy_fw:
2557 phy->ops.setup_link = ixgbe_setup_fw_link;
2558 phy->ops.reset = ixgbe_reset_phy_fw;
2559 break;
2560 default:
2561 break;
2562 }
2563 return ret_val;
2564 }
2565
2566 /**
2567 * ixgbe_set_mdio_speed - Set MDIO clock speed
2568 * @hw: pointer to hardware structure
2569 */
2570 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2571 {
2572 u32 hlreg0;
2573
2574 switch (hw->device_id) {
2575 case IXGBE_DEV_ID_X550EM_X_10G_T:
2576 case IXGBE_DEV_ID_X550EM_A_SGMII:
2577 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2578 case IXGBE_DEV_ID_X550EM_A_10G_T:
2579 case IXGBE_DEV_ID_X550EM_A_SFP:
2580 case IXGBE_DEV_ID_X550EM_A_QSFP:
2581 /* Config MDIO clock speed before the first MDIO PHY access */
2582 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2583 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2584 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2585 break;
2586 case IXGBE_DEV_ID_X550EM_A_1G_T:
2587 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2588 /* Select fast MDIO clock speed for these devices */
2589 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2590 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2591 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2592 break;
2593 default:
2594 break;
2595 }
2596 }
2597
2598 /**
2599 * ixgbe_reset_hw_X550em - Perform hardware reset
2600 * @hw: pointer to hardware structure
2601 *
2602 * Resets the hardware by resetting the transmit and receive units, masks
2603 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2604 * reset.
2605 */
2606 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2607 {
2608 ixgbe_link_speed link_speed;
2609 s32 status;
2610 s32 phy_status = IXGBE_SUCCESS;
2611 u32 ctrl = 0;
2612 u32 i;
2613 bool link_up = FALSE;
2614 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2615
2616 DEBUGFUNC("ixgbe_reset_hw_X550em");
2617
2618 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2619 status = hw->mac.ops.stop_adapter(hw);
2620 if (status != IXGBE_SUCCESS) {
2621 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2622 return status;
2623 }
2624 /* flush pending Tx transactions */
2625 ixgbe_clear_tx_pending(hw);
2626
2627 ixgbe_set_mdio_speed(hw);
2628
2629 /* PHY ops must be identified and initialized prior to reset */
2630 phy_status = hw->phy.ops.init(hw);
2631
2632 if (phy_status)
2633 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2634 phy_status);
2635
2636 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2637 phy_status == IXGBE_ERR_PHY_ADDR_INVALID) {
2638 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2639 goto mac_reset_top;
2640 }
2641
2642 /* start the external PHY */
2643 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2644 status = ixgbe_init_ext_t_x550em(hw);
2645 if (status) {
2646 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2647 status);
2648 return status;
2649 }
2650 }
2651
2652 /* Setup SFP module if there is one present. */
2653 if (hw->phy.sfp_setup_needed) {
2654 phy_status = hw->mac.ops.setup_sfp(hw);
2655 hw->phy.sfp_setup_needed = FALSE;
2656 }
2657
2658 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2659 goto mac_reset_top;
2660
2661 /* Reset PHY */
2662 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2663 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2664 return IXGBE_ERR_OVERTEMP;
2665 }
2666
2667 mac_reset_top:
2668 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2669 * If link reset is used when link is up, it might reset the PHY when
2670 * mng is using it. If link is down or the flag to force full link
2671 * reset is set, then perform link reset.
2672 */
2673 ctrl = IXGBE_CTRL_LNK_RST;
2674 if (!hw->force_full_reset) {
2675 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2676 if (link_up)
2677 ctrl = IXGBE_CTRL_RST;
2678 }
2679
2680 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2681 if (status != IXGBE_SUCCESS) {
2682 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2683 "semaphore failed with %d", status);
2684 return IXGBE_ERR_SWFW_SYNC;
2685 }
2686 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2687 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2688 IXGBE_WRITE_FLUSH(hw);
2689 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2690
2691 /* Poll for reset bit to self-clear meaning reset is complete */
2692 for (i = 0; i < 10; i++) {
2693 usec_delay(1);
2694 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2695 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2696 break;
2697 }
2698
2699 if (ctrl & IXGBE_CTRL_RST_MASK) {
2700 status = IXGBE_ERR_RESET_FAILED;
2701 DEBUGOUT("Reset polling failed to complete.\n");
2702 }
2703
2704 msec_delay(50);
2705
2706 /* Double resets are required for recovery from certain error
2707 * conditions. Between resets, it is necessary to stall to
2708 * allow time for any pending HW events to complete.
2709 */
2710 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2711 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2712 goto mac_reset_top;
2713 }
2714
2715 /* Store the permanent mac address */
2716 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2717
2718 /* Store MAC address from RAR0, clear receive address registers, and
2719 * clear the multicast table. Also reset num_rar_entries to 128,
2720 * since we modify this value when programming the SAN MAC address.
2721 */
2722 hw->mac.num_rar_entries = 128;
2723 hw->mac.ops.init_rx_addrs(hw);
2724
2725 ixgbe_set_mdio_speed(hw);
2726
2727 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2728 ixgbe_setup_mux_ctl(hw);
2729
2730 if (status != IXGBE_SUCCESS)
2731 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2732
2733 if (phy_status != IXGBE_SUCCESS)
2734 status = phy_status;
2735
2736 return status;
2737 }
2738
2739 /**
2740 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2741 * @hw: pointer to hardware structure
2742 */
2743 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2744 {
2745 u32 status;
2746 u16 reg;
2747
2748 status = hw->phy.ops.read_reg(hw,
2749 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2750 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2751 &reg);
2752
2753 if (status != IXGBE_SUCCESS)
2754 return status;
2755
2756 /* If PHY FW reset completed bit is set then this is the first
2757 * SW instance after a power on so the PHY FW must be un-stalled.
2758 */
2759 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2760 status = hw->phy.ops.read_reg(hw,
2761 IXGBE_MDIO_GLOBAL_RES_PR_10,
2762 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2763 &reg);
2764
2765 if (status != IXGBE_SUCCESS)
2766 return status;
2767
2768 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2769
2770 status = hw->phy.ops.write_reg(hw,
2771 IXGBE_MDIO_GLOBAL_RES_PR_10,
2772 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2773 reg);
2774
2775 if (status != IXGBE_SUCCESS)
2776 return status;
2777 }
2778
2779 return status;
2780 }
2781
2782 /**
2783 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2784 * @hw: pointer to hardware structure
2785 **/
2786 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2787 {
2788 /* leave link alone for 2.5G */
2789 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2790 return IXGBE_SUCCESS;
2791
2792 if (ixgbe_check_reset_blocked(hw))
2793 return 0;
2794
2795 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2796 }
2797
2798 /**
2799 * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2800 * @hw: pointer to hardware structure
2801 * @speed: new link speed
2802 * @autoneg_wait_to_complete: unused
2803 *
2804 * Configure the external PHY and the integrated KR PHY for SFP support.
2805 **/
2806 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2807 ixgbe_link_speed speed,
2808 bool autoneg_wait_to_complete)
2809 {
2810 s32 ret_val;
2811 u16 reg_slice, reg_val;
2812 bool setup_linear = FALSE;
2813 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2814
2815 /* Check if SFP module is supported and linear */
2816 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2817
2818 /* If no SFP module is present, return success: there is no reason to
2819 * configure the CS4227, and an SFP-not-present error is not treated as
2820 * fatal in the setup MAC link flow.
2821 */
2822 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2823 return IXGBE_SUCCESS;
2824
2825 if (ret_val != IXGBE_SUCCESS)
2826 return ret_val;
2827
2828 /* Configure internal PHY for KR/KX. */
2829 ixgbe_setup_kr_speed_x550em(hw, speed);
2830
2831 /* Configure CS4227 LINE side to proper mode. */
2832 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2833 (hw->bus.lan_id << 12);
2834 if (setup_linear)
2835 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2836 else
2837 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2838 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2839 reg_val);
2840 return ret_val;
2841 }
2842
2843 /**
2844 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2845 * @hw: pointer to hardware structure
2846 * @speed: the link speed to force
2847 *
2848 * Configures the integrated PHY for native SFI mode. Used to connect the
2849 * internal PHY directly to an SFP cage, without autonegotiation.
2850 **/
2851 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2852 {
2853 struct ixgbe_mac_info *mac = &hw->mac;
2854 s32 status;
2855 u32 reg_val;
2856
2857 /* Disable all AN and force speed to 10G Serial. */
2858 status = mac->ops.read_iosf_sb_reg(hw,
2859 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2860 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2861 if (status != IXGBE_SUCCESS)
2862 return status;
2863
2864 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2865 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2866 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2867 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2868
2869 /* Select forced link speed for internal PHY. */
2870 switch (*speed) {
2871 case IXGBE_LINK_SPEED_10GB_FULL:
2872 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2873 break;
2874 case IXGBE_LINK_SPEED_1GB_FULL:
2875 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2876 break;
2877 case 0:
2878 /* media none (linkdown) */
2879 break;
2880 default:
2881 /* Other link speeds are not supported by internal PHY. */
2882 return IXGBE_ERR_LINK_SETUP;
2883 }
2884
2885 status = mac->ops.write_iosf_sb_reg(hw,
2886 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2887 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2888
2889 /* Toggle port SW reset by AN reset. */
2890 status = ixgbe_restart_an_internal_phy_x550em(hw);
2891
2892 return status;
2893 }
2894
2895 /**
2896 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2897 * @hw: pointer to hardware structure
2898 * @speed: new link speed
2899 * @autoneg_wait_to_complete: unused
2900 *
2901 * Configure the integrated PHY for SFP support.
2902 **/
2903 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2904 ixgbe_link_speed speed,
2905 bool autoneg_wait_to_complete)
2906 {
2907 s32 ret_val;
2908 u16 reg_phy_ext;
2909 bool setup_linear = FALSE;
2910 u32 reg_slice, reg_phy_int, slice_offset;
2911
2912 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2913
2914 /* Check if SFP module is supported and linear */
2915 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2916
2917 /* If no SFP module is present, return success, since an SFP-not-present
2918 * error is not treated as fatal in the setup MAC link flow.
2919 */
2920 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2921 return IXGBE_SUCCESS;
2922
2923 if (ret_val != IXGBE_SUCCESS)
2924 return ret_val;
2925
2926 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2927 /* Configure internal PHY for native SFI based on module type */
2928 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2929 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2930 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2931
2932 if (ret_val != IXGBE_SUCCESS)
2933 return ret_val;
2934
2935 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2936 if (!setup_linear)
2937 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2938
2939 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2940 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2941 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2942
2943 if (ret_val != IXGBE_SUCCESS)
2944 return ret_val;
2945
2946 /* Setup SFI internal link. */
2947 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2948 } else {
2949 /* Configure internal PHY for KR/KX. */
2950 ixgbe_setup_kr_speed_x550em(hw, speed);
2951
2952 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2953 /* Find Address */
2954 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2955 return IXGBE_ERR_PHY_ADDR_INVALID;
2956 }
2957
2958 /* Get external PHY SKU id */
2959 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2960 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2961
2962 if (ret_val != IXGBE_SUCCESS)
2963 return ret_val;
2964
2965 /* When configuring quad port CS4223, the MAC instance is part
2966 * of the slice offset.
2967 */
2968 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2969 slice_offset = (hw->bus.lan_id +
2970 (hw->bus.instance_id << 1)) << 12;
2971 else
2972 slice_offset = hw->bus.lan_id << 12;
2973
2974 /* Configure CS4227/CS4223 LINE side to proper mode. */
2975 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2976
2977 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2978 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2979
2980 if (ret_val != IXGBE_SUCCESS)
2981 return ret_val;
2982
2983 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2984 (IXGBE_CS4227_EDC_MODE_SR << 1));
2985
2986 if (setup_linear)
2987 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2988 else
2989 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2990 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2991 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2992
2993 /* Flush previous write with a read */
2994 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2995 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2996 }
2997 return ret_val;
2998 }
2999
3000 /**
3001 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
3002 * @hw: pointer to hardware structure
3003 *
3004 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
3005 **/
3006 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
3007 {
3008 struct ixgbe_mac_info *mac = &hw->mac;
3009 s32 status;
3010 u32 reg_val;
3011
3012 /* Disable training protocol FSM. */
3013 status = mac->ops.read_iosf_sb_reg(hw,
3014 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3015 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3016 if (status != IXGBE_SUCCESS)
3017 return status;
3018 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3019 status = mac->ops.write_iosf_sb_reg(hw,
3020 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3021 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3022 if (status != IXGBE_SUCCESS)
3023 return status;
3024
3025 /* Disable Flex from training TXFFE. */
3026 status = mac->ops.read_iosf_sb_reg(hw,
3027 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3028 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3029 if (status != IXGBE_SUCCESS)
3030 return status;
3031 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3032 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3033 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3034 status = mac->ops.write_iosf_sb_reg(hw,
3035 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3036 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3037 if (status != IXGBE_SUCCESS)
3038 return status;
3039 status = mac->ops.read_iosf_sb_reg(hw,
3040 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3041 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3042 if (status != IXGBE_SUCCESS)
3043 return status;
3044 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3045 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3046 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3047 status = mac->ops.write_iosf_sb_reg(hw,
3048 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3049 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3050 if (status != IXGBE_SUCCESS)
3051 return status;
3052
3053 /* Enable override for coefficients. */
3054 status = mac->ops.read_iosf_sb_reg(hw,
3055 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3056 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3057 if (status != IXGBE_SUCCESS)
3058 return status;
3059 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3060 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3061 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3062 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3063 status = mac->ops.write_iosf_sb_reg(hw,
3064 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3065 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3066 return status;
3067 }
3068
3069 /**
3070 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3071 * @hw: pointer to hardware structure
3072 * @speed: the link speed to force
3073 *
3074 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3075 * internal and external PHY at a specific speed, without autonegotiation.
3076 **/
3077 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3078 {
3079 struct ixgbe_mac_info *mac = &hw->mac;
3080 s32 status;
3081 u32 reg_val;
3082
3083 /* iXFI is only supported with X552 */
3084 if (mac->type != ixgbe_mac_X550EM_x)
3085 return IXGBE_ERR_LINK_SETUP;
3086
3087 /* Disable AN and force speed to 10G Serial. */
3088 status = mac->ops.read_iosf_sb_reg(hw,
3089 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3090 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3091 if (status != IXGBE_SUCCESS)
3092 return status;
3093
3094 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3095 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3096
3097 /* Select forced link speed for internal PHY. */
3098 switch (*speed) {
3099 case IXGBE_LINK_SPEED_10GB_FULL:
3100 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3101 break;
3102 case IXGBE_LINK_SPEED_1GB_FULL:
3103 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3104 break;
3105 default:
3106 /* Other link speeds are not supported by internal KR PHY. */
3107 return IXGBE_ERR_LINK_SETUP;
3108 }
3109
3110 status = mac->ops.write_iosf_sb_reg(hw,
3111 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3112 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3113 if (status != IXGBE_SUCCESS)
3114 return status;
3115
3116 /* Additional configuration needed for x550em_x */
3117 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3118 status = ixgbe_setup_ixfi_x550em_x(hw);
3119 if (status != IXGBE_SUCCESS)
3120 return status;
3121 }
3122
3123 /* Toggle port SW reset by AN reset. */
3124 status = ixgbe_restart_an_internal_phy_x550em(hw);
3125
3126 return status;
3127 }
3128
3129 /**
3130 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3131 * @hw: address of hardware structure
3132 * @link_up: address of boolean to indicate link status
3133 *
3134 * Returns error code if unable to get link status.
3135 */
3136 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3137 {
3138 u32 ret;
3139 u16 autoneg_status;
3140
3141 *link_up = FALSE;
3142
3143 /* read this twice back to back to indicate current status */
3144 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3145 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3146 &autoneg_status);
3147 if (ret != IXGBE_SUCCESS)
3148 return ret;
3149
3150 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3151 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3152 &autoneg_status);
3153 if (ret != IXGBE_SUCCESS)
3154 return ret;
3155
3156 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3157
3158 return IXGBE_SUCCESS;
3159 }
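
/*
 * Illustrative note: the link-status bit read above behaves as a latched
 * status, which is why it is read twice back to back: the first read
 * clears any stale latched value, the second reflects the current state.
 * A hypothetical caller only needs the boolean result:
 *
 *	bool link_up;
 *
 *	if (ixgbe_ext_phy_t_x550em_get_link(hw, &link_up) == IXGBE_SUCCESS &&
 *	    link_up)
 *		... proceed with the internal link setup ...
 */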
3160
3161 /**
3162 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3163 * @hw: pointer to hardware structure
3164 *
3165 * Configures the link between the integrated KR PHY and the external X557 PHY
3166 * The driver will call this function when it gets a link status change
3167 * interrupt from the X557 PHY. This function configures the link speed
3168 * between the PHYs to match the link speed of the BASE-T link.
3169 *
3170 * A return of a non-zero value indicates an error, and the base driver should
3171 * not report link up.
3172 */
3173 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3174 {
3175 ixgbe_link_speed force_speed;
3176 bool link_up;
3177 u32 status;
3178 u16 speed;
3179
3180 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3181 return IXGBE_ERR_CONFIG;
3182
3183 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3184 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3185 /* If link is down, there is no setup necessary so return */
3186 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3187 if (status != IXGBE_SUCCESS)
3188 return status;
3189
3190 if (!link_up)
3191 return IXGBE_SUCCESS;
3192
3193 status = hw->phy.ops.read_reg(hw,
3194 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3195 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3196 &speed);
3197 if (status != IXGBE_SUCCESS)
3198 return status;
3199
3200 /* If link is still down - no setup is required so return */
3201 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3202 if (status != IXGBE_SUCCESS)
3203 return status;
3204 if (!link_up)
3205 return IXGBE_SUCCESS;
3206
3207 /* clear everything but the speed and duplex bits */
3208 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3209
3210 switch (speed) {
3211 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3212 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3213 break;
3214 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3215 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3216 break;
3217 default:
3218 /* Internal PHY does not support anything else */
3219 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3220 }
3221
3222 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3223 } else {
3224 speed = IXGBE_LINK_SPEED_10GB_FULL |
3225 IXGBE_LINK_SPEED_1GB_FULL;
3226 return ixgbe_setup_kr_speed_x550em(hw, speed);
3227 }
3228 }
3229
3230 /**
3231 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3232 * @hw: pointer to hardware structure
3233 *
3234 * Configures the integrated KR PHY to use internal loopback mode.
3235 **/
3236 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3237 {
3238 s32 status;
3239 u32 reg_val;
3240
3241 /* Disable AN and force speed to 10G Serial. */
3242 status = hw->mac.ops.read_iosf_sb_reg(hw,
3243 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3244 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3245 if (status != IXGBE_SUCCESS)
3246 return status;
3247 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3248 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3249 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3250 status = hw->mac.ops.write_iosf_sb_reg(hw,
3251 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3252 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3253 if (status != IXGBE_SUCCESS)
3254 return status;
3255
3256 /* Set near-end loopback clocks. */
3257 status = hw->mac.ops.read_iosf_sb_reg(hw,
3258 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3259 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3260 if (status != IXGBE_SUCCESS)
3261 return status;
3262 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3263 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3264 status = hw->mac.ops.write_iosf_sb_reg(hw,
3265 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3266 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3267 if (status != IXGBE_SUCCESS)
3268 return status;
3269
3270 /* Set loopback enable. */
3271 status = hw->mac.ops.read_iosf_sb_reg(hw,
3272 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3273 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3274 if (status != IXGBE_SUCCESS)
3275 return status;
3276 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3277 status = hw->mac.ops.write_iosf_sb_reg(hw,
3278 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3279 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3280 if (status != IXGBE_SUCCESS)
3281 return status;
3282
3283 /* Training bypass. */
3284 status = hw->mac.ops.read_iosf_sb_reg(hw,
3285 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3286 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3287 if (status != IXGBE_SUCCESS)
3288 return status;
3289 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3290 status = hw->mac.ops.write_iosf_sb_reg(hw,
3291 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3292 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3293
3294 return status;
3295 }
3296
3297 /**
3298 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface
3299 * command; acquires the SW/FW semaphore for the duration of the read.
3300 * @hw: pointer to hardware structure
3301 * @offset: offset of word in the EEPROM to read
3302 * @data: word read from the EEPROM
3303 *
3304 * Reads a 16 bit word from the EEPROM using the hostif.
3305 **/
3306 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3307 {
3308 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3309 struct ixgbe_hic_read_shadow_ram buffer;
3310 s32 status;
3311
3312 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3313 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3314 buffer.hdr.req.buf_lenh = 0;
3315 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3316 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3317
3318 /* convert offset from words to bytes */
3319 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3320 /* one word */
3321 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3322 buffer.pad2 = 0;
3323 buffer.pad3 = 0;
3324
3325 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3326 if (status)
3327 return status;
3328
3329 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3330 IXGBE_HI_COMMAND_TIMEOUT);
3331 if (!status) {
3332 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3333 FW_NVM_DATA_OFFSET);
3334 }
3335
3336 hw->mac.ops.release_swfw_sync(hw, mask);
3337 return status;
3338 }
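
/*
 * Illustrative usage sketch (hypothetical caller): offsets are in 16-bit
 * words, so reading the checksum word looks like
 *
 *	u16 word;
 *	s32 err = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, &word);
 *
 * and the routine itself converts the word offset to a byte address
 * (offset * 2) before handing it to the host interface command.
 */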
3339
3340 /**
3341 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3342 * @hw: pointer to hardware structure
3343 * @offset: offset of word in the EEPROM to read
3344 * @words: number of words
3345 * @data: word(s) read from the EEPROM
3346 *
3347 * Reads one or more 16 bit words from the EEPROM using the hostif.
3348 **/
3349 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3350 u16 offset, u16 words, u16 *data)
3351 {
3352 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3353 struct ixgbe_hic_read_shadow_ram buffer;
3354 u32 current_word = 0;
3355 u16 words_to_read;
3356 s32 status;
3357 u32 i;
3358
3359 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3360
3361 /* Take semaphore for the entire operation. */
3362 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3363 if (status) {
3364 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3365 return status;
3366 }
3367
3368 while (words) {
3369 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3370 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3371 else
3372 words_to_read = words;
3373
3374 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3375 buffer.hdr.req.buf_lenh = 0;
3376 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3377 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3378
3379 /* convert offset from words to bytes */
3380 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3381 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3382 buffer.pad2 = 0;
3383 buffer.pad3 = 0;
3384
3385 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3386 IXGBE_HI_COMMAND_TIMEOUT);
3387
3388 if (status) {
3389 DEBUGOUT("Host interface command failed\n");
3390 goto out;
3391 }
3392
3393 for (i = 0; i < words_to_read; i++) {
3394 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3395 2 * i;
3396 u32 value = IXGBE_READ_REG(hw, reg);
3397
3398 data[current_word] = (u16)(value & 0xffff);
3399 current_word++;
3400 i++;
3401 if (i < words_to_read) {
3402 value >>= 16;
3403 data[current_word] = (u16)(value & 0xffff);
3404 current_word++;
3405 }
3406 }
3407 words -= words_to_read;
3408 }
3409
3410 out:
3411 hw->mac.ops.release_swfw_sync(hw, mask);
3412 return status;
3413 }
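
/*
 * Illustrative note: each 32-bit FLEX_MNG read in the loop above carries
 * two EEPROM words, low half first.  For a register value of 0xBBBBAAAA
 * the loop stores
 *
 *	data[current_word]     = 0xAAAA;
 *	data[current_word + 1] = 0xBBBB;
 *
 * which is why the index is advanced twice per register read.
 */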
3414
3415 /**
3416 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3417 * @hw: pointer to hardware structure
3418 * @offset: offset of word in the EEPROM to write
3419 * @data: word to write to the EEPROM
3420 *
3421 * Write a 16 bit word to the EEPROM using the hostif.
3422 **/
3423 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3424 u16 data)
3425 {
3426 s32 status;
3427 struct ixgbe_hic_write_shadow_ram buffer;
3428
3429 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3430
3431 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3432 buffer.hdr.req.buf_lenh = 0;
3433 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3434 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3435
3436 /* one word */
3437 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3438 buffer.data = data;
3439 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3440
3441 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3442 sizeof(buffer),
3443 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3444
3445 return status;
3446 }
3447
3448 /**
3449 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3450 * @hw: pointer to hardware structure
3451 * @offset: offset of word in the EEPROM to write
3452 * @data: word to write to the EEPROM
3453 *
3454 * Write a 16 bit word to the EEPROM using the hostif.
3455 **/
3456 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3457 u16 data)
3458 {
3459 s32 status = IXGBE_SUCCESS;
3460
3461 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3462
3463 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3464 IXGBE_SUCCESS) {
3465 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3466 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3467 } else {
3468 DEBUGOUT("write ee hostif failed to get semaphore\n");
3469 status = IXGBE_ERR_SWFW_SYNC;
3470 }
3471
3472 return status;
3473 }
3474
3475 /**
3476 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3477 * @hw: pointer to hardware structure
3478 * @offset: offset of word in the EEPROM to write
3479 * @words: number of words
3480 * @data: word(s) write to the EEPROM
3481 *
3482 * Writes one or more 16 bit words to the EEPROM using the hostif.
3483 **/
3484 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3485 u16 offset, u16 words, u16 *data)
3486 {
3487 s32 status = IXGBE_SUCCESS;
3488 u32 i = 0;
3489
3490 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3491
3492 /* Take semaphore for the entire operation. */
3493 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3494 if (status != IXGBE_SUCCESS) {
3495 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3496 goto out;
3497 }
3498
3499 for (i = 0; i < words; i++) {
3500 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3501 data[i]);
3502
3503 if (status != IXGBE_SUCCESS) {
3504 DEBUGOUT("Eeprom buffered write failed\n");
3505 break;
3506 }
3507 }
3508
3509 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3510 out:
3511
3512 return status;
3513 }
3514
3515 /**
3516 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3517 * @hw: pointer to hardware structure
3518 * @ptr: pointer offset in eeprom
3519 * @size: size of section pointed to by ptr; if 0, the first word is used as the size
3520 * @csum: address of checksum to update
3521 * @buffer: pointer to buffer containing the EEPROM image, or NULL to read it from the EEPROM
3522 * @buffer_size: size of buffer
3523 *
3524 * Returns error status for any failure
3525 */
3526 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3527 u16 size, u16 *csum, u16 *buffer,
3528 u32 buffer_size)
3529 {
3530 u16 buf[256];
3531 s32 status;
3532 u16 length, bufsz, i, start;
3533 u16 *local_buffer;
3534
3535 bufsz = sizeof(buf) / sizeof(buf[0]);
3536
3537 /* Read a chunk at the pointer location */
3538 if (!buffer) {
3539 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3540 if (status) {
3541 DEBUGOUT("Failed to read EEPROM image\n");
3542 return status;
3543 }
3544 local_buffer = buf;
3545 } else {
3546 if (buffer_size < ptr)
3547 return IXGBE_ERR_PARAM;
3548 local_buffer = &buffer[ptr];
3549 }
3550
3551 if (size) {
3552 start = 0;
3553 length = size;
3554 } else {
3555 start = 1;
3556 length = local_buffer[0];
3557
3558 /* Skip pointer section if length is invalid. */
3559 if (length == 0xFFFF || length == 0 ||
3560 (ptr + length) >= hw->eeprom.word_size)
3561 return IXGBE_SUCCESS;
3562 }
3563
3564 if (buffer && ((u32)start + (u32)length > buffer_size))
3565 return IXGBE_ERR_PARAM;
3566
3567 for (i = start; length; i++, length--) {
3568 if (i == bufsz && !buffer) {
3569 ptr += bufsz;
3570 i = 0;
3571 if (length < bufsz)
3572 bufsz = length;
3573
3574 /* Read a chunk at the pointer location */
3575 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3576 bufsz, buf);
3577 if (status) {
3578 DEBUGOUT("Failed to read EEPROM image\n");
3579 return status;
3580 }
3581 }
3582 *csum += local_buffer[i];
3583 }
3584 return IXGBE_SUCCESS;
3585 }
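
/*
 * Illustrative usage sketch: for a pointer section whose first word holds
 * its own length, the checksum walk below passes a size of 0,
 *
 *	status = ixgbe_checksum_ptr_x550(hw, pointer, 0, &checksum,
 *					 buffer, buffer_size);
 *
 * and this routine then takes local_buffer[0] as the length, silently
 * skipping the section when that length is 0, 0xFFFF, or would run past
 * the end of the EEPROM.
 */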
3586
3587 /**
3588 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3589 * @hw: pointer to hardware structure
3590 * @buffer: pointer to buffer containing the EEPROM image to checksum, or NULL to read it from the EEPROM
3591 * @buffer_size: size of buffer
3592 *
3593 * Returns a negative error code on error, or the 16-bit checksum
3594 **/
3595 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3596 {
3597 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3598 u16 *local_buffer;
3599 s32 status;
3600 u16 checksum = 0;
3601 u16 pointer, i, size;
3602
3603 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3604
3605 hw->eeprom.ops.init_params(hw);
3606
3607 if (!buffer) {
3608 /* Read pointer area */
3609 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3610 IXGBE_EEPROM_LAST_WORD + 1,
3611 eeprom_ptrs);
3612 if (status) {
3613 DEBUGOUT("Failed to read EEPROM image\n");
3614 return status;
3615 }
3616 local_buffer = eeprom_ptrs;
3617 } else {
3618 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3619 return IXGBE_ERR_PARAM;
3620 local_buffer = buffer;
3621 }
3622
3623 /*
3624 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3625 * checksum word itself
3626 */
3627 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3628 if (i != IXGBE_EEPROM_CHECKSUM)
3629 checksum += local_buffer[i];
3630
3631 /*
3632 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3633 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3634 */
3635 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3636 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3637 continue;
3638
3639 pointer = local_buffer[i];
3640
3641 /* Skip pointer section if the pointer is invalid. */
3642 if (pointer == 0xFFFF || pointer == 0 ||
3643 pointer >= hw->eeprom.word_size)
3644 continue;
3645
3646 switch (i) {
3647 case IXGBE_PCIE_GENERAL_PTR:
3648 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3649 break;
3650 case IXGBE_PCIE_CONFIG0_PTR:
3651 case IXGBE_PCIE_CONFIG1_PTR:
3652 size = IXGBE_PCIE_CONFIG_SIZE;
3653 break;
3654 default:
3655 size = 0;
3656 break;
3657 }
3658
3659 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3660 buffer, buffer_size);
3661 if (status)
3662 return status;
3663 }
3664
3665 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3666
3667 return (s32)checksum;
3668 }
3669
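/*
 * Sketch of the resulting invariant (illustrative only): the value returned
 * above is IXGBE_EEPROM_SUM minus the running sum, so adding it back to the
 * sum of the other checksummed words reproduces IXGBE_EEPROM_SUM.  This is
 * what validation effectively compares against the stored checksum word.
 */
#if 0
static bool
example_checksum_invariant_holds(u16 sum_without_checksum, u16 stored_checksum)
{
	/* 16-bit wrap-around is intentional. */
	return (u16)(sum_without_checksum + stored_checksum) ==
	    (u16)IXGBE_EEPROM_SUM;
}
#endif
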
3670 /**
3671 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3672 * @hw: pointer to hardware structure
3673 *
3674 * Returns a negative error code on error, or the 16-bit checksum
3675 **/
3676 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3677 {
3678 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3679 }
3680
3681 /**
3682 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3683 * @hw: pointer to hardware structure
3684 * @checksum_val: calculated checksum
3685 *
3686 * Performs checksum calculation and validates the EEPROM checksum. If the
3687 * caller does not need checksum_val, the value can be NULL.
3688 **/
3689 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3690 {
3691 s32 status;
3692 u16 checksum;
3693 u16 read_checksum = 0;
3694
3695 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3696
3697 /* Read the first word from the EEPROM. If this times out or fails, do
3698 * not continue or we could be in for a very long wait while every
3699 * EEPROM read fails
3700 */
3701 status = hw->eeprom.ops.read(hw, 0, &checksum);
3702 if (status) {
3703 DEBUGOUT("EEPROM read failed\n");
3704 return status;
3705 }
3706
3707 status = hw->eeprom.ops.calc_checksum(hw);
3708 if (status < 0)
3709 return status;
3710
3711 checksum = (u16)(status & 0xffff);
3712
3713 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3714 &read_checksum);
3715 if (status)
3716 return status;
3717
3718 /* Verify read checksum from EEPROM is the same as
3719 * calculated checksum
3720 */
3721 if (read_checksum != checksum) {
3722 status = IXGBE_ERR_EEPROM_CHECKSUM;
3723 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3724 "Invalid EEPROM checksum");
3725 }
3726
3727 /* If the user cares, return the calculated checksum */
3728 if (checksum_val)
3729 *checksum_val = checksum;
3730
3731 return status;
3732 }
3733
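/*
 * Illustrative usage (not part of the driver): an attach path that only cares
 * about pass/fail can pass NULL for the checksum value.
 */
#if 0
static bool
example_nvm_checksum_ok(struct ixgbe_hw *hw)
{
	return ixgbe_validate_eeprom_checksum_X550(hw, NULL) == IXGBE_SUCCESS;
}
#endif
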
3734 /**
3735 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3736 * @hw: pointer to hardware structure
3737 *
3738 * After writing EEPROM to shadow RAM using EEWR register, software calculates
3739 * checksum and updates the EEPROM and instructs the hardware to update
3740 * the flash.
3741 **/
3742 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3743 {
3744 s32 status;
3745 u16 checksum = 0;
3746
3747 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3748
3749 /* Read the first word from the EEPROM. If this times out or fails, do
3750 * not continue or we could be in for a very long wait while every
3751 * EEPROM read fails
3752 */
3753 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3754 if (status) {
3755 DEBUGOUT("EEPROM read failed\n");
3756 return status;
3757 }
3758
3759 status = ixgbe_calc_eeprom_checksum_X550(hw);
3760 if (status < 0)
3761 return status;
3762
3763 checksum = (u16)(status & 0xffff);
3764
3765 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3766 checksum);
3767 if (status)
3768 return status;
3769
3770 status = ixgbe_update_flash_X550(hw);
3771
3772 return status;
3773 }
3774
3775 /**
3776 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3777 * @hw: pointer to hardware structure
3778 *
3779 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3780 **/
3781 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3782 {
3783 s32 status = IXGBE_SUCCESS;
3784 union ixgbe_hic_hdr2 buffer;
3785
3786 DEBUGFUNC("ixgbe_update_flash_X550");
3787
3788 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3789 buffer.req.buf_lenh = 0;
3790 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3791 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3792
3793 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3794 sizeof(buffer),
3795 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3796
3797 return status;
3798 }
3799
3800 /**
3801 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3802 * @hw: pointer to hardware structure
3803 *
3804 * Determines physical layer capabilities of the current configuration.
3805 **/
3806 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3807 {
3808 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3809 u16 ext_ability = 0;
3810
3811 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3812
3813 hw->phy.ops.identify(hw);
3814
3815 switch (hw->phy.type) {
3816 case ixgbe_phy_x550em_kr:
3817 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3818 if (hw->phy.nw_mng_if_sel &
3819 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3820 physical_layer =
3821 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3822 break;
3823 } else if (hw->device_id ==
3824 IXGBE_DEV_ID_X550EM_A_KR_L) {
3825 physical_layer =
3826 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3827 break;
3828 }
3829 }
3830 /* fall through */
3831 case ixgbe_phy_x550em_xfi:
3832 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3833 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3834 break;
3835 case ixgbe_phy_x550em_kx4:
3836 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3837 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3838 break;
3839 case ixgbe_phy_x550em_ext_t:
3840 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3841 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3842 &ext_ability);
3843 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3844 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3845 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3846 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3847 break;
3848 case ixgbe_phy_fw:
3849 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3850 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3851 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3852 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3853 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3854 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3855 break;
3856 case ixgbe_phy_sgmii:
3857 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3858 break;
3859 case ixgbe_phy_ext_1g_t:
3860 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3861 break;
3862 default:
3863 break;
3864 }
3865
3866 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3867 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3868
3869 return physical_layer;
3870 }
3871
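/*
 * Illustrative only: the return value is a bit mask of IXGBE_PHYSICAL_LAYER_*
 * flags, so callers test individual bits rather than comparing for equality.
 */
#if 0
static bool
example_supports_10gbase_t(struct ixgbe_hw *hw)
{
	return (ixgbe_get_supported_physical_layer_X550em(hw) &
	    IXGBE_PHYSICAL_LAYER_10GBASE_T) != 0;
}
#endif
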
3872 /**
3873 * ixgbe_get_bus_info_X550em - Set PCI bus info
3874 * @hw: pointer to hardware structure
3875 *
3876 * Sets bus link width and speed to unknown because X550em is
3877 * not a PCI device.
3878 **/
3879 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3880 {
3881
3882 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3883
3884 hw->bus.width = ixgbe_bus_width_unknown;
3885 hw->bus.speed = ixgbe_bus_speed_unknown;
3886
3887 hw->mac.ops.set_lan_id(hw);
3888
3889 return IXGBE_SUCCESS;
3890 }
3891
3892 /**
3893 * ixgbe_disable_rx_x550 - Disable RX unit
3894 * @hw: pointer to hardware structure
3895 *
3896 * Disables the Rx DMA unit for x550
3897 **/
3898 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3899 {
3900 u32 rxctrl, pfdtxgswc;
3901 s32 status;
3902 struct ixgbe_hic_disable_rxen fw_cmd;
3903
3904 DEBUGFUNC("ixgbe_disable_rx_dma_x550");
3905
3906 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3907 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3908 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3909 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3910 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3911 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3912 hw->mac.set_lben = TRUE;
3913 } else {
3914 hw->mac.set_lben = FALSE;
3915 }
3916
3917 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3918 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3919 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3920 fw_cmd.port_number = (u8)hw->bus.lan_id;
3921
3922 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3923 sizeof(struct ixgbe_hic_disable_rxen),
3924 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3925
3926 /* If we fail - disable RX using register write */
3927 if (status) {
3928 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3929 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3930 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3931 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3932 }
3933 }
3934 }
3935 }
3936
3937 /**
3938 * ixgbe_enter_lplu_x550em - Transition to low power states
3939 * @hw: pointer to hardware structure
3940 *
3941 * Configures Low Power Link Up on transition to low power states
3942 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3943 * X557 PHY immediately prior to entering LPLU.
3944 **/
3945 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3946 {
3947 u16 an_10g_cntl_reg, autoneg_reg, speed;
3948 s32 status;
3949 ixgbe_link_speed lcd_speed;
3950 u32 save_autoneg;
3951 bool link_up;
3952
3953 /* SW LPLU not required on later HW revisions. */
3954 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3955 (IXGBE_FUSES0_REV_MASK &
3956 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3957 return IXGBE_SUCCESS;
3958
3959 /* If blocked by MNG FW, then don't restart AN */
3960 if (ixgbe_check_reset_blocked(hw))
3961 return IXGBE_SUCCESS;
3962
3963 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3964 if (status != IXGBE_SUCCESS)
3965 return status;
3966
3967 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3968
3969 if (status != IXGBE_SUCCESS)
3970 return status;
3971
3972 /* If link is down, LPLU is disabled in NVM, or both WoL and manageability
3973 * are disabled, then force link down by entering low power mode.
3974 */
3975 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3976 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3977 return ixgbe_set_copper_phy_power(hw, FALSE);
3978
3979 /* Determine LCD */
3980 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3981
3982 if (status != IXGBE_SUCCESS)
3983 return status;
3984
3985 /* If no valid LCD link speed, then force link down and exit. */
3986 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3987 return ixgbe_set_copper_phy_power(hw, FALSE);
3988
3989 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3990 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3991 &speed);
3992
3993 if (status != IXGBE_SUCCESS)
3994 return status;
3995
3996 /* If no link now, speed is invalid so take link down */
3997 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3998 if (status != IXGBE_SUCCESS)
3999 return ixgbe_set_copper_phy_power(hw, FALSE);
4000
4001 /* clear everything but the speed bits */
4002 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
4003
4004 /* If current speed is already LCD, then exit. */
4005 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
4006 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
4007 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
4008 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
4009 return status;
4010
4011 /* Clear AN completed indication */
4012 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
4013 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4014 &autoneg_reg);
4015
4016 if (status != IXGBE_SUCCESS)
4017 return status;
4018
4019 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4020 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4021 &an_10g_cntl_reg);
4022
4023 if (status != IXGBE_SUCCESS)
4024 return status;
4025
4026 status = hw->phy.ops.read_reg(hw,
4027 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4028 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4029 &autoneg_reg);
4030
4031 if (status != IXGBE_SUCCESS)
4032 return status;
4033
4034 save_autoneg = hw->phy.autoneg_advertised;
4035
4036 /* Set up link at the lowest common denominator (LCD) speed */
4037 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4038
4039 /* restore autoneg from before setting lplu speed */
4040 hw->phy.autoneg_advertised = save_autoneg;
4041
4042 return status;
4043 }
4044
4045 /**
4046 * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4047 * @hw: pointer to hardware structure
4048 * @lcd_speed: pointer to lowest common link speed
4049 *
4050 * Determine lowest common link speed with link partner.
4051 **/
4052 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4053 {
4054 u16 an_lp_status;
4055 s32 status;
4056 u16 word = hw->eeprom.ctrl_word_3;
4057
4058 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4059
4060 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4061 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4062 &an_lp_status);
4063
4064 if (status != IXGBE_SUCCESS)
4065 return status;
4066
4067 /* If link partner advertised 1G, return 1G */
4068 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4069 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4070 return status;
4071 }
4072
4073 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4074 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4075 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4076 return status;
4077
4078 /* Link partner not capable of lower speeds, return 10G */
4079 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4080 return status;
4081 }
4082
4083 /**
4084 * ixgbe_setup_fc_X550em - Set up flow control
4085 * @hw: pointer to hardware structure
4086 *
4087 * Called at init time to set up flow control.
4088 **/
4089 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4090 {
4091 s32 ret_val = IXGBE_SUCCESS;
4092 u32 pause, asm_dir, reg_val;
4093
4094 DEBUGFUNC("ixgbe_setup_fc_X550em");
4095
4096 /* Validate the requested mode */
4097 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4098 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4099 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4100 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4101 goto out;
4102 }
4103
4104 /* 10gig parts do not have a word in the EEPROM to determine the
4105 * default flow control setting, so we explicitly set it to full.
4106 */
4107 if (hw->fc.requested_mode == ixgbe_fc_default)
4108 hw->fc.requested_mode = ixgbe_fc_full;
4109
4110 /* Determine PAUSE and ASM_DIR bits. */
4111 switch (hw->fc.requested_mode) {
4112 case ixgbe_fc_none:
4113 pause = 0;
4114 asm_dir = 0;
4115 break;
4116 case ixgbe_fc_tx_pause:
4117 pause = 0;
4118 asm_dir = 1;
4119 break;
4120 case ixgbe_fc_rx_pause:
4121 /* Rx Flow control is enabled and Tx Flow control is
4122 * disabled by software override. Since there really
4123 * isn't a way to advertise that we are capable of RX
4124 * Pause ONLY, we will advertise that we support both
4125 * symmetric and asymmetric Rx PAUSE, as such we fall
4126 * through to the fc_full statement. Later, we will
4127 * disable the adapter's ability to send PAUSE frames.
4128 */
4129 case ixgbe_fc_full:
4130 pause = 1;
4131 asm_dir = 1;
4132 break;
4133 default:
4134 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4135 "Flow control param set incorrectly\n");
4136 ret_val = IXGBE_ERR_CONFIG;
4137 goto out;
4138 }
4139
4140 switch (hw->device_id) {
4141 case IXGBE_DEV_ID_X550EM_X_KR:
4142 case IXGBE_DEV_ID_X550EM_A_KR:
4143 case IXGBE_DEV_ID_X550EM_A_KR_L:
4144 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4145 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4146 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4147 if (ret_val != IXGBE_SUCCESS)
4148 goto out;
4149 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4150 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4151 if (pause)
4152 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4153 if (asm_dir)
4154 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4155 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4156 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4157 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4158
4159 /* This device does not fully support AN. */
4160 hw->fc.disable_fc_autoneg = TRUE;
4161 break;
4162 case IXGBE_DEV_ID_X550EM_X_XFI:
4163 hw->fc.disable_fc_autoneg = TRUE;
4164 break;
4165 default:
4166 break;
4167 }
4168
4169 out:
4170 return ret_val;
4171 }
4172
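/*
 * For reference (illustrative only): the requested_mode to (PAUSE, ASM_DIR)
 * advertisement mapping applied above, expressed as a lookup table.  Note
 * that rx_pause is advertised the same way as full; the ability to send
 * PAUSE frames is disabled later when flow control is actually applied.
 */
#if 0
static const struct { u8 pause; u8 asm_dir; } example_fc_adv[] = {
	[ixgbe_fc_none]     = { 0, 0 },
	[ixgbe_fc_tx_pause] = { 0, 1 },
	[ixgbe_fc_rx_pause] = { 1, 1 },
	[ixgbe_fc_full]     = { 1, 1 },
};
#endif
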
4173 /**
4174 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4175 * @hw: pointer to hardware structure
4176 *
4177 * Enable flow control according to IEEE clause 37.
4178 **/
4179 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4180 {
4181 u32 link_s1, lp_an_page_low, an_cntl_1;
4182 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4183 ixgbe_link_speed speed;
4184 bool link_up;
4185
4186 /* AN should have completed when the cable was plugged in.
4187 * Look for reasons to bail out. Bail out if:
4188 * - FC autoneg is disabled, or if
4189 * - link is not up.
4190 */
4191 if (hw->fc.disable_fc_autoneg) {
4192 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4193 "Flow control autoneg is disabled");
4194 goto out;
4195 }
4196
4197 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4198 if (!link_up) {
4199 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4200 goto out;
4201 }
4202
4203 /* Check if auto-negotiation has completed */
4204 status = hw->mac.ops.read_iosf_sb_reg(hw,
4205 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4206 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4207
4208 if (status != IXGBE_SUCCESS ||
4209 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4210 DEBUGOUT("Auto-Negotiation did not complete\n");
4211 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4212 goto out;
4213 }
4214
4215 /* Read the 10g AN autoc and LP ability registers and resolve
4216 * local flow control settings accordingly
4217 */
4218 status = hw->mac.ops.read_iosf_sb_reg(hw,
4219 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4220 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4221
4222 if (status != IXGBE_SUCCESS) {
4223 DEBUGOUT("Auto-Negotiation did not complete\n");
4224 goto out;
4225 }
4226
4227 status = hw->mac.ops.read_iosf_sb_reg(hw,
4228 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4229 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4230
4231 if (status != IXGBE_SUCCESS) {
4232 DEBUGOUT("Auto-Negotiation did not complete\n");
4233 goto out;
4234 }
4235
4236 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4237 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4238 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4239 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4240 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4241
4242 out:
4243 if (status == IXGBE_SUCCESS) {
4244 hw->fc.fc_was_autonegged = TRUE;
4245 } else {
4246 hw->fc.fc_was_autonegged = FALSE;
4247 hw->fc.current_mode = hw->fc.requested_mode;
4248 }
4249 }
4250
4251 /**
4252 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4253 * @hw: pointer to hardware structure
4254 * Fiber flow control is not autonegotiated here; the requested mode is
4255 * simply applied as the current mode.
4255 **/
4256 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4257 {
4258 hw->fc.fc_was_autonegged = FALSE;
4259 hw->fc.current_mode = hw->fc.requested_mode;
4260 }
4261
4262 /**
4263 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4264 * @hw: pointer to hardware structure
4265 *
4266 * Enable flow control according to IEEE clause 37.
4267 **/
4268 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4269 {
4270 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4271 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4272 ixgbe_link_speed speed;
4273 bool link_up;
4274
4275 /* AN should have completed when the cable was plugged in.
4276 * Look for reasons to bail out. Bail out if:
4277 * - FC autoneg is disabled, or if
4278 * - link is not up.
4279 */
4280 if (hw->fc.disable_fc_autoneg) {
4281 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4282 "Flow control autoneg is disabled");
4283 goto out;
4284 }
4285
4286 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4287 if (!link_up) {
4288 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4289 goto out;
4290 }
4291
4292 /* Check if auto-negotiation has completed */
4293 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4294 if (status != IXGBE_SUCCESS ||
4295 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4296 DEBUGOUT("Auto-Negotiation did not complete\n");
4297 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4298 goto out;
4299 }
4300
4301 /* Negotiate the flow control */
4302 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4303 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4304 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4305 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4306 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4307
4308 out:
4309 if (status == IXGBE_SUCCESS) {
4310 hw->fc.fc_was_autonegged = TRUE;
4311 } else {
4312 hw->fc.fc_was_autonegged = FALSE;
4313 hw->fc.current_mode = hw->fc.requested_mode;
4314 }
4315 }
4316
4317 /**
4318 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4319 * @hw: pointer to hardware structure
4320 *
4321 * Called at init time to set up flow control.
4322 **/
4323 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4324 {
4325 s32 status = IXGBE_SUCCESS;
4326 u32 an_cntl = 0;
4327
4328 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4329
4330 /* Validate the requested mode */
4331 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4332 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4333 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4334 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4335 }
4336
4337 if (hw->fc.requested_mode == ixgbe_fc_default)
4338 hw->fc.requested_mode = ixgbe_fc_full;
4339
4340 /* Set up the 1G and 10G flow control advertisement registers so the
4341 * HW will be able to do FC autoneg once the cable is plugged in. If
4342 * we link at 10G, the 1G advertisement is harmless and vice versa.
4343 */
4344 status = hw->mac.ops.read_iosf_sb_reg(hw,
4345 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4346 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4347
4348 if (status != IXGBE_SUCCESS) {
4349 DEBUGOUT("Flow control advertisement register read failed\n");
4350 return status;
4351 }
4352
4353 /* The possible values of fc.requested_mode are:
4354 * 0: Flow control is completely disabled
4355 * 1: Rx flow control is enabled (we can receive pause frames,
4356 * but not send pause frames).
4357 * 2: Tx flow control is enabled (we can send pause frames but
4358 * we do not support receiving pause frames).
4359 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4360 * other: Invalid.
4361 */
4362 switch (hw->fc.requested_mode) {
4363 case ixgbe_fc_none:
4364 /* Flow control completely disabled by software override. */
4365 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4366 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4367 break;
4368 case ixgbe_fc_tx_pause:
4369 /* Tx Flow control is enabled, and Rx Flow control is
4370 * disabled by software override.
4371 */
4372 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4373 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4374 break;
4375 case ixgbe_fc_rx_pause:
4376 /* Rx Flow control is enabled and Tx Flow control is
4377 * disabled by software override. Since there really
4378 * isn't a way to advertise that we are capable of RX
4379 * Pause ONLY, we will advertise that we support both
4380 * symmetric and asymmetric Rx PAUSE, as such we fall
4381 * through to the fc_full statement. Later, we will
4382 * disable the adapter's ability to send PAUSE frames.
4383 */
4384 case ixgbe_fc_full:
4385 /* Flow control (both Rx and Tx) is enabled by SW override. */
4386 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4387 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4388 break;
4389 default:
4390 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4391 "Flow control param set incorrectly\n");
4392 return IXGBE_ERR_CONFIG;
4393 }
4394
4395 status = hw->mac.ops.write_iosf_sb_reg(hw,
4396 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4397 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4398
4399 /* Restart auto-negotiation. */
4400 status = ixgbe_restart_an_internal_phy_x550em(hw);
4401
4402 return status;
4403 }
4404
4405 /**
4406 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4407 * @hw: pointer to hardware structure
4408 * @state: set mux if 1, clear if 0
4409 */
4410 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4411 {
4412 u32 esdp;
4413
4414 if (!hw->bus.lan_id)
4415 return;
4416 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4417 if (state)
4418 esdp |= IXGBE_ESDP_SDP1;
4419 else
4420 esdp &= ~IXGBE_ESDP_SDP1;
4421 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4422 IXGBE_WRITE_FLUSH(hw);
4423 }
4424
4425 /**
4426 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4427 * @hw: pointer to hardware structure
4428 * @mask: Mask to specify which semaphore to acquire
4429 *
4430 * Acquires the SWFW semaphore and sets the I2C MUX
4431 **/
4432 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4433 {
4434 s32 status;
4435
4436 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4437
4438 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4439 if (status)
4440 return status;
4441
4442 if (mask & IXGBE_GSSR_I2C_MASK)
4443 ixgbe_set_mux(hw, 1);
4444
4445 return IXGBE_SUCCESS;
4446 }
4447
4448 /**
4449 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4450 * @hw: pointer to hardware structure
4451 * @mask: Mask to specify which semaphore to release
4452 *
4453 * Releases the SWFW semaphore and sets the I2C MUX
4454 **/
4455 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4456 {
4457 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4458
4459 if (mask & IXGBE_GSSR_I2C_MASK)
4460 ixgbe_set_mux(hw, 0);
4461
4462 ixgbe_release_swfw_sync_X540(hw, mask);
4463 }
4464
4465 /**
4466 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4467 * @hw: pointer to hardware structure
4468 * @mask: Mask to specify which semaphore to acquire
4469 *
4470 * Acquires the SWFW semaphore and gets the shared PHY token as needed
4471 */
4472 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4473 {
4474 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4475 int retries = FW_PHY_TOKEN_RETRIES;
4476 s32 status = IXGBE_SUCCESS;
4477
4478 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4479
4480 while (--retries) {
4481 status = IXGBE_SUCCESS;
4482 if (hmask)
4483 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4484 if (status) {
4485 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4486 status);
4487 return status;
4488 }
4489 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4490 return IXGBE_SUCCESS;
4491
4492 status = ixgbe_get_phy_token(hw);
4493 if (status == IXGBE_ERR_TOKEN_RETRY)
4494 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4495 status);
4496
4497 if (status == IXGBE_SUCCESS)
4498 return IXGBE_SUCCESS;
4499
4500 if (hmask)
4501 ixgbe_release_swfw_sync_X540(hw, hmask);
4502
4503 if (status != IXGBE_ERR_TOKEN_RETRY) {
4504 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4505 status);
4506 return status;
4507 }
4508 }
4509
4510 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4511 hw->phy.id);
4512 return status;
4513 }
4514
4515 /**
4516 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4517 * @hw: pointer to hardware structure
4518 * @mask: Mask to specify which semaphore to release
4519 *
4520 * Releases the SWFW semaphore and puts back the shared PHY token as needed
4521 */
4522 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4523 {
4524 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4525
4526 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4527
4528 if (mask & IXGBE_GSSR_TOKEN_SM)
4529 ixgbe_put_phy_token(hw);
4530
4531 if (hmask)
4532 ixgbe_release_swfw_sync_X540(hw, hmask);
4533 }
4534
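/*
 * Illustrative pairing (not part of the driver): code that touches the MDIO
 * bus shared between the two MACs adds IXGBE_GSSR_TOKEN_SM to its semaphore
 * mask and releases with the same mask, as the register accessors below do.
 */
#if 0
static s32
example_with_phy_token(struct ixgbe_hw *hw, u32 base_mask)
{
	u32 mask = base_mask | IXGBE_GSSR_TOKEN_SM;
	s32 err;

	err = hw->mac.ops.acquire_swfw_sync(hw, mask);
	if (err != IXGBE_SUCCESS)
		return err;

	/* ... access the shared PHY here ... */

	hw->mac.ops.release_swfw_sync(hw, mask);
	return IXGBE_SUCCESS;
}
#endif
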
4535 /**
4536 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4537 * @hw: pointer to hardware structure
4538 * @reg_addr: 32 bit address of PHY register to read
4539 * @device_type: 5 bit device type
4540 * @phy_data: Pointer to read data from PHY register
4541 *
4542 * Reads a value from a specified PHY register using the SWFW lock and PHY
4543 * Token. The PHY Token is needed since the MDIO is shared between two MAC
4544 * instances.
4545 **/
4546 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4547 u32 device_type, u16 *phy_data)
4548 {
4549 s32 status;
4550 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4551
4552 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4553
4554 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4555 return IXGBE_ERR_SWFW_SYNC;
4556
4557 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4558
4559 hw->mac.ops.release_swfw_sync(hw, mask);
4560
4561 return status;
4562 }
4563
4564 /**
4565 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4566 * @hw: pointer to hardware structure
4567 * @reg_addr: 32 bit PHY register to write
4568 * @device_type: 5 bit device type
4569 * @phy_data: Data to write to the PHY register
4570 *
4571 * Writes a value to a specified PHY register using the SWFW lock and PHY Token.
4572 * The PHY Token is needed since the MDIO is shared between two MAC instances.
4573 **/
4574 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4575 u32 device_type, u16 phy_data)
4576 {
4577 s32 status;
4578 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4579
4580 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4581
4582 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4583 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4584 phy_data);
4585 hw->mac.ops.release_swfw_sync(hw, mask);
4586 } else {
4587 status = IXGBE_ERR_SWFW_SYNC;
4588 }
4589
4590 return status;
4591 }
4592
4593 /**
4594 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4595 * @hw: pointer to hardware structure
4596 *
4597 * Handle an external Base T PHY interrupt. If the interrupt is a high
4598 * temperature failure alarm, return an error; if it is a link status
4599 * change, set up the internal/external PHY link.
4600 *
4601 * Returns IXGBE_ERR_OVERTEMP if the interrupt is a high temperature
4602 * failure alarm, otherwise returns the PHY access status.
4603 */
4604 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4605 {
4606 bool lsc;
4607 u32 status;
4608
4609 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4610
4611 if (status != IXGBE_SUCCESS)
4612 return status;
4613
4614 if (lsc)
4615 return ixgbe_setup_internal_phy(hw);
4616
4617 return IXGBE_SUCCESS;
4618 }
4619
4620 /**
4621 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4622 * @hw: pointer to hardware structure
4623 * @speed: new link speed
4624 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4625 *
4626 * Setup internal/external PHY link speed based on link speed, then set
4627 * external PHY auto advertised link speed.
4628 *
4629 * Returns error status for any failure
4630 **/
4631 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4632 ixgbe_link_speed speed,
4633 bool autoneg_wait_to_complete)
4634 {
4635 s32 status;
4636 ixgbe_link_speed force_speed;
4637
4638 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4639
4640 /* Set up the internal/external PHY link speed to iXFI (10G), unless
4641 * only 1G is auto advertised, in which case set up a KX (1G) link.
4642 */
4643 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4644 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4645 else
4646 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4647
4648 /* If X552 and the internal link mode is XFI, then set up the XFI internal link.
4649 */
4650 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4651 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4652 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4653
4654 if (status != IXGBE_SUCCESS)
4655 return status;
4656 }
4657
4658 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4659 }
4660
4661 /**
4662 * ixgbe_check_link_t_X550em - Determine link and speed status
4663 * @hw: pointer to hardware structure
4664 * @speed: pointer to link speed
4665 * @link_up: TRUE when link is up
4666 * @link_up_wait_to_complete: bool used to wait for link up or not
4667 *
4668 * Check that both the MAC and X557 external PHY have link.
4669 **/
4670 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4671 bool *link_up, bool link_up_wait_to_complete)
4672 {
4673 u32 status;
4674 u16 i, autoneg_status = 0;
4675
4676 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4677 return IXGBE_ERR_CONFIG;
4678
4679 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4680 link_up_wait_to_complete);
4681
4682 /* If check link fails or MAC link is not up, then return */
4683 if (status != IXGBE_SUCCESS || !(*link_up))
4684 return status;
4685
4686 /* MAC link is up, so check the external X557 PHY link. The PHY link
4687 * status latches low, so a single read can only detect a link drop;
4688 * determining the current link state requires back-to-back reads
4689 * rather than a single read.
4690 */
4691 for (i = 0; i < 2; i++) {
4692 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4693 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4694 &autoneg_status);
4695
4696 if (status != IXGBE_SUCCESS)
4697 return status;
4698 }
4699
4700 /* If external PHY link is not up, then indicate link not up */
4701 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4702 *link_up = FALSE;
4703
4704 return IXGBE_SUCCESS;
4705 }
4706
4707 /**
4708 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4709 * @hw: pointer to hardware structure
4710 **/
4711 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4712 {
4713 s32 status;
4714
4715 status = ixgbe_reset_phy_generic(hw);
4716
4717 if (status != IXGBE_SUCCESS)
4718 return status;
4719
4720 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4721 return ixgbe_enable_lasi_ext_t_x550em(hw);
4722 }
4723
4724 /**
4725 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4726 * @hw: pointer to hardware structure
4727 * @led_idx: led number to turn on
4728 **/
4729 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4730 {
4731 u16 phy_data;
4732
4733 DEBUGFUNC("ixgbe_led_on_t_X550em");
4734
4735 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4736 return IXGBE_ERR_PARAM;
4737
4738 /* To turn on the LED, set mode to ON. */
4739 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4740 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4741 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4742 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4743 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4744
4745 /* Some designs have the LEDs wired to the MAC */
4746 return ixgbe_led_on_generic(hw, led_idx);
4747 }
4748
4749 /**
4750 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4751 * @hw: pointer to hardware structure
4752 * @led_idx: led number to turn off
4753 **/
4754 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4755 {
4756 u16 phy_data;
4757
4758 DEBUGFUNC("ixgbe_led_off_t_X550em");
4759
4760 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4761 return IXGBE_ERR_PARAM;
4762
4763 /* To turn off the LED, set mode to OFF. */
4764 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4765 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4766 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4767 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4768 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4769
4770 /* Some designs have the LEDs wired to the MAC */
4771 return ixgbe_led_off_generic(hw, led_idx);
4772 }
4773
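/*
 * Illustrative only: a simple identify blink built from the two helpers
 * above.  msec_delay() is assumed to be the osdep delay helper used
 * elsewhere in this driver.
 */
#if 0
static void
example_blink_led(struct ixgbe_hw *hw, u32 led_idx, int times)
{
	while (times-- > 0) {
		ixgbe_led_on_t_X550em(hw, led_idx);
		msec_delay(250);
		ixgbe_led_off_t_X550em(hw, led_idx);
		msec_delay(250);
	}
}
#endif
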
4774 /**
4775 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4776 * @hw: pointer to the HW structure
4777 * @maj: driver version major number
4778 * @min: driver version minor number
4779 * @build: driver version build number
4780 * @sub: driver version sub build number
4781 * @len: length of driver_ver string
4782 * @driver_ver: driver string
4783 *
4784 * Sends the driver version number to firmware through the manageability
4785 * block. Returns IXGBE_SUCCESS on success, IXGBE_ERR_SWFW_SYNC if the
4786 * semaphore cannot be acquired, or IXGBE_ERR_HOST_INTERFACE_COMMAND if
4787 * the command fails.
4788 **/
4789 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4790 u8 build, u8 sub, u16 len, const char *driver_ver)
4791 {
4792 struct ixgbe_hic_drv_info2 fw_cmd;
4793 s32 ret_val = IXGBE_SUCCESS;
4794 int i;
4795
4796 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4797
4798 if ((len == 0) || (driver_ver == NULL) ||
4799 (len > sizeof(fw_cmd.driver_string)))
4800 return IXGBE_ERR_INVALID_ARGUMENT;
4801
4802 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4803 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4804 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4805 fw_cmd.port_num = (u8)hw->bus.func;
4806 fw_cmd.ver_maj = maj;
4807 fw_cmd.ver_min = min;
4808 fw_cmd.ver_build = build;
4809 fw_cmd.ver_sub = sub;
4810 fw_cmd.hdr.checksum = 0;
4811 memcpy(fw_cmd.driver_string, driver_ver, len);
4812 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4813 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4814
4815 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4816 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4817 sizeof(fw_cmd),
4818 IXGBE_HI_COMMAND_TIMEOUT,
4819 TRUE);
4820 if (ret_val != IXGBE_SUCCESS)
4821 continue;
4822
4823 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4824 FW_CEM_RESP_STATUS_SUCCESS)
4825 ret_val = IXGBE_SUCCESS;
4826 else
4827 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4828
4829 break;
4830 }
4831
4832 return ret_val;
4833 }
4834
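/*
 * Illustrative call (not part of the driver): the version numbers and string
 * below are made up; len must not exceed the firmware command's string field.
 */
#if 0
static void
example_report_driver_version(struct ixgbe_hw *hw)
{
	static const char ver_str[] = "example 1.0.0";

	(void)ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
	    sizeof(ver_str), ver_str);
}
#endif
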
4835 /**
4836 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4837 * @hw: pointer to hardware structure
4838 *
4839 * Returns TRUE if in FW NVM recovery mode.
4840 **/
4841 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4842 {
4843 u32 fwsm;
4844
4845 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4846
4847 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4848 }
4849