1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
34
35 #include "ixgbe_x550.h"
36 #include "ixgbe_x540.h"
37 #include "ixgbe_type.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41 #include <dev/mii/mii.h>
42
43 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
44 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed,
46 bool autoneg_wait_to_complete);
47 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
48 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
49 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
50
51 /**
52 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
53 * @hw: pointer to hardware structure
54 *
55 * Initialize the function pointers and assign the MAC type for X550.
56 * Does not touch the hardware.
57 **/
58 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
59 {
60 struct ixgbe_mac_info *mac = &hw->mac;
61 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
62 s32 ret_val;
63
64 DEBUGFUNC("ixgbe_init_ops_X550");
65
66 ret_val = ixgbe_init_ops_X540(hw);
67 mac->ops.dmac_config = ixgbe_dmac_config_X550;
68 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
69 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
70 mac->ops.setup_eee = NULL;
71 mac->ops.set_source_address_pruning =
72 ixgbe_set_source_address_pruning_X550;
73 mac->ops.set_ethertype_anti_spoofing =
74 ixgbe_set_ethertype_anti_spoofing_X550;
75
76 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
78 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
79 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
80 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
81 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
82 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
83 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
84 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
85
86 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
87 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
88 mac->ops.mdd_event = ixgbe_mdd_event_X550;
89 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
90 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
91 mac->ops.disable_rx = ixgbe_disable_rx_x550;
92 /* Manageability interface */
93 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
94 switch (hw->device_id) {
95 case IXGBE_DEV_ID_X550EM_X_1G_T:
96 hw->mac.ops.led_on = NULL;
97 hw->mac.ops.led_off = NULL;
98 break;
99 case IXGBE_DEV_ID_X550EM_X_10G_T:
100 case IXGBE_DEV_ID_X550EM_A_10G_T:
101 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
102 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
103 break;
104 default:
105 break;
106 }
107 return ret_val;
108 }
109
110 /**
111 * ixgbe_read_cs4227 - Read CS4227 register
112 * @hw: pointer to hardware structure
113 * @reg: register number to read
114 * @value: pointer to receive value read
115 *
116 * Returns status code
117 **/
118 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
119 {
120 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
121 }
122
123 /**
124 * ixgbe_write_cs4227 - Write CS4227 register
125 * @hw: pointer to hardware structure
126 * @reg: register number to write
127 * @value: value to write to register
128 *
129 * Returns status code
130 **/
131 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
132 {
133 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
134 }
135
136 /**
137 * ixgbe_read_pe - Read register from port expander
138 * @hw: pointer to hardware structure
139 * @reg: register number to read
140 * @value: pointer to receive read value
141 *
142 * Returns status code
143 **/
144 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
145 {
146 s32 status;
147
148 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
149 if (status != IXGBE_SUCCESS)
150 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
151 "port expander access failed with %d\n", status);
152 return status;
153 }
154
155 /**
156 * ixgbe_write_pe - Write register to port expander
157 * @hw: pointer to hardware structure
158 * @reg: register number to write
159 * @value: value to write
160 *
161 * Returns status code
162 **/
163 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
164 {
165 s32 status;
166
167 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
168 if (status != IXGBE_SUCCESS)
169 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
170 "port expander access failed with %d\n", status);
171 return status;
172 }
173
174 /**
175 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
176 * @hw: pointer to hardware structure
177 *
178 * This function assumes that the caller has acquired the proper semaphore.
179 * Returns error code
180 **/
181 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
182 {
183 s32 status;
184 u32 retry;
185 u16 value;
186 u8 reg;
187
188 /* Trigger hard reset. */
189 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
190 if (status != IXGBE_SUCCESS)
191 return status;
192 reg |= IXGBE_PE_BIT1;
193 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
194 if (status != IXGBE_SUCCESS)
195 return status;
196
197 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
198 if (status != IXGBE_SUCCESS)
199 return status;
200 reg &= ~IXGBE_PE_BIT1;
201 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
202 if (status != IXGBE_SUCCESS)
203 return status;
204
205 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
206 if (status != IXGBE_SUCCESS)
207 return status;
208 reg &= ~IXGBE_PE_BIT1;
209 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
210 if (status != IXGBE_SUCCESS)
211 return status;
212
213 usec_delay(IXGBE_CS4227_RESET_HOLD);
214
215 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
216 if (status != IXGBE_SUCCESS)
217 return status;
218 reg |= IXGBE_PE_BIT1;
219 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
220 if (status != IXGBE_SUCCESS)
221 return status;
222
223 /* Wait for the reset to complete. */
224 msec_delay(IXGBE_CS4227_RESET_DELAY);
225 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
226 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
227 &value);
228 if (status == IXGBE_SUCCESS &&
229 value == IXGBE_CS4227_EEPROM_LOAD_OK)
230 break;
231 msec_delay(IXGBE_CS4227_CHECK_DELAY);
232 }
233 if (retry == IXGBE_CS4227_RETRIES) {
234 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
235 "CS4227 reset did not complete.");
236 return IXGBE_ERR_PHY;
237 }
238
239 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
240 if (status != IXGBE_SUCCESS ||
241 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
242 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
243 "CS4227 EEPROM did not load successfully.");
244 return IXGBE_ERR_PHY;
245 }
246
247 return IXGBE_SUCCESS;
248 }
249
250 /**
251 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
252 * @hw: pointer to hardware structure
253 **/
254 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
255 {
256 s32 status = IXGBE_SUCCESS;
257 u32 swfw_mask = hw->phy.phy_semaphore_mask;
258 u16 value = 0;
259 u8 retry;
260
261 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
262 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
263 if (status != IXGBE_SUCCESS) {
264 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
265 "semaphore failed with %d", status);
266 msec_delay(IXGBE_CS4227_CHECK_DELAY);
267 continue;
268 }
269
270 /* Get status of reset flow. */
271 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
272
273 if (status == IXGBE_SUCCESS &&
274 value == IXGBE_CS4227_RESET_COMPLETE)
275 goto out;
276
277 if (status != IXGBE_SUCCESS ||
278 value != IXGBE_CS4227_RESET_PENDING)
279 break;
280
281 /* Reset is pending. Wait and check again. */
282 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
283 msec_delay(IXGBE_CS4227_CHECK_DELAY);
284 }
285
286 /* If still pending, assume other instance failed. */
287 if (retry == IXGBE_CS4227_RETRIES) {
288 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
289 if (status != IXGBE_SUCCESS) {
290 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
291 "semaphore failed with %d", status);
292 return;
293 }
294 }
295
296 /* Reset the CS4227. */
297 status = ixgbe_reset_cs4227(hw);
298 if (status != IXGBE_SUCCESS) {
299 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
300 "CS4227 reset failed: %d", status);
301 goto out;
302 }
303
304 /* Reset takes so long, temporarily release semaphore in case the
305 * other driver instance is waiting for the reset indication.
306 */
307 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
308 IXGBE_CS4227_RESET_PENDING);
309 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
310 msec_delay(10);
311 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
312 if (status != IXGBE_SUCCESS) {
313 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
314 "semaphore failed with %d", status);
315 return;
316 }
317
318 /* Record completion for next time. */
319 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
320 IXGBE_CS4227_RESET_COMPLETE);
321
322 out:
323 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
324 msec_delay(hw->eeprom.semaphore_delay);
325 }
326
327 /**
328 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
329 * @hw: pointer to hardware structure
330 **/
331 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
332 {
333 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
334
335 if (hw->bus.lan_id) {
336 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
337 esdp |= IXGBE_ESDP_SDP1_DIR;
338 }
339 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
340 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
341 IXGBE_WRITE_FLUSH(hw);
342 }
343
344 /**
345 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
346 * @hw: pointer to hardware structure
347 * @reg_addr: 32 bit address of PHY register to read
348 * @dev_type: always unused
349 * @phy_data: Pointer to read data from PHY register
350 */
351 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
352 u32 dev_type, u16 *phy_data)
353 {
354 u32 i, data, command;
355 UNREFERENCED_1PARAMETER(dev_type);
356
357 /* Setup and write the read command */
358 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
359 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
360 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
361 IXGBE_MSCA_MDI_COMMAND;
362
363 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
364
365 /* Check every 10 usec to see if the access completed.
366 * The MDI Command bit will clear when the operation is
367 * complete
368 */
369 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
370 usec_delay(10);
371
372 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
373 if (!(command & IXGBE_MSCA_MDI_COMMAND))
374 break;
375 }
376
377 if (command & IXGBE_MSCA_MDI_COMMAND) {
378 ERROR_REPORT1(IXGBE_ERROR_POLLING,
379 "PHY read command did not complete.\n");
380 return IXGBE_ERR_PHY;
381 }
382
383 /* Read operation is complete. Get the data from MSRWD */
384 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
385 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
386 *phy_data = (u16)data;
387
388 return IXGBE_SUCCESS;
389 }
390
391 /**
392 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
393 * @hw: pointer to hardware structure
394 * @reg_addr: 32 bit PHY register to write
395 * @dev_type: always unused
396 * @phy_data: Data to write to the PHY register
397 */
398 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
399 u32 dev_type, u16 phy_data)
400 {
401 u32 i, command;
402 UNREFERENCED_1PARAMETER(dev_type);
403
404 /* Put the data in the MDI single read and write data register*/
405 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
406
407 /* Setup and write the write command */
408 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
409 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
410 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
411 IXGBE_MSCA_MDI_COMMAND;
412
413 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
414
415 /* Check every 10 usec to see if the access completed.
416 * The MDI Command bit will clear when the operation is
417 * complete
418 */
419 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
420 usec_delay(10);
421
422 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
423 if (!(command & IXGBE_MSCA_MDI_COMMAND))
424 break;
425 }
426
427 if (command & IXGBE_MSCA_MDI_COMMAND) {
428 ERROR_REPORT1(IXGBE_ERROR_POLLING,
429 "PHY write cmd didn't complete\n");
430 return IXGBE_ERR_PHY;
431 }
432
433 return IXGBE_SUCCESS;
434 }
435
436 /**
437 * ixgbe_identify_phy_x550em - Get PHY type based on device id
438 * @hw: pointer to hardware structure
439 *
440 * Returns error code
441 */
442 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
443 {
444 hw->mac.ops.set_lan_id(hw);
445
446 ixgbe_read_mng_if_sel_x550em(hw);
447
448 switch (hw->device_id) {
449 case IXGBE_DEV_ID_X550EM_A_SFP:
450 return ixgbe_identify_sfp_module_X550em(hw);
451 case IXGBE_DEV_ID_X550EM_X_SFP:
452 /* set up for CS4227 usage */
453 ixgbe_setup_mux_ctl(hw);
454 ixgbe_check_cs4227(hw);
455 /* Fallthrough */
456
457 case IXGBE_DEV_ID_X550EM_A_SFP_N:
458 return ixgbe_identify_sfp_module_X550em(hw);
459 break;
460 case IXGBE_DEV_ID_X550EM_X_KX4:
461 hw->phy.type = ixgbe_phy_x550em_kx4;
462 break;
463 case IXGBE_DEV_ID_X550EM_X_XFI:
464 hw->phy.type = ixgbe_phy_x550em_xfi;
465 break;
466 case IXGBE_DEV_ID_X550EM_X_KR:
467 case IXGBE_DEV_ID_X550EM_A_KR:
468 case IXGBE_DEV_ID_X550EM_A_KR_L:
469 hw->phy.type = ixgbe_phy_x550em_kr;
470 break;
471 case IXGBE_DEV_ID_X550EM_A_10G_T:
472 case IXGBE_DEV_ID_X550EM_X_10G_T:
473 return ixgbe_identify_phy_generic(hw);
474 case IXGBE_DEV_ID_X550EM_X_1G_T:
475 hw->phy.type = ixgbe_phy_ext_1g_t;
476 break;
477 case IXGBE_DEV_ID_X550EM_A_1G_T:
478 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
479 hw->phy.type = ixgbe_phy_fw;
480 if (hw->bus.lan_id)
481 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
482 else
483 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
484 break;
485 default:
486 break;
487 }
488 return IXGBE_SUCCESS;
489 }
490
491 /**
492 * ixgbe_fw_phy_activity - Perform an activity on a PHY
493 * @hw: pointer to hardware structure
494 * @activity: activity to perform
495 * @data: Pointer to 4 32-bit words of data
496 */
497 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
498 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
499 {
500 union {
501 struct ixgbe_hic_phy_activity_req cmd;
502 struct ixgbe_hic_phy_activity_resp rsp;
503 } hic;
504 u16 retries = FW_PHY_ACT_RETRIES;
505 s32 rc;
506 u16 i;
507
508 do {
509 memset(&hic, 0, sizeof(hic));
510 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
511 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
512 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
513 hic.cmd.port_number = hw->bus.lan_id;
514 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
515 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
516 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
517
518 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
519 sizeof(hic.cmd),
520 IXGBE_HI_COMMAND_TIMEOUT,
521 TRUE);
522 if (rc != IXGBE_SUCCESS)
523 return rc;
524 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
525 FW_CEM_RESP_STATUS_SUCCESS) {
526 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
527 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
528 return IXGBE_SUCCESS;
529 }
530 usec_delay(20);
531 --retries;
532 } while (retries > 0);
533
534 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
535 }
536
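/*
 * Translation table between the link-speed bits used by the PHY firmware
 * interface and the driver's ixgbe_link_speed flags.  ixgbe_get_phy_id_fw()
 * walks it to report supported speeds, and ixgbe_setup_fw_link() walks it in
 * the other direction to build the FW_PHY_ACT_SETUP_LINK request.
 */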
537 static const struct {
538 u16 fw_speed;
539 ixgbe_link_speed phy_speed;
540 } ixgbe_fw_map[] = {
541 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
542 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
543 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
544 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
545 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
546 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
547 };
548
549 /**
550 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
551 * @hw: pointer to hardware structure
552 *
553 * Returns error code
554 */
555 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
556 {
557 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
558 u16 phy_speeds;
559 u16 phy_id_lo;
560 s32 rc;
561 u16 i;
562
563 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
564 if (rc)
565 return rc;
566
567 hw->phy.speeds_supported = 0;
568 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
569 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
570 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
571 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
572 }
573
574 #if 0
575 /*
576 * Don't set autoneg_advertised here so as not to be inconsistent
577 * with the if_media value.
578 */
579 if (!hw->phy.autoneg_advertised)
580 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
581 #endif
582
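	/*
	 * Assemble the PHY identifier from the firmware response: the upper
	 * ID bits come from info[0], the lower ID bits from info[1], and the
	 * revision bits masked out of the ID are kept in phy.revision.
	 */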
583 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
584 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
585 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
586 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
587 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
588 return IXGBE_ERR_PHY_ADDR_INVALID;
589 return IXGBE_SUCCESS;
590 }
591
592 /**
593 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
594 * @hw: pointer to hardware structure
595 *
596 * Returns error code
597 */
598 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
599 {
600 if (hw->bus.lan_id)
601 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
602 else
603 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
604
605 hw->phy.type = ixgbe_phy_fw;
606 hw->phy.ops.read_reg = NULL;
607 hw->phy.ops.write_reg = NULL;
608 return ixgbe_get_phy_id_fw(hw);
609 }
610
611 /**
612 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
613 * @hw: pointer to hardware structure
614 *
615 * Returns error code
616 */
617 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
618 {
619 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
620
621 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
622 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
623 }
624
625 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
626 u32 device_type, u16 *phy_data)
627 {
628 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
629 return IXGBE_NOT_IMPLEMENTED;
630 }
631
632 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
633 u32 device_type, u16 phy_data)
634 {
635 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
636 return IXGBE_NOT_IMPLEMENTED;
637 }
638
639 /**
640 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
641 * @hw: pointer to the hardware structure
642 * @addr: I2C bus address to read from
643 * @reg: I2C device register to read from
644 * @val: pointer to location to receive read value
645 *
646 * Returns an error code on error.
647 **/
648 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
649 u16 reg, u16 *val)
650 {
651 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
652 }
653
654 /**
655 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
656 * @hw: pointer to the hardware structure
657 * @addr: I2C bus address to read from
658 * @reg: I2C device register to read from
659 * @val: pointer to location to receive read value
660 *
661 * Returns an error code on error.
662 **/
663 static s32
664 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
665 u16 reg, u16 *val)
666 {
667 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
668 }
669
670 /**
671 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
672 * @hw: pointer to the hardware structure
673 * @addr: I2C bus address to write to
674 * @reg: I2C device register to write to
675 * @val: value to write
676 *
677 * Returns an error code on error.
678 **/
679 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
680 u8 addr, u16 reg, u16 val)
681 {
682 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
683 }
684
685 /**
686 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
687 * @hw: pointer to the hardware structure
688 * @addr: I2C bus address to write to
689 * @reg: I2C device register to write to
690 * @val: value to write
691 *
692 * Returns an error code on error.
693 **/
694 static s32
695 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
696 u8 addr, u16 reg, u16 val)
697 {
698 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
699 }
700
701 /**
702 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
703 * @hw: pointer to hardware structure
704 *
705 * Initialize the function pointers and assign the MAC type for X550EM.
706 * Does not touch the hardware.
707 **/
708 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
709 {
710 struct ixgbe_mac_info *mac = &hw->mac;
711 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
712 struct ixgbe_phy_info *phy = &hw->phy;
713 s32 ret_val;
714
715 DEBUGFUNC("ixgbe_init_ops_X550EM");
716
717 /* Similar to X550 so start there. */
718 ret_val = ixgbe_init_ops_X550(hw);
719
720 /* Since this function eventually calls
721 * ixgbe_init_ops_X540 by design, we are setting
722 * the pointers to NULL explicitly here to overwrite
723 * the values being set in the x540 function.
724 */
725
726 /* Bypass not supported in x550EM */
727 mac->ops.bypass_rw = NULL;
728 mac->ops.bypass_valid_rd = NULL;
729 mac->ops.bypass_set = NULL;
730 mac->ops.bypass_rd_eep = NULL;
731
732 /* FCOE not supported in x550EM */
733 mac->ops.get_san_mac_addr = NULL;
734 mac->ops.set_san_mac_addr = NULL;
735 mac->ops.get_wwn_prefix = NULL;
736 mac->ops.get_fcoe_boot_status = NULL;
737
738 /* IPsec not supported in x550EM */
739 mac->ops.disable_sec_rx_path = NULL;
740 mac->ops.enable_sec_rx_path = NULL;
741
742 /* AUTOC register is not present in x550EM. */
743 mac->ops.prot_autoc_read = NULL;
744 mac->ops.prot_autoc_write = NULL;
745
746 /* X550EM bus type is internal*/
747 hw->bus.type = ixgbe_bus_type_internal;
748 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
749
750
751 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
752 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
753 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
754 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
755 mac->ops.get_supported_physical_layer =
756 ixgbe_get_supported_physical_layer_X550em;
757
758 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
759 mac->ops.setup_fc = ixgbe_setup_fc_generic;
760 else
761 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
762
763 /* PHY */
764 phy->ops.init = ixgbe_init_phy_ops_X550em;
765 switch (hw->device_id) {
766 case IXGBE_DEV_ID_X550EM_A_1G_T:
767 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
768 mac->ops.setup_fc = NULL;
769 phy->ops.identify = ixgbe_identify_phy_fw;
770 phy->ops.set_phy_power = NULL;
771 phy->ops.get_firmware_version = NULL;
772 break;
773 case IXGBE_DEV_ID_X550EM_X_1G_T:
774 mac->ops.setup_fc = NULL;
775 phy->ops.identify = ixgbe_identify_phy_x550em;
776 phy->ops.set_phy_power = NULL;
777 break;
778 default:
779 phy->ops.identify = ixgbe_identify_phy_x550em;
780 }
781
782 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
783 phy->ops.set_phy_power = NULL;
784
785
786 /* EEPROM */
787 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
788 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
789 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
790 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
791 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
792 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
793 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
794 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
795
796 return ret_val;
797 }
798
799 #define IXGBE_DENVERTON_WA 1
800
801 /**
802 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
803 * @hw: pointer to hardware structure
804 */
805 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
806 {
807 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
808 s32 rc;
809 #ifdef IXGBE_DENVERTON_WA
810 s32 ret_val;
811 u16 phydata;
812 #endif
813 u16 i;
814
815 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
816 return 0;
817
818 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
819 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
820 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
821 return IXGBE_ERR_INVALID_LINK_SETTINGS;
822 }
823
824 switch (hw->fc.requested_mode) {
825 case ixgbe_fc_full:
826 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
827 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
828 break;
829 case ixgbe_fc_rx_pause:
830 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
831 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
832 break;
833 case ixgbe_fc_tx_pause:
834 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
835 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
836 break;
837 default:
838 break;
839 }
840
841 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
842 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
843 setup[0] |= ixgbe_fw_map[i].fw_speed;
844 }
845 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
846
847 if (hw->phy.eee_speeds_advertised)
848 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
849
850 #ifdef IXGBE_DENVERTON_WA
851 if ((hw->phy.force_10_100_autonego == false)
852 && ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
853 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL))) {
854 /* Don't use auto-nego for 10/100Mbps */
855 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
856 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
857 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
858 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
859 }
860 #endif
861
862 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
863 if (rc)
864 return rc;
865
866 #ifdef IXGBE_DENVERTON_WA
867 if (hw->phy.force_10_100_autonego == true)
868 goto out;
869
870 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
871 if (ret_val != 0)
872 goto out;
873
874 /*
875 * Broken firmware sets BMCR register incorrectly if
876 * FW_PHY_ACT_SETUP_LINK_AN isn't set.
877 * a) FDX may not be set.
878 * b) BMCR_SPEED1 (bit 6) is always cleared.
879 * +---------+-------+-----------+------+---------------------------+
880 * | request | BMCR  | BMCR spd  | BMCR |                           |
881 * |         | (HEX) | (in bits) | FDX  |                           |
882 * +---------+-------+-----------+------+---------------------------+
883 * | 10M     | 0000  | 10M(00)   |  0   |                           |
884 * | 10M     | 2000  | 100M(01)  |  0   | (I've never observed this)|
885 * | 10M     | 2100  | 100M(01)  |  1   |                           |
886 * | 100M    | 0000  | 10M(00)   |  0   |                           |
887 * | 100M    | 0100  | 10M(00)   |  1   |                           |
888 * +---------+-------+-----------+------+---------------------------+
889 */
890 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
891 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
892 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
893 && (((phydata & BMCR_FDX) == 0)
894 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
895 phydata = BMCR_FDX;
896 switch (hw->phy.autoneg_advertised) {
897 case IXGBE_LINK_SPEED_10_FULL:
898 phydata |= BMCR_S10;
899 break;
900 case IXGBE_LINK_SPEED_100_FULL:
901 phydata |= BMCR_S100;
902 break;
903 case IXGBE_LINK_SPEED_1GB_FULL:
904 panic("%s: 1GB_FULL is set", __func__);
905 break;
906 default:
907 break;
908 }
909 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
910 if (ret_val != 0)
911 return ret_val;
912 }
913 out:
914 #endif
915 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
916 return IXGBE_ERR_OVERTEMP;
917 return IXGBE_SUCCESS;
918 }
919
920 /**
921 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
922 * @hw: pointer to hardware structure
923 *
924 * Called at init time to set up flow control.
925 */
926 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
927 {
928 if (hw->fc.requested_mode == ixgbe_fc_default)
929 hw->fc.requested_mode = ixgbe_fc_full;
930
931 return ixgbe_setup_fw_link(hw);
932 }
933
934 /**
935 * ixgbe_setup_eee_fw - Enable/disable EEE support
936 * @hw: pointer to the HW structure
937 * @enable_eee: boolean flag to enable EEE
938 *
939 * Enable/disable EEE based on enable_eee flag.
940 * This function controls EEE for firmware-based PHY implementations.
941 */
942 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
943 {
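	/*
	 * Nothing to do if EEE is already in the requested state; the '!!'
	 * collapses the advertised-speeds mask to a boolean so it can be
	 * compared with enable_eee.
	 */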
944 if (!!hw->phy.eee_speeds_advertised == enable_eee)
945 return IXGBE_SUCCESS;
946 if (enable_eee)
947 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
948 else
949 hw->phy.eee_speeds_advertised = 0;
950 return hw->phy.ops.setup_link(hw);
951 }
952
953 /**
954 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
955 * @hw: pointer to hardware structure
956 *
957 * Initialize the function pointers and assign the MAC type for X550EM_a.
958 * Does not touch the hardware.
959 **/
960 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
961 {
962 struct ixgbe_mac_info *mac = &hw->mac;
963 s32 ret_val;
964
965 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
966
967 /* Start with generic X550EM init */
968 ret_val = ixgbe_init_ops_X550EM(hw);
969
970 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
971 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
972 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
973 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
974 } else {
975 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
976 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
977 }
978 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
979 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
980
981 switch (mac->ops.get_media_type(hw)) {
982 case ixgbe_media_type_fiber:
983 mac->ops.setup_fc = NULL;
984 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
985 break;
986 case ixgbe_media_type_backplane:
987 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
988 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
989 break;
990 default:
991 break;
992 }
993
994 switch (hw->device_id) {
995 case IXGBE_DEV_ID_X550EM_A_1G_T:
996 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
997 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
998 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
999 mac->ops.setup_eee = ixgbe_setup_eee_fw;
1000 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
1001 IXGBE_LINK_SPEED_1GB_FULL;
1002 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1003 break;
1004 default:
1005 break;
1006 }
1007
1008 return ret_val;
1009 }
1010
1011 /**
1012 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1013 * @hw: pointer to hardware structure
1014 *
1015 * Initialize the function pointers and assign the MAC type for X550EM_x.
1016 * Does not touch the hardware.
1017 **/
1018 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1019 {
1020 struct ixgbe_mac_info *mac = &hw->mac;
1021 struct ixgbe_link_info *link = &hw->link;
1022 s32 ret_val;
1023
1024 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1025
1026 /* Start with generic X550EM init */
1027 ret_val = ixgbe_init_ops_X550EM(hw);
1028
1029 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1030 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1031 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1032 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1033 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1034 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1035 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1036 link->ops.write_link_unlocked =
1037 ixgbe_write_i2c_combined_generic_unlocked;
1038 link->addr = IXGBE_CS4227;
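	/*
	 * On X550EM_x the external CS4227 PHY is reached over the combined
	 * I2C interface, so the link ops above use the I2C combined
	 * accessors and target the CS4227 address.
	 */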
1039
1040 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1041 mac->ops.setup_fc = NULL;
1042 mac->ops.setup_eee = NULL;
1043 mac->ops.init_led_link_act = NULL;
1044 }
1045
1046 return ret_val;
1047 }
1048
1049 /**
1050 * ixgbe_dmac_config_X550
1051 * @hw: pointer to hardware structure
1052 *
1053 * Configure DMA coalescing. If enabling dmac, dmac is activated.
1054 * When disabling dmac, the dmac enable bit is cleared.
1055 **/
1056 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1057 {
1058 u32 reg, high_pri_tc;
1059
1060 DEBUGFUNC("ixgbe_dmac_config_X550");
1061
1062 /* Disable DMA coalescing before configuring */
1063 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1064 reg &= ~IXGBE_DMACR_DMAC_EN;
1065 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1066
1067 /* Disable DMA Coalescing if the watchdog timer is 0 */
1068 if (!hw->mac.dmac_config.watchdog_timer)
1069 goto out;
1070
1071 ixgbe_dmac_config_tcs_X550(hw);
1072
1073 /* Configure DMA Coalescing Control Register */
1074 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1075
1076 /* Set the watchdog timer in units of 40.96 usec */
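	/*
	 * The scaling below assumes watchdog_timer is given in usec:
	 * multiplying by 100 and dividing by 4096 divides by 40.96, e.g. a
	 * 4096 usec request becomes 100 register counts.
	 */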
1077 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1078 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1079
1080 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1081 /* If fcoe is enabled, set high priority traffic class */
1082 if (hw->mac.dmac_config.fcoe_en) {
1083 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1084 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1085 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1086 }
1087 reg |= IXGBE_DMACR_EN_MNG_IND;
1088
1089 /* Enable DMA coalescing after configuration */
1090 reg |= IXGBE_DMACR_DMAC_EN;
1091 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1092
1093 out:
1094 return IXGBE_SUCCESS;
1095 }
1096
1097 /**
1098 * ixgbe_dmac_config_tcs_X550
1099 * @hw: pointer to hardware structure
1100 *
1101 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1102 * be cleared before configuring.
1103 **/
1104 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1105 {
1106 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1107
1108 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1109
1110 /* Select the packet buffer headroom based on the configured link speed */
1111 switch (hw->mac.dmac_config.link_speed) {
1112 case IXGBE_LINK_SPEED_10_FULL:
1113 case IXGBE_LINK_SPEED_100_FULL:
1114 pb_headroom = IXGBE_DMACRXT_100M;
1115 break;
1116 case IXGBE_LINK_SPEED_1GB_FULL:
1117 pb_headroom = IXGBE_DMACRXT_1G;
1118 break;
1119 default:
1120 pb_headroom = IXGBE_DMACRXT_10G;
1121 break;
1122 }
1123
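	/*
	 * The MFS field of MAXFRS is in bytes, so after the shift the divide
	 * by 1024 yields KB, matching the RXPBSIZE-derived threshold below.
	 */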
1124 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1125 IXGBE_MHADD_MFS_SHIFT) / 1024);
1126
1127 /* Set the per Rx packet buffer receive threshold */
1128 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1129 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1130 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1131
1132 if (tc < hw->mac.dmac_config.num_tcs) {
1133 /* Get Rx PB size */
1134 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1135 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1136 IXGBE_RXPBSIZE_SHIFT;
1137
1138 /* Calculate receive buffer threshold in kilobytes */
1139 if (rx_pb_size > pb_headroom)
1140 rx_pb_size = rx_pb_size - pb_headroom;
1141 else
1142 rx_pb_size = 0;
1143
1144 /* The value written to DMCTH must be at least the max frame size (MFS) */
1145 reg |= (rx_pb_size > maxframe_size_kb) ?
1146 rx_pb_size : maxframe_size_kb;
1147 }
1148 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1149 }
1150 return IXGBE_SUCCESS;
1151 }
1152
1153 /**
1154 * ixgbe_dmac_update_tcs_X550
1155 * @hw: pointer to hardware structure
1156 *
1157 * Disables dmac, updates per TC settings, and then enables dmac.
1158 **/
1159 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1160 {
1161 u32 reg;
1162
1163 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1164
1165 /* Disable DMA coalescing before configuring */
1166 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1167 reg &= ~IXGBE_DMACR_DMAC_EN;
1168 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1169
1170 ixgbe_dmac_config_tcs_X550(hw);
1171
1172 /* Enable DMA coalescing after configuration */
1173 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1174 reg |= IXGBE_DMACR_DMAC_EN;
1175 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1176
1177 return IXGBE_SUCCESS;
1178 }
1179
1180 /**
1181 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1182 * @hw: pointer to hardware structure
1183 *
1184 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1185 * ixgbe_hw struct in order to set up EEPROM access.
1186 **/
1187 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1188 {
1189 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1190 u32 eec;
1191 u16 eeprom_size;
1192
1193 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1194
1195 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1196 eeprom->semaphore_delay = 10;
1197 eeprom->type = ixgbe_flash;
1198
1199 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1200 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1201 IXGBE_EEC_SIZE_SHIFT);
1202 eeprom->word_size = 1 << (eeprom_size +
1203 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1204
1205 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1206 eeprom->type, eeprom->word_size);
1207 }
1208
1209 return IXGBE_SUCCESS;
1210 }
1211
1212 /**
1213 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1214 * @hw: pointer to hardware structure
1215 * @enable: enable or disable source address pruning
1216 * @pool: Rx pool to set source address pruning for
1217 **/
1218 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1219 unsigned int pool)
1220 {
1221 u64 pfflp;
1222
1223 /* max rx pool is 63 */
1224 if (pool > 63)
1225 return;
1226
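	/*
	 * The per-pool pruning enables form a 64-bit bitmap split across
	 * PFFLPL (pools 0-31) and PFFLPH (pools 32-63); read both halves,
	 * update the bit for this pool, and write them back.
	 */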
1227 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1228 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1229
1230 if (enable)
1231 pfflp |= (1ULL << pool);
1232 else
1233 pfflp &= ~(1ULL << pool);
1234
1235 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1236 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1237 }
1238
1239 /**
1240 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1241 * @hw: pointer to hardware structure
1242 * @enable: enable or disable switch for Ethertype anti-spoofing
1243 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1244 *
1245 **/
1246 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1247 bool enable, int vf)
1248 {
1249 int vf_target_reg = vf >> 3;
1250 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1251 u32 pfvfspoof;
1252
1253 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1254
1255 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1256 if (enable)
1257 pfvfspoof |= (1 << vf_target_shift);
1258 else
1259 pfvfspoof &= ~(1 << vf_target_shift);
1260
1261 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1262 }
1263
1264 /**
1265 * ixgbe_iosf_wait - Wait for IOSF command completion
1266 * @hw: pointer to hardware structure
1267 * @ctrl: pointer to location to receive final IOSF control value
1268 *
1269 * Returns failing status on timeout
1270 *
1271 * Note: ctrl can be NULL if the IOSF control register value is not needed
1272 **/
1273 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1274 {
1275 u32 i, command = 0;
1276
1277 /* Check every 10 usec to see if the address cycle completed.
1278 * The SB IOSF BUSY bit will clear when the operation is
1279 * complete
1280 */
1281 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1282 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1283 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1284 break;
1285 usec_delay(10);
1286 }
1287 if (ctrl)
1288 *ctrl = command;
1289 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1290 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1291 return IXGBE_ERR_PHY;
1292 }
1293
1294 return IXGBE_SUCCESS;
1295 }
1296
1297 /**
1298 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1299 * of the IOSF device
1300 * @hw: pointer to hardware structure
1301 * @reg_addr: 32 bit PHY register to write
1302 * @device_type: 3 bit device type
1303 * @data: Data to write to the register
1304 **/
1305 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1306 u32 device_type, u32 data)
1307 {
1308 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1309 u32 command, error __unused;
1310 s32 ret;
1311
1312 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1313 if (ret != IXGBE_SUCCESS)
1314 return ret;
1315
1316 ret = ixgbe_iosf_wait(hw, NULL);
1317 if (ret != IXGBE_SUCCESS)
1318 goto out;
1319
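	/*
	 * Indirect IOSF access: the target device and register offset are
	 * programmed through INDIRECT_CTRL, the payload through
	 * INDIRECT_DATA, and completion is detected by ixgbe_iosf_wait()
	 * seeing the BUSY bit clear.
	 */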
1320 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1321 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1322
1323 /* Write IOSF control register */
1324 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1325
1326 /* Write IOSF data register */
1327 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1328
1329 ret = ixgbe_iosf_wait(hw, &command);
1330
1331 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1332 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1333 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1334 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1335 "Failed to write, error %x\n", error);
1336 ret = IXGBE_ERR_PHY;
1337 }
1338
1339 out:
1340 ixgbe_release_swfw_semaphore(hw, gssr);
1341 return ret;
1342 }
1343
1344 /**
1345 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1346 * @hw: pointer to hardware structure
1347 * @reg_addr: 32 bit PHY register to read
1348 * @device_type: 3 bit device type
1349 * @data: Pointer to read data from the register
1350 **/
1351 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1352 u32 device_type, u32 *data)
1353 {
1354 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1355 u32 command, error __unused;
1356 s32 ret;
1357
1358 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1359 if (ret != IXGBE_SUCCESS)
1360 return ret;
1361
1362 ret = ixgbe_iosf_wait(hw, NULL);
1363 if (ret != IXGBE_SUCCESS)
1364 goto out;
1365
1366 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1367 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1368
1369 /* Write IOSF control register */
1370 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1371
1372 ret = ixgbe_iosf_wait(hw, &command);
1373
1374 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1375 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1376 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1377 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1378 "Failed to read, error %x\n", error);
1379 ret = IXGBE_ERR_PHY;
1380 }
1381
1382 if (ret == IXGBE_SUCCESS)
1383 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1384
1385 out:
1386 ixgbe_release_swfw_semaphore(hw, gssr);
1387 return ret;
1388 }
1389
1390 /**
1391 * ixgbe_get_phy_token - Get the token for shared phy access
1392 * @hw: Pointer to hardware structure
1393 */
1394
1395 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1396 {
1397 struct ixgbe_hic_phy_token_req token_cmd;
1398 s32 status;
1399
1400 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1401 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1402 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1403 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1404 token_cmd.port_number = hw->bus.lan_id;
1405 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1406 token_cmd.pad = 0;
1407 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1408 sizeof(token_cmd),
1409 IXGBE_HI_COMMAND_TIMEOUT,
1410 TRUE);
1411 if (status) {
1412 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1413 status);
1414 return status;
1415 }
1416 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1417 return IXGBE_SUCCESS;
1418 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1419 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1420 token_cmd.hdr.cmd_or_resp.ret_status);
1421 return IXGBE_ERR_FW_RESP_INVALID;
1422 }
1423
1424 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1425 return IXGBE_ERR_TOKEN_RETRY;
1426 }
1427
1428 /**
1429 * ixgbe_put_phy_token - Put the token for shared phy access
1430 * @hw: Pointer to hardware structure
1431 */
1432
1433 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1434 {
1435 struct ixgbe_hic_phy_token_req token_cmd;
1436 s32 status;
1437
1438 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1439 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1440 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1441 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1442 token_cmd.port_number = hw->bus.lan_id;
1443 token_cmd.command_type = FW_PHY_TOKEN_REL;
1444 token_cmd.pad = 0;
1445 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1446 sizeof(token_cmd),
1447 IXGBE_HI_COMMAND_TIMEOUT,
1448 TRUE);
1449 if (status)
1450 return status;
1451 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1452 return IXGBE_SUCCESS;
1453
1454 DEBUGOUT("Put PHY Token host interface command failed");
1455 return IXGBE_ERR_FW_RESP_INVALID;
1456 }
1457
1458 /**
1459 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1460 * of the IOSF device
1461 * @hw: pointer to hardware structure
1462 * @reg_addr: 32 bit PHY register to write
1463 * @device_type: 3 bit device type
1464 * @data: Data to write to the register
1465 **/
1466 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1467 u32 device_type, u32 data)
1468 {
1469 struct ixgbe_hic_internal_phy_req write_cmd;
1470 s32 status;
1471 UNREFERENCED_1PARAMETER(device_type);
1472
1473 memset(&write_cmd, 0, sizeof(write_cmd));
1474 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1475 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1476 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1477 write_cmd.port_number = hw->bus.lan_id;
1478 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1479 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1480 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1481
1482 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1483 sizeof(write_cmd),
1484 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1485
1486 return status;
1487 }
1488
1489 /**
1490 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1491 * @hw: pointer to hardware structure
1492 * @reg_addr: 32 bit PHY register to read
1493 * @device_type: 3 bit device type
1494 * @data: Pointer to read data from the register
1495 **/
1496 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1497 u32 device_type, u32 *data)
1498 {
1499 union {
1500 struct ixgbe_hic_internal_phy_req cmd;
1501 struct ixgbe_hic_internal_phy_resp rsp;
1502 } hic;
1503 s32 status;
1504 UNREFERENCED_1PARAMETER(device_type);
1505
1506 memset(&hic, 0, sizeof(hic));
1507 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1508 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1509 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1510 hic.cmd.port_number = hw->bus.lan_id;
1511 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1512 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1513
1514 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1515 sizeof(hic.cmd),
1516 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1517
1518 /* Extract the register value from the response. */
1519 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1520
1521 return status;
1522 }
1523
1524 /**
1525 * ixgbe_disable_mdd_X550
1526 * @hw: pointer to hardware structure
1527 *
1528 * Disable malicious driver detection
1529 **/
1530 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1531 {
1532 u32 reg;
1533
1534 DEBUGFUNC("ixgbe_disable_mdd_X550");
1535
1536 /* Disable MDD for TX DMA and interrupt */
1537 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1538 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1539 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1540
1541 /* Disable MDD for RX and interrupt */
1542 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1543 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1544 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1545 }
1546
1547 /**
1548 * ixgbe_enable_mdd_X550
1549 * @hw: pointer to hardware structure
1550 *
1551 * Enable malicious driver detection
1552 **/
1553 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1554 {
1555 u32 reg;
1556
1557 DEBUGFUNC("ixgbe_enable_mdd_X550");
1558
1559 /* Enable MDD for TX DMA and interrupt */
1560 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1561 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1562 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1563
1564 /* Enable MDD for RX and interrupt */
1565 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1566 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1567 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1568 }
1569
1570 /**
1571 * ixgbe_restore_mdd_vf_X550
1572 * @hw: pointer to hardware structure
1573 * @vf: vf index
1574 *
1575 * Restore VF that was disabled during malicious driver detection event
1576 **/
1577 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1578 {
1579 u32 idx, reg, num_qs, start_q, bitmask;
1580
1581 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1582
1583 /* Map VF to queues */
1584 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1585 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1586 case IXGBE_MRQC_VMDQRT8TCEN:
1587 num_qs = 8; /* 16 VFs / pools */
1588 bitmask = 0x000000FF;
1589 break;
1590 case IXGBE_MRQC_VMDQRSS32EN:
1591 case IXGBE_MRQC_VMDQRT4TCEN:
1592 num_qs = 4; /* 32 VFs / pools */
1593 bitmask = 0x0000000F;
1594 break;
1595 default: /* 64 VFs / pools */
1596 num_qs = 2;
1597 bitmask = 0x00000003;
1598 break;
1599 }
1600 start_q = vf * num_qs;
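	/*
	 * Worked example: in 32-pool mode num_qs is 4, so VF 10 owns queues
	 * 40-43; idx becomes 40 / 32 = 1 and the 0xF bitmask is shifted to
	 * bits 8-11 of WQBR_TX(1)/WQBR_RX(1).
	 */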
1601
1602 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1603 idx = start_q / 32;
1604 reg = 0;
1605 reg |= (bitmask << (start_q % 32));
1606 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1607 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1608 }
1609
1610 /**
1611 * ixgbe_mdd_event_X550
1612 * @hw: pointer to hardware structure
1613 * @vf_bitmap: vf bitmap of malicious vfs
1614 *
1615 * Handle malicious driver detection event.
1616 **/
1617 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1618 {
1619 u32 wqbr;
1620 u32 i, j, reg, q, shift, vf, idx;
1621
1622 DEBUGFUNC("ixgbe_mdd_event_X550");
1623
1624 /* figure out pool size for mapping to vf's */
1625 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1626 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1627 case IXGBE_MRQC_VMDQRT8TCEN:
1628 shift = 3; /* 16 VFs / pools */
1629 break;
1630 case IXGBE_MRQC_VMDQRSS32EN:
1631 case IXGBE_MRQC_VMDQRT4TCEN:
1632 shift = 2; /* 32 VFs / pools */
1633 break;
1634 default:
1635 shift = 1; /* 64 VFs / pools */
1636 break;
1637 }
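	/*
	 * Each pool owns 2^shift queues, so a queue index maps back to its
	 * VF with q >> shift; e.g. with shift = 2 (32 pools), queue 45
	 * belongs to VF 11.
	 */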
1638
1639 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1640 for (i = 0; i < 4; i++) {
1641 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1642 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1643
1644 if (!wqbr)
1645 continue;
1646
1647 /* Get malicious queue */
1648 for (j = 0; j < 32 && wqbr; j++) {
1649
1650 if (!(wqbr & (1 << j)))
1651 continue;
1652
1653 /* Get queue from bitmask */
1654 q = j + (i * 32);
1655
1656 /* Map queue to vf */
1657 vf = (q >> shift);
1658
1659 /* Set vf bit in vf_bitmap */
1660 idx = vf / 32;
1661 vf_bitmap[idx] |= (1 << (vf % 32));
1662 wqbr &= ~(1 << j);
1663 }
1664 }
1665 }
1666
1667 /**
1668 * ixgbe_get_media_type_X550em - Get media type
1669 * @hw: pointer to hardware structure
1670 *
1671 * Returns the media type (fiber, copper, backplane)
1672 */
1673 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1674 {
1675 enum ixgbe_media_type media_type;
1676
1677 DEBUGFUNC("ixgbe_get_media_type_X550em");
1678
1679 /* Detect if there is a copper PHY attached. */
1680 switch (hw->device_id) {
1681 case IXGBE_DEV_ID_X550EM_X_KR:
1682 case IXGBE_DEV_ID_X550EM_X_KX4:
1683 case IXGBE_DEV_ID_X550EM_X_XFI:
1684 case IXGBE_DEV_ID_X550EM_A_KR:
1685 case IXGBE_DEV_ID_X550EM_A_KR_L:
1686 media_type = ixgbe_media_type_backplane;
1687 break;
1688 case IXGBE_DEV_ID_X550EM_X_SFP:
1689 case IXGBE_DEV_ID_X550EM_A_SFP:
1690 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1691 case IXGBE_DEV_ID_X550EM_A_QSFP:
1692 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1693 media_type = ixgbe_media_type_fiber;
1694 break;
1695 case IXGBE_DEV_ID_X550EM_X_1G_T:
1696 case IXGBE_DEV_ID_X550EM_X_10G_T:
1697 case IXGBE_DEV_ID_X550EM_A_10G_T:
1698 media_type = ixgbe_media_type_copper;
1699 break;
1700 case IXGBE_DEV_ID_X550EM_A_SGMII:
1701 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1702 media_type = ixgbe_media_type_backplane;
1703 hw->phy.type = ixgbe_phy_sgmii;
1704 break;
1705 case IXGBE_DEV_ID_X550EM_A_1G_T:
1706 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1707 media_type = ixgbe_media_type_copper;
1708 break;
1709 default:
1710 media_type = ixgbe_media_type_unknown;
1711 break;
1712 }
1713 return media_type;
1714 }
1715
1716 /**
1717 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1718 * @hw: pointer to hardware structure
1719 * @linear: TRUE if SFP module is linear
1720 */
1721 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1722 {
1723 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1724
1725 switch (hw->phy.sfp_type) {
1726 case ixgbe_sfp_type_not_present:
1727 return IXGBE_ERR_SFP_NOT_PRESENT;
1728 case ixgbe_sfp_type_da_cu_core0:
1729 case ixgbe_sfp_type_da_cu_core1:
1730 *linear = TRUE;
1731 break;
1732 case ixgbe_sfp_type_srlr_core0:
1733 case ixgbe_sfp_type_srlr_core1:
1734 case ixgbe_sfp_type_da_act_lmt_core0:
1735 case ixgbe_sfp_type_da_act_lmt_core1:
1736 case ixgbe_sfp_type_1g_sx_core0:
1737 case ixgbe_sfp_type_1g_sx_core1:
1738 case ixgbe_sfp_type_1g_lx_core0:
1739 case ixgbe_sfp_type_1g_lx_core1:
1740 *linear = FALSE;
1741 break;
1742 case ixgbe_sfp_type_unknown:
1743 case ixgbe_sfp_type_1g_cu_core0:
1744 case ixgbe_sfp_type_1g_cu_core1:
1745 default:
1746 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1747 }
1748
1749 return IXGBE_SUCCESS;
1750 }
1751
1752 /**
1753 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1754 * @hw: pointer to hardware structure
1755 *
1756 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1757 **/
1758 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1759 {
1760 s32 status;
1761 bool linear;
1762
1763 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1764
1765 status = ixgbe_identify_module_generic(hw);
1766
1767 if (status != IXGBE_SUCCESS)
1768 return status;
1769
1770 /* Check if SFP module is supported */
1771 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1772
1773 return status;
1774 }
1775
1776 /**
1777 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1778 * @hw: pointer to hardware structure
1779 */
1780 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1781 {
1782 s32 status;
1783 bool linear;
1784
1785 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1786
1787 /* Check if SFP module is supported */
1788 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1789
1790 if (status != IXGBE_SUCCESS)
1791 return status;
1792
1793 ixgbe_init_mac_link_ops_X550em(hw);
1794 hw->phy.ops.reset = NULL;
1795
1796 return IXGBE_SUCCESS;
1797 }
1798
1799 /**
1800 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1801 * internal PHY
1802 * @hw: pointer to hardware structure
1803 **/
1804 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1805 {
1806 s32 status;
1807 u32 link_ctrl;
1808
1809 /* Restart auto-negotiation. */
1810 status = hw->mac.ops.read_iosf_sb_reg(hw,
1811 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1812 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1813
1814 if (status) {
1815 DEBUGOUT("Auto-negotiation did not complete\n");
1816 return status;
1817 }
1818
1819 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1820 status = hw->mac.ops.write_iosf_sb_reg(hw,
1821 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1822 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1823
1824 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1825 u32 flx_mask_st20;
1826
1827 /* Indicate to FW that AN restart has been asserted */
1828 status = hw->mac.ops.read_iosf_sb_reg(hw,
1829 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1830 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1831
1832 if (status) {
1833 DEBUGOUT("Auto-negotiation did not complete\n");
1834 return status;
1835 }
1836
1837 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1838 status = hw->mac.ops.write_iosf_sb_reg(hw,
1839 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1840 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1841 }
1842
1843 return status;
1844 }
1845
1846 /**
1847  * ixgbe_setup_sgmii - Set up link for SGMII
1848 * @hw: pointer to hardware structure
1849 * @speed: new link speed
1850 * @autoneg_wait: TRUE when waiting for completion is needed
1851 */
1852 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1853 bool autoneg_wait)
1854 {
1855 struct ixgbe_mac_info *mac = &hw->mac;
1856 u32 lval, sval, flx_val;
1857 s32 rc;
1858
1859 rc = mac->ops.read_iosf_sb_reg(hw,
1860 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1861 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1862 if (rc)
1863 return rc;
1864
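	/* Force a 1G SGMII link: clear the AN enable and forced-speed bits,
	 * then enable SGMII/Clause 37 AN and force the 1G speed.
	 */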
1865 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1866 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1867 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1868 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1869 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1870 rc = mac->ops.write_iosf_sb_reg(hw,
1871 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1872 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1873 if (rc)
1874 return rc;
1875
1876 rc = mac->ops.read_iosf_sb_reg(hw,
1877 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1878 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1879 if (rc)
1880 return rc;
1881
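	/* Set the MAC target force bits for 10M and 100M operation in the
	 * SGMII control register.
	 */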
1882 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1883 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1884 rc = mac->ops.write_iosf_sb_reg(hw,
1885 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1886 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1887 if (rc)
1888 return rc;
1889
1890 rc = mac->ops.read_iosf_sb_reg(hw,
1891 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1892 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1893 if (rc)
1894 return rc;
1895
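	/* Program the PMD flex mask for a fixed 1G speed with SGMII and
	 * Clause 37 AN enabled.
	 */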
1896 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1897 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1898 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1899 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1900 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1901
1902 rc = mac->ops.write_iosf_sb_reg(hw,
1903 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1904 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1905 if (rc)
1906 return rc;
1907
1908 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1909 if (rc)
1910 return rc;
1911
1912 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1913 }
1914
1915 /**
1916 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1917 * @hw: pointer to hardware structure
1918 * @speed: new link speed
1919 * @autoneg_wait: TRUE when waiting for completion is needed
1920 */
1921 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1922 bool autoneg_wait)
1923 {
1924 struct ixgbe_mac_info *mac = &hw->mac;
1925 u32 lval, sval, flx_val;
1926 s32 rc;
1927
1928 rc = mac->ops.read_iosf_sb_reg(hw,
1929 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1930 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1931 if (rc)
1932 return rc;
1933
1934 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1935 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1936 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1937 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1938 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1939 rc = mac->ops.write_iosf_sb_reg(hw,
1940 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1941 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1942 if (rc)
1943 return rc;
1944
1945 rc = mac->ops.read_iosf_sb_reg(hw,
1946 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1947 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1948 if (rc)
1949 return rc;
1950
1951 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1952 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1953 rc = mac->ops.write_iosf_sb_reg(hw,
1954 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1955 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1956 if (rc)
1957 return rc;
1958
1959 rc = mac->ops.write_iosf_sb_reg(hw,
1960 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1961 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1962 if (rc)
1963 return rc;
1964
1965 rc = mac->ops.read_iosf_sb_reg(hw,
1966 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1967 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1968 if (rc)
1969 return rc;
1970
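	/* Program the PMD flex mask for auto-negotiated speed with SGMII and
	 * Clause 37 AN enabled.
	 */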
1971 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1972 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1973 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1974 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1975 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1976
1977 rc = mac->ops.write_iosf_sb_reg(hw,
1978 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1979 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1980 if (rc)
1981 return rc;
1982
1983 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1984
1985 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1986 }
1987
1988 /**
1989 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1990 * @hw: pointer to hardware structure
1991 */
1992 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1993 {
1994 struct ixgbe_mac_info *mac = &hw->mac;
1995
1996 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1997
1998 switch (hw->mac.ops.get_media_type(hw)) {
1999 case ixgbe_media_type_fiber:
2000 /* CS4227 does not support autoneg, so disable the laser control
2001 * functions for SFP+ fiber
2002 */
2003 mac->ops.disable_tx_laser = NULL;
2004 mac->ops.enable_tx_laser = NULL;
2005 mac->ops.flap_tx_laser = NULL;
2006 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2007 mac->ops.set_rate_select_speed =
2008 ixgbe_set_soft_rate_select_speed;
2009
2010 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2011 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2012 mac->ops.setup_mac_link =
2013 ixgbe_setup_mac_link_sfp_x550a;
2014 else
2015 mac->ops.setup_mac_link =
2016 ixgbe_setup_mac_link_sfp_x550em;
2017 break;
2018 case ixgbe_media_type_copper:
2019 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2020 break;
2021 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2022 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2023 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2024 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2025 mac->ops.check_link =
2026 ixgbe_check_mac_link_generic;
2027 } else {
2028 mac->ops.setup_link =
2029 ixgbe_setup_mac_link_t_X550em;
2030 }
2031 } else {
2032 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2033 mac->ops.check_link = ixgbe_check_link_t_X550em;
2034 }
2035 break;
2036 case ixgbe_media_type_backplane:
2037 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2038 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2039 mac->ops.setup_link = ixgbe_setup_sgmii;
2040 break;
2041 default:
2042 break;
2043 }
2044 }
2045
2046 /**
2047  * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2048 * @hw: pointer to hardware structure
2049 * @speed: pointer to link speed
2050 * @autoneg: TRUE when autoneg or autotry is enabled
2051 */
2052 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2053 ixgbe_link_speed *speed,
2054 bool *autoneg)
2055 {
2056 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2057
2058
2059 if (hw->phy.type == ixgbe_phy_fw) {
2060 *autoneg = TRUE;
2061 *speed = hw->phy.speeds_supported;
2062 return 0;
2063 }
2064
2065 /* SFP */
2066 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2067
2068 /* CS4227 SFP must not enable auto-negotiation */
2069 *autoneg = FALSE;
2070
2071 /* Check if 1G SFP module. */
2072 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2073 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2074 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2075 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2076 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2077 return IXGBE_SUCCESS;
2078 }
2079
2080 /* Link capabilities are based on SFP */
2081 if (hw->phy.multispeed_fiber)
2082 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2083 IXGBE_LINK_SPEED_1GB_FULL;
2084 else
2085 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2086 } else {
2087 *autoneg = TRUE;
2088
2089 switch (hw->phy.type) {
2090 case ixgbe_phy_x550em_xfi:
2091 *speed = IXGBE_LINK_SPEED_1GB_FULL |
2092 IXGBE_LINK_SPEED_10GB_FULL;
2093 *autoneg = FALSE;
2094 break;
2095 case ixgbe_phy_ext_1g_t:
2096 case ixgbe_phy_sgmii:
2097 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2098 break;
2099 case ixgbe_phy_x550em_kr:
2100 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2101 /* check different backplane modes */
2102 if (hw->phy.nw_mng_if_sel &
2103 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2104 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2105 break;
2106 } else if (hw->device_id ==
2107 IXGBE_DEV_ID_X550EM_A_KR_L) {
2108 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2109 break;
2110 }
2111 }
2112 /* fall through */
2113 default:
2114 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2115 IXGBE_LINK_SPEED_1GB_FULL;
2116 break;
2117 }
2118 }
2119
2120 return IXGBE_SUCCESS;
2121 }
2122
2123 /**
2124  * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2125 * @hw: pointer to hardware structure
2126 * @lsc: pointer to boolean flag which indicates whether external Base T
2127 * PHY interrupt is lsc
2128 *
2129  * Determine if external Base T PHY interrupt cause is high temperature
2130 * failure alarm or link status change.
2131 *
2132 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2133 * failure alarm, else return PHY access status.
2134 */
2135 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2136 {
2137 u32 status;
2138 u16 reg;
2139
2140 *lsc = FALSE;
2141
2142 /* Vendor alarm triggered */
2143 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2144 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2145 				      &reg);
2146
2147 if (status != IXGBE_SUCCESS ||
2148 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2149 return status;
2150
2151 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2152 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2153 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2154 				      &reg);
2155
2156 if (status != IXGBE_SUCCESS ||
2157 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2158 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2159 return status;
2160
2161 /* Global alarm triggered */
2162 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2163 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2164 				      &reg);
2165
2166 if (status != IXGBE_SUCCESS)
2167 return status;
2168
2169 /* If high temperature failure, then return over temp error and exit */
2170 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2171 /* power down the PHY in case the PHY FW didn't already */
2172 ixgbe_set_copper_phy_power(hw, FALSE);
2173 return IXGBE_ERR_OVERTEMP;
2174 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2175 /* device fault alarm triggered */
2176 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2177 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2178 					      &reg);
2179
2180 if (status != IXGBE_SUCCESS)
2181 return status;
2182
2183 /* if device fault was due to high temp alarm handle and exit */
2184 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2185 /* power down the PHY in case the PHY FW didn't */
2186 ixgbe_set_copper_phy_power(hw, FALSE);
2187 return IXGBE_ERR_OVERTEMP;
2188 }
2189 }
2190
2191 /* Vendor alarm 2 triggered */
2192 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2193 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2194
2195 if (status != IXGBE_SUCCESS ||
2196 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2197 return status;
2198
2199 /* link connect/disconnect event occurred */
2200 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2201 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2202
2203 if (status != IXGBE_SUCCESS)
2204 return status;
2205
2206 /* Indicate LSC */
2207 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2208 *lsc = TRUE;
2209
2210 return IXGBE_SUCCESS;
2211 }
2212
2213 /**
2214 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2215 * @hw: pointer to hardware structure
2216 *
2217 * Enable link status change and temperature failure alarm for the external
2218 * Base T PHY
2219 *
2220 * Returns PHY access status
2221 */
2222 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2223 {
2224 u32 status;
2225 u16 reg;
2226 bool lsc;
2227
2228 /* Clear interrupt flags */
2229 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2230
2231 /* Enable link status change alarm */
2232
2233 /* Enable the LASI interrupts on X552 devices to receive notifications
2234 * of the link configurations of the external PHY and correspondingly
2235 * support the configuration of the internal iXFI link, since iXFI does
2236 * not support auto-negotiation. This is not required for X553 devices
2237 	 * having KR support, whose KR interface performs auto-negotiation and is
2238 	 * used as the internal link to the external PHY. Hence the check below
2239 	 * avoids enabling LASI interrupts for X553 devices.
2240 */
2241 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2242 status = hw->phy.ops.read_reg(hw,
2243 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2244 			IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2245
2246 if (status != IXGBE_SUCCESS)
2247 return status;
2248
2249 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2250
2251 status = hw->phy.ops.write_reg(hw,
2252 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2253 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2254
2255 if (status != IXGBE_SUCCESS)
2256 return status;
2257 }
2258
2259 /* Enable high temperature failure and global fault alarms */
2260 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2261 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2262 				      &reg);
2263
2264 if (status != IXGBE_SUCCESS)
2265 return status;
2266
2267 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2268 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2269
2270 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2271 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2272 reg);
2273
2274 if (status != IXGBE_SUCCESS)
2275 return status;
2276
2277 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2278 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2279 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2280 				      &reg);
2281
2282 if (status != IXGBE_SUCCESS)
2283 return status;
2284
2285 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2286 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2287
2288 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2289 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2290 reg);
2291
2292 if (status != IXGBE_SUCCESS)
2293 return status;
2294
2295 /* Enable chip-wide vendor alarm */
2296 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2297 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2298 				      &reg);
2299
2300 if (status != IXGBE_SUCCESS)
2301 return status;
2302
2303 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2304
2305 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2306 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2307 reg);
2308
2309 return status;
2310 }
2311
2312 /**
2313 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2314 * @hw: pointer to hardware structure
2315 * @speed: link speed
2316 *
2317 * Configures the integrated KR PHY.
2318 **/
2319 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2320 ixgbe_link_speed speed)
2321 {
2322 s32 status;
2323 u32 reg_val;
2324
2325 status = hw->mac.ops.read_iosf_sb_reg(hw,
2326 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2327 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2328 if (status)
2329 return status;
2330
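	/* Enable AN and clear any previously advertised KR/KX capabilities
	 * before re-advertising the requested speeds below.
	 */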
2331 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2332 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2333 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2334
2335 /* Advertise 10G support. */
2336 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2337 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2338
2339 /* Advertise 1G support. */
2340 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2341 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2342
2343 status = hw->mac.ops.write_iosf_sb_reg(hw,
2344 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2345 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2346
2347 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2348 /* Set lane mode to KR auto negotiation */
2349 status = hw->mac.ops.read_iosf_sb_reg(hw,
2350 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2351 			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2352
2353 if (status)
2354 return status;
2355
2356 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2357 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2358 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2359 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2360 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2361
2362 status = hw->mac.ops.write_iosf_sb_reg(hw,
2363 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2364 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2365 }
2366
2367 return ixgbe_restart_an_internal_phy_x550em(hw);
2368 }
2369
2370 /**
2371 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2372 * @hw: pointer to hardware structure
2373 */
2374 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2375 {
2376 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2377 s32 rc;
2378
2379 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2380 return IXGBE_SUCCESS;
2381
2382 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2383 if (rc)
2384 return rc;
2385 memset(store, 0, sizeof(store));
2386
2387 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2388 if (rc)
2389 return rc;
2390
2391 return ixgbe_setup_fw_link(hw);
2392 }
2393
2394 /**
2395 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2396 * @hw: pointer to hardware structure
2397 */
2398 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2399 {
2400 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2401 s32 rc;
2402
2403 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2404 if (rc)
2405 return rc;
2406
2407 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2408 ixgbe_shutdown_fw_phy(hw);
2409 return IXGBE_ERR_OVERTEMP;
2410 }
2411 return IXGBE_SUCCESS;
2412 }
2413
2414 /**
2415 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2416 * @hw: pointer to hardware structure
2417 *
2418  * Read the NW_MNG_IF_SEL register, save the field values, and check them
2419  * for validity.
2420 **/
2421 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2422 {
2423 /* Save NW management interface connected on board. This is used
2424 * to determine internal PHY mode.
2425 */
2426 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2427
2428 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2429 	 * PHY address. This register field has only been used for X552.
2430 */
2431 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2432 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2433 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2434 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2435 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2436 }
2437
2438 return IXGBE_SUCCESS;
2439 }
2440
2441 /**
2442 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2443 * @hw: pointer to hardware structure
2444 *
2445 * Initialize any function pointers that were not able to be
2446 * set during init_shared_code because the PHY/SFP type was
2447 * not known. Perform the SFP init if necessary.
2448 */
2449 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2450 {
2451 struct ixgbe_phy_info *phy = &hw->phy;
2452 s32 ret_val;
2453
2454 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2455
2456 hw->mac.ops.set_lan_id(hw);
2457 ixgbe_read_mng_if_sel_x550em(hw);
2458
2459 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2460 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2461 ixgbe_setup_mux_ctl(hw);
2462 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2463 }
2464
2465 switch (hw->device_id) {
2466 case IXGBE_DEV_ID_X550EM_A_1G_T:
2467 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2468 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2469 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2470 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2471 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2472 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
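		/* Pick the per-port PHY semaphore (PHY0 for LAN0, PHY1 for LAN1). */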
2473 if (hw->bus.lan_id)
2474 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2475 else
2476 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2477
2478 break;
2479 case IXGBE_DEV_ID_X550EM_A_10G_T:
2480 case IXGBE_DEV_ID_X550EM_A_SFP:
2481 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2482 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2483 if (hw->bus.lan_id)
2484 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2485 else
2486 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2487 break;
2488 case IXGBE_DEV_ID_X550EM_X_SFP:
2489 /* set up for CS4227 usage */
2490 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2491 break;
2492 case IXGBE_DEV_ID_X550EM_X_1G_T:
2493 phy->ops.read_reg_mdi = NULL;
2494 phy->ops.write_reg_mdi = NULL;
2495 break;
2496 default:
2497 break;
2498 }
2499
2500 /* Identify the PHY or SFP module */
2501 ret_val = phy->ops.identify(hw);
2502 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2503 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2504 return ret_val;
2505
2506 /* Setup function pointers based on detected hardware */
2507 ixgbe_init_mac_link_ops_X550em(hw);
2508 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2509 phy->ops.reset = NULL;
2510
2511 /* Set functions pointers based on phy type */
2512 switch (hw->phy.type) {
2513 case ixgbe_phy_x550em_kx4:
2514 phy->ops.setup_link = NULL;
2515 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2516 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2517 break;
2518 case ixgbe_phy_x550em_kr:
2519 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2520 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2521 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2522 break;
2523 case ixgbe_phy_ext_1g_t:
2524 /* link is managed by FW */
2525 phy->ops.setup_link = NULL;
2526 phy->ops.reset = NULL;
2527 break;
2528 case ixgbe_phy_x550em_xfi:
2529 /* link is managed by HW */
2530 phy->ops.setup_link = NULL;
2531 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2532 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2533 break;
2534 case ixgbe_phy_x550em_ext_t:
2535 /* If internal link mode is XFI, then setup iXFI internal link,
2536 * else setup KR now.
2537 */
2538 phy->ops.setup_internal_link =
2539 ixgbe_setup_internal_phy_t_x550em;
2540
2541 /* setup SW LPLU only for first revision of X550EM_x */
2542 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2543 !(IXGBE_FUSES0_REV_MASK &
2544 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2545 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2546
2547 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2548 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2549 break;
2550 case ixgbe_phy_sgmii:
2551 phy->ops.setup_link = NULL;
2552 break;
2553 case ixgbe_phy_fw:
2554 phy->ops.setup_link = ixgbe_setup_fw_link;
2555 phy->ops.reset = ixgbe_reset_phy_fw;
2556 break;
2557 default:
2558 break;
2559 }
2560 return ret_val;
2561 }
2562
2563 /**
2564 * ixgbe_set_mdio_speed - Set MDIO clock speed
2565 * @hw: pointer to hardware structure
2566 */
2567 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2568 {
2569 u32 hlreg0;
2570
2571 switch (hw->device_id) {
2572 case IXGBE_DEV_ID_X550EM_X_10G_T:
2573 case IXGBE_DEV_ID_X550EM_A_SGMII:
2574 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2575 case IXGBE_DEV_ID_X550EM_A_10G_T:
2576 case IXGBE_DEV_ID_X550EM_A_SFP:
2577 case IXGBE_DEV_ID_X550EM_A_QSFP:
2578 /* Config MDIO clock speed before the first MDIO PHY access */
2579 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2580 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2581 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2582 break;
2583 case IXGBE_DEV_ID_X550EM_A_1G_T:
2584 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2585 /* Select fast MDIO clock speed for these devices */
2586 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2587 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2588 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2589 break;
2590 default:
2591 break;
2592 }
2593 }
2594
2595 /**
2596 * ixgbe_reset_hw_X550em - Perform hardware reset
2597 * @hw: pointer to hardware structure
2598 *
2599 * Resets the hardware by resetting the transmit and receive units, masks
2600  * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2601 * reset.
2602 */
2603 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2604 {
2605 ixgbe_link_speed link_speed;
2606 s32 status;
2607 u32 ctrl = 0;
2608 u32 i;
2609 bool link_up = FALSE;
2610 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2611
2612 DEBUGFUNC("ixgbe_reset_hw_X550em");
2613
2614 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2615 status = hw->mac.ops.stop_adapter(hw);
2616 if (status != IXGBE_SUCCESS) {
2617 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2618 return status;
2619 }
2620 /* flush pending Tx transactions */
2621 ixgbe_clear_tx_pending(hw);
2622
2623 ixgbe_set_mdio_speed(hw);
2624
2625 /* PHY ops must be identified and initialized prior to reset */
2626 status = hw->phy.ops.init(hw);
2627
2628 if (status)
2629 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2630 status);
2631
2632 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2633 status == IXGBE_ERR_PHY_ADDR_INVALID) {
2634 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2635 return status;
2636 }
2637
2638 /* start the external PHY */
2639 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2640 status = ixgbe_init_ext_t_x550em(hw);
2641 if (status) {
2642 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2643 status);
2644 return status;
2645 }
2646 }
2647
2648 /* Setup SFP module if there is one present. */
2649 if (hw->phy.sfp_setup_needed) {
2650 status = hw->mac.ops.setup_sfp(hw);
2651 hw->phy.sfp_setup_needed = FALSE;
2652 }
2653
2654 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2655 return status;
2656
2657 /* Reset PHY */
2658 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2659 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2660 return IXGBE_ERR_OVERTEMP;
2661 }
2662
2663 mac_reset_top:
2664 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2665 * If link reset is used when link is up, it might reset the PHY when
2666 * mng is using it. If link is down or the flag to force full link
2667 * reset is set, then perform link reset.
2668 */
2669 ctrl = IXGBE_CTRL_LNK_RST;
2670 if (!hw->force_full_reset) {
2671 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2672 if (link_up)
2673 ctrl = IXGBE_CTRL_RST;
2674 }
2675
2676 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2677 if (status != IXGBE_SUCCESS) {
2678 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2679 "semaphore failed with %d", status);
2680 return IXGBE_ERR_SWFW_SYNC;
2681 }
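	/* Trigger the MAC reset while holding the SW/FW semaphore, then
	 * release it.
	 */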
2682 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2683 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2684 IXGBE_WRITE_FLUSH(hw);
2685 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2686
2687 /* Poll for reset bit to self-clear meaning reset is complete */
2688 for (i = 0; i < 10; i++) {
2689 usec_delay(1);
2690 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2691 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2692 break;
2693 }
2694
2695 if (ctrl & IXGBE_CTRL_RST_MASK) {
2696 status = IXGBE_ERR_RESET_FAILED;
2697 DEBUGOUT("Reset polling failed to complete.\n");
2698 }
2699
2700 msec_delay(50);
2701
2702 /* Double resets are required for recovery from certain error
2703 * conditions. Between resets, it is necessary to stall to
2704 * allow time for any pending HW events to complete.
2705 */
2706 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2707 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2708 goto mac_reset_top;
2709 }
2710
2711 /* Store the permanent mac address */
2712 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2713
2714 /* Store MAC address from RAR0, clear receive address registers, and
2715 * clear the multicast table. Also reset num_rar_entries to 128,
2716 * since we modify this value when programming the SAN MAC address.
2717 */
2718 hw->mac.num_rar_entries = 128;
2719 hw->mac.ops.init_rx_addrs(hw);
2720
2721 ixgbe_set_mdio_speed(hw);
2722
2723 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2724 ixgbe_setup_mux_ctl(hw);
2725
2726 if (status != IXGBE_SUCCESS)
2727 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2728
2729 return status;
2730 }
2731
2732 /**
2733 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2734 * @hw: pointer to hardware structure
2735 */
2736 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2737 {
2738 u32 status;
2739 u16 reg;
2740
2741 status = hw->phy.ops.read_reg(hw,
2742 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2743 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2744 					&reg);
2745
2746 if (status != IXGBE_SUCCESS)
2747 return status;
2748
2749 /* If PHY FW reset completed bit is set then this is the first
2750 * SW instance after a power on so the PHY FW must be un-stalled.
2751 */
2752 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2753 status = hw->phy.ops.read_reg(hw,
2754 IXGBE_MDIO_GLOBAL_RES_PR_10,
2755 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2756 					&reg);
2757
2758 if (status != IXGBE_SUCCESS)
2759 return status;
2760
2761 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2762
2763 status = hw->phy.ops.write_reg(hw,
2764 IXGBE_MDIO_GLOBAL_RES_PR_10,
2765 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2766 reg);
2767
2768 if (status != IXGBE_SUCCESS)
2769 return status;
2770 }
2771
2772 return status;
2773 }
2774
2775 /**
2776 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2777 * @hw: pointer to hardware structure
2778 **/
2779 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2780 {
2781 /* leave link alone for 2.5G */
2782 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2783 return IXGBE_SUCCESS;
2784
2785 if (ixgbe_check_reset_blocked(hw))
2786 return 0;
2787
2788 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2789 }
2790
2791 /**
2792  * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2793 * @hw: pointer to hardware structure
2794 * @speed: new link speed
2795 * @autoneg_wait_to_complete: unused
2796 *
2797 * Configure the external PHY and the integrated KR PHY for SFP support.
2798 **/
2799 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2800 ixgbe_link_speed speed,
2801 bool autoneg_wait_to_complete)
2802 {
2803 s32 ret_val;
2804 u16 reg_slice, reg_val;
2805 bool setup_linear = FALSE;
2806 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2807
2808 /* Check if SFP module is supported and linear */
2809 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2810
2811 	/* If no SFP module is present, return success: there is no reason to
2812 	 * configure the CS4227, and an SFP-not-present error is not accepted in
2813 	 * the setup MAC link flow.
2814 */
2815 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2816 return IXGBE_SUCCESS;
2817
2818 if (ret_val != IXGBE_SUCCESS)
2819 return ret_val;
2820
2821 /* Configure internal PHY for KR/KX. */
2822 ixgbe_setup_kr_speed_x550em(hw, speed);
2823
2824 /* Configure CS4227 LINE side to proper mode. */
2825 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2826 (hw->bus.lan_id << 12);
2827 if (setup_linear)
2828 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2829 else
2830 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2831 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2832 reg_val);
2833 return ret_val;
2834 }
2835
2836 /**
2837 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2838 * @hw: pointer to hardware structure
2839 * @speed: the link speed to force
2840 *
2841 * Configures the integrated PHY for native SFI mode. Used to connect the
2842 * internal PHY directly to an SFP cage, without autonegotiation.
2843 **/
2844 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2845 {
2846 struct ixgbe_mac_info *mac = &hw->mac;
2847 s32 status;
2848 u32 reg_val;
2849
2850 	/* Disable all AN; the forced link speed is selected below. */
2851 status = mac->ops.read_iosf_sb_reg(hw,
2852 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2853 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2854 if (status != IXGBE_SUCCESS)
2855 return status;
2856
2857 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2858 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2859 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2860 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2861
2862 /* Select forced link speed for internal PHY. */
2863 switch (*speed) {
2864 case IXGBE_LINK_SPEED_10GB_FULL:
2865 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2866 break;
2867 case IXGBE_LINK_SPEED_1GB_FULL:
2868 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2869 break;
2870 case 0:
2871 /* media none (linkdown) */
2872 break;
2873 default:
2874 /* Other link speeds are not supported by internal PHY. */
2875 return IXGBE_ERR_LINK_SETUP;
2876 }
2877
2878 status = mac->ops.write_iosf_sb_reg(hw,
2879 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2880 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2881
2882 /* Toggle port SW reset by AN reset. */
2883 status = ixgbe_restart_an_internal_phy_x550em(hw);
2884
2885 return status;
2886 }
2887
2888 /**
2889 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2890 * @hw: pointer to hardware structure
2891 * @speed: new link speed
2892 * @autoneg_wait_to_complete: unused
2893 *
2894  * Configure the integrated PHY for SFP support.
2895 **/
2896 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2897 ixgbe_link_speed speed,
2898 bool autoneg_wait_to_complete)
2899 {
2900 s32 ret_val;
2901 u16 reg_phy_ext;
2902 bool setup_linear = FALSE;
2903 u32 reg_slice, reg_phy_int, slice_offset;
2904
2905 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2906
2907 /* Check if SFP module is supported and linear */
2908 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2909
2910 	/* If no SFP module is present, return success, since an SFP-not-present
2911 	 * error is not accepted in the setup MAC link flow.
2912 */
2913 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2914 return IXGBE_SUCCESS;
2915
2916 if (ret_val != IXGBE_SUCCESS)
2917 return ret_val;
2918
2919 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2920 /* Configure internal PHY for native SFI based on module type */
2921 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2922 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2923 			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2924
2925 if (ret_val != IXGBE_SUCCESS)
2926 return ret_val;
2927
2928 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2929 if (!setup_linear)
2930 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2931
2932 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2933 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2934 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2935
2936 if (ret_val != IXGBE_SUCCESS)
2937 return ret_val;
2938
2939 /* Setup SFI internal link. */
2940 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2941 } else {
2942 /* Configure internal PHY for KR/KX. */
2943 ixgbe_setup_kr_speed_x550em(hw, speed);
2944
2945 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2946 /* Find Address */
2947 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2948 return IXGBE_ERR_PHY_ADDR_INVALID;
2949 }
2950
2951 /* Get external PHY SKU id */
2952 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2953 				IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2954
2955 if (ret_val != IXGBE_SUCCESS)
2956 return ret_val;
2957
2958 /* When configuring quad port CS4223, the MAC instance is part
2959 * of the slice offset.
2960 */
2961 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2962 slice_offset = (hw->bus.lan_id +
2963 (hw->bus.instance_id << 1)) << 12;
2964 else
2965 slice_offset = hw->bus.lan_id << 12;
2966
2967 /* Configure CS4227/CS4223 LINE side to proper mode. */
2968 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2969
2970 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2971 				IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2972
2973 if (ret_val != IXGBE_SUCCESS)
2974 return ret_val;
2975
2976 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2977 (IXGBE_CS4227_EDC_MODE_SR << 1));
2978
2979 if (setup_linear)
2980 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2981 else
2982 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2983 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2984 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2985
2986 /* Flush previous write with a read */
2987 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2988 				IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2989 }
2990 return ret_val;
2991 }
2992
2993 /**
2994 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2995 * @hw: pointer to hardware structure
2996 *
2997  * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2998 **/
2999 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
3000 {
3001 struct ixgbe_mac_info *mac = &hw->mac;
3002 s32 status;
3003 u32 reg_val;
3004
3005 /* Disable training protocol FSM. */
3006 status = mac->ops.read_iosf_sb_reg(hw,
3007 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3008 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3009 if (status != IXGBE_SUCCESS)
3010 return status;
3011 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3012 status = mac->ops.write_iosf_sb_reg(hw,
3013 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3014 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3015 if (status != IXGBE_SUCCESS)
3016 return status;
3017
3018 /* Disable Flex from training TXFFE. */
3019 status = mac->ops.read_iosf_sb_reg(hw,
3020 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3021 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3022 if (status != IXGBE_SUCCESS)
3023 return status;
3024 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3025 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3026 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3027 status = mac->ops.write_iosf_sb_reg(hw,
3028 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3029 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3030 if (status != IXGBE_SUCCESS)
3031 return status;
3032 status = mac->ops.read_iosf_sb_reg(hw,
3033 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3034 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3035 if (status != IXGBE_SUCCESS)
3036 return status;
3037 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3038 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3039 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3040 status = mac->ops.write_iosf_sb_reg(hw,
3041 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3042 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3043 if (status != IXGBE_SUCCESS)
3044 return status;
3045
3046 /* Enable override for coefficients. */
3047 status = mac->ops.read_iosf_sb_reg(hw,
3048 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3049 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3050 if (status != IXGBE_SUCCESS)
3051 return status;
3052 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3053 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3054 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3055 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3056 status = mac->ops.write_iosf_sb_reg(hw,
3057 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3058 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3059 return status;
3060 }
3061
3062 /**
3063 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3064 * @hw: pointer to hardware structure
3065 * @speed: the link speed to force
3066 *
3067 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3068 * internal and external PHY at a specific speed, without autonegotiation.
3069 **/
3070 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3071 {
3072 struct ixgbe_mac_info *mac = &hw->mac;
3073 s32 status;
3074 u32 reg_val;
3075
3076 /* iXFI is only supported with X552 */
3077 if (mac->type != ixgbe_mac_X550EM_x)
3078 return IXGBE_ERR_LINK_SETUP;
3079
3080 /* Disable AN and force speed to 10G Serial. */
3081 status = mac->ops.read_iosf_sb_reg(hw,
3082 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3083 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3084 if (status != IXGBE_SUCCESS)
3085 return status;
3086
3087 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3088 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3089
3090 /* Select forced link speed for internal PHY. */
3091 switch (*speed) {
3092 case IXGBE_LINK_SPEED_10GB_FULL:
3093 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3094 break;
3095 case IXGBE_LINK_SPEED_1GB_FULL:
3096 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3097 break;
3098 default:
3099 /* Other link speeds are not supported by internal KR PHY. */
3100 return IXGBE_ERR_LINK_SETUP;
3101 }
3102
3103 status = mac->ops.write_iosf_sb_reg(hw,
3104 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3105 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3106 if (status != IXGBE_SUCCESS)
3107 return status;
3108
3109 /* Additional configuration needed for x550em_x */
3110 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3111 status = ixgbe_setup_ixfi_x550em_x(hw);
3112 if (status != IXGBE_SUCCESS)
3113 return status;
3114 }
3115
3116 /* Toggle port SW reset by AN reset. */
3117 status = ixgbe_restart_an_internal_phy_x550em(hw);
3118
3119 return status;
3120 }
3121
3122 /**
3123 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3124 * @hw: address of hardware structure
3125 * @link_up: address of boolean to indicate link status
3126 *
3127 * Returns error code if unable to get link status.
3128 */
3129 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3130 {
3131 u32 ret;
3132 u16 autoneg_status;
3133
3134 *link_up = FALSE;
3135
3136 /* read this twice back to back to indicate current status */
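	/* The link status bit latches low, so the first read may report a
	 * stale value; the second read returns the current state.
	 */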
3137 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3138 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3139 &autoneg_status);
3140 if (ret != IXGBE_SUCCESS)
3141 return ret;
3142
3143 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3144 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3145 &autoneg_status);
3146 if (ret != IXGBE_SUCCESS)
3147 return ret;
3148
3149 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3150
3151 return IXGBE_SUCCESS;
3152 }
3153
3154 /**
3155 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3156  * @hw: pointer to hardware structure
3157 *
3158 * Configures the link between the integrated KR PHY and the external X557 PHY
3159 * The driver will call this function when it gets a link status change
3160 * interrupt from the X557 PHY. This function configures the link speed
3161 * between the PHYs to match the link speed of the BASE-T link.
3162 *
3163 * A return of a non-zero value indicates an error, and the base driver should
3164 * not report link up.
3165 */
3166 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3167 {
3168 ixgbe_link_speed force_speed;
3169 bool link_up;
3170 u32 status;
3171 u16 speed;
3172
3173 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3174 return IXGBE_ERR_CONFIG;
3175
3176 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3177 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3178 /* If link is down, there is no setup necessary so return */
3179 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3180 if (status != IXGBE_SUCCESS)
3181 return status;
3182
3183 if (!link_up)
3184 return IXGBE_SUCCESS;
3185
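		/* Read the external PHY's reported speed so the internal link
		 * can be forced to match it.
		 */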
3186 status = hw->phy.ops.read_reg(hw,
3187 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3188 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3189 &speed);
3190 if (status != IXGBE_SUCCESS)
3191 return status;
3192
3193 /* If link is still down - no setup is required so return */
3194 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3195 if (status != IXGBE_SUCCESS)
3196 return status;
3197 if (!link_up)
3198 return IXGBE_SUCCESS;
3199
3200 /* clear everything but the speed and duplex bits */
3201 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3202
3203 switch (speed) {
3204 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3205 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3206 break;
3207 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3208 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3209 break;
3210 default:
3211 /* Internal PHY does not support anything else */
3212 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3213 }
3214
3215 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3216 } else {
3217 speed = IXGBE_LINK_SPEED_10GB_FULL |
3218 IXGBE_LINK_SPEED_1GB_FULL;
3219 return ixgbe_setup_kr_speed_x550em(hw, speed);
3220 }
3221 }
3222
3223 /**
3224 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3225 * @hw: pointer to hardware structure
3226 *
3227 * Configures the integrated KR PHY to use internal loopback mode.
3228 **/
3229 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3230 {
3231 s32 status;
3232 u32 reg_val;
3233
3234 /* Disable AN and force speed to 10G Serial. */
3235 status = hw->mac.ops.read_iosf_sb_reg(hw,
3236 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3237 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3238 if (status != IXGBE_SUCCESS)
3239 return status;
3240 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3241 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3242 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3243 status = hw->mac.ops.write_iosf_sb_reg(hw,
3244 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3245 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3246 if (status != IXGBE_SUCCESS)
3247 return status;
3248
3249 /* Set near-end loopback clocks. */
3250 status = hw->mac.ops.read_iosf_sb_reg(hw,
3251 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3252 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3253 if (status != IXGBE_SUCCESS)
3254 return status;
3255 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3256 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3257 status = hw->mac.ops.write_iosf_sb_reg(hw,
3258 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3259 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3260 if (status != IXGBE_SUCCESS)
3261 return status;
3262
3263 /* Set loopback enable. */
3264 status = hw->mac.ops.read_iosf_sb_reg(hw,
3265 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3266 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3267 if (status != IXGBE_SUCCESS)
3268 return status;
3269 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3270 status = hw->mac.ops.write_iosf_sb_reg(hw,
3271 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3272 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3273 if (status != IXGBE_SUCCESS)
3274 return status;
3275
3276 /* Training bypass. */
3277 status = hw->mac.ops.read_iosf_sb_reg(hw,
3278 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3279 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3280 if (status != IXGBE_SUCCESS)
3281 return status;
3282 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3283 status = hw->mac.ops.write_iosf_sb_reg(hw,
3284 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3285 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3286
3287 return status;
3288 }
3289
3290 /**
3291  * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command;
3292  * acquires the SW/FW semaphore for the duration of the read.
3293 * @hw: pointer to hardware structure
3294 * @offset: offset of word in the EEPROM to read
3295 * @data: word read from the EEPROM
3296 *
3297 * Reads a 16 bit word from the EEPROM using the hostif.
3298 **/
3299 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3300 {
3301 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3302 struct ixgbe_hic_read_shadow_ram buffer;
3303 s32 status;
3304
3305 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3306 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3307 buffer.hdr.req.buf_lenh = 0;
3308 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3309 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3310
3311 /* convert offset from words to bytes */
3312 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3313 /* one word */
3314 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3315 buffer.pad2 = 0;
3316 buffer.pad3 = 0;
3317
3318 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3319 if (status)
3320 return status;
3321
3322 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3323 IXGBE_HI_COMMAND_TIMEOUT);
3324 if (!status) {
3325 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3326 FW_NVM_DATA_OFFSET);
3327 }
3328
3329 hw->mac.ops.release_swfw_sync(hw, mask);
3330 return status;
3331 }
3332
3333 /**
3334  * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3335 * @hw: pointer to hardware structure
3336 * @offset: offset of word in the EEPROM to read
3337 * @words: number of words
3338 * @data: word(s) read from the EEPROM
3339 *
3340  * Reads one or more 16-bit words from the EEPROM using the hostif.
3341 **/
3342 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3343 u16 offset, u16 words, u16 *data)
3344 {
3345 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3346 struct ixgbe_hic_read_shadow_ram buffer;
3347 u32 current_word = 0;
3348 u16 words_to_read;
3349 s32 status;
3350 u32 i;
3351
3352 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3353
3354 /* Take semaphore for the entire operation. */
3355 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3356 if (status) {
3357 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3358 return status;
3359 }
3360
3361 while (words) {
3362 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3363 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3364 else
3365 words_to_read = words;
3366
3367 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3368 buffer.hdr.req.buf_lenh = 0;
3369 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3370 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3371
3372 /* convert offset from words to bytes */
3373 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3374 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3375 buffer.pad2 = 0;
3376 buffer.pad3 = 0;
3377
3378 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3379 IXGBE_HI_COMMAND_TIMEOUT);
3380
3381 if (status) {
3382 DEBUGOUT("Host interface command failed\n");
3383 goto out;
3384 }
3385
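		/* Each 32-bit FLEX_MNG register read holds two 16-bit EEPROM
		 * words; unpack the low half first, then the high half.
		 */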
3386 for (i = 0; i < words_to_read; i++) {
3387 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3388 2 * i;
3389 u32 value = IXGBE_READ_REG(hw, reg);
3390
3391 data[current_word] = (u16)(value & 0xffff);
3392 current_word++;
3393 i++;
3394 if (i < words_to_read) {
3395 value >>= 16;
3396 data[current_word] = (u16)(value & 0xffff);
3397 current_word++;
3398 }
3399 }
3400 words -= words_to_read;
3401 }
3402
3403 out:
3404 hw->mac.ops.release_swfw_sync(hw, mask);
3405 return status;
3406 }
3407
3408 /**
3409  * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3410 * @hw: pointer to hardware structure
3411 * @offset: offset of word in the EEPROM to write
3412  * @data: word to write to the EEPROM
3413 *
3414 * Write a 16 bit word to the EEPROM using the hostif.
3415 **/
3416 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3417 u16 data)
3418 {
3419 s32 status;
3420 struct ixgbe_hic_write_shadow_ram buffer;
3421
3422 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3423
3424 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3425 buffer.hdr.req.buf_lenh = 0;
3426 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3427 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3428
3429 /* one word */
3430 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3431 buffer.data = data;
3432 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3433
3434 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3435 sizeof(buffer),
3436 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3437
3438 return status;
3439 }
3440
3441 /**
3442 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3443 * @hw: pointer to hardware structure
3444 * @offset: offset of word in the EEPROM to write
3445  * @data: word to write to the EEPROM
3446 *
3447 * Write a 16 bit word to the EEPROM using the hostif.
3448 **/
3449 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3450 u16 data)
3451 {
3452 s32 status = IXGBE_SUCCESS;
3453
3454 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3455
3456 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3457 IXGBE_SUCCESS) {
3458 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3459 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3460 } else {
3461 DEBUGOUT("write ee hostif failed to get semaphore");
3462 status = IXGBE_ERR_SWFW_SYNC;
3463 }
3464
3465 return status;
3466 }
3467
3468 /**
3469 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3470 * @hw: pointer to hardware structure
3471 * @offset: offset of word in the EEPROM to write
3472 * @words: number of words
3473 * @data: word(s) write to the EEPROM
3474 *
3475  * Writes one or more 16-bit words to the EEPROM using the hostif.
3476 **/
3477 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3478 u16 offset, u16 words, u16 *data)
3479 {
3480 s32 status = IXGBE_SUCCESS;
3481 u32 i = 0;
3482
3483 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3484
3485 /* Take semaphore for the entire operation. */
3486 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3487 if (status != IXGBE_SUCCESS) {
3488 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3489 goto out;
3490 }
3491
3492 for (i = 0; i < words; i++) {
3493 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3494 data[i]);
3495
3496 if (status != IXGBE_SUCCESS) {
3497 DEBUGOUT("Eeprom buffered write failed\n");
3498 break;
3499 }
3500 }
3501
3502 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3503 out:
3504
3505 return status;
3506 }
3507
3508 /**
3509 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3510 * @hw: pointer to hardware structure
3511 * @ptr: pointer offset in eeprom
3512  * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
3513 * @csum: address of checksum to update
3514 * @buffer: pointer to buffer containing calculated checksum
3515 * @buffer_size: size of buffer
3516 *
3517 * Returns error status for any failure
3518 */
3519 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3520 u16 size, u16 *csum, u16 *buffer,
3521 u32 buffer_size)
3522 {
3523 u16 buf[256];
3524 s32 status;
3525 u16 length, bufsz, i, start;
3526 u16 *local_buffer;
3527
3528 bufsz = sizeof(buf) / sizeof(buf[0]);
3529
3530 /* Read a chunk at the pointer location */
3531 if (!buffer) {
3532 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3533 if (status) {
3534 DEBUGOUT("Failed to read EEPROM image\n");
3535 return status;
3536 }
3537 local_buffer = buf;
3538 } else {
3539 if (buffer_size < ptr)
3540 return IXGBE_ERR_PARAM;
3541 local_buffer = &buffer[ptr];
3542 }
3543
3544 if (size) {
3545 start = 0;
3546 length = size;
3547 } else {
3548 start = 1;
3549 length = local_buffer[0];
3550
3551 /* Skip pointer section if length is invalid. */
3552 if (length == 0xFFFF || length == 0 ||
3553 (ptr + length) >= hw->eeprom.word_size)
3554 return IXGBE_SUCCESS;
3555 }
3556
3557 if (buffer && ((u32)start + (u32)length > buffer_size))
3558 return IXGBE_ERR_PARAM;
3559
3560 for (i = start; length; i++, length--) {
3561 if (i == bufsz && !buffer) {
3562 ptr += bufsz;
3563 i = 0;
3564 if (length < bufsz)
3565 bufsz = length;
3566
3567 /* Read a chunk at the pointer location */
3568 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3569 bufsz, buf);
3570 if (status) {
3571 DEBUGOUT("Failed to read EEPROM image\n");
3572 return status;
3573 }
3574 }
3575 *csum += local_buffer[i];
3576 }
3577 return IXGBE_SUCCESS;
3578 }
3579
3580 /**
3581 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3582 * @hw: pointer to hardware structure
3583 * @buffer: pointer to buffer containing calculated checksum
3584 * @buffer_size: size of buffer
3585 *
3586 * Returns a negative error code on error, or the 16-bit checksum
3587 **/
3588 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3589 {
3590 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3591 u16 *local_buffer;
3592 s32 status;
3593 u16 checksum = 0;
3594 u16 pointer, i, size;
3595
3596 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3597
3598 hw->eeprom.ops.init_params(hw);
3599
3600 if (!buffer) {
3601 /* Read pointer area */
3602 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3603 IXGBE_EEPROM_LAST_WORD + 1,
3604 eeprom_ptrs);
3605 if (status) {
3606 DEBUGOUT("Failed to read EEPROM image\n");
3607 return status;
3608 }
3609 local_buffer = eeprom_ptrs;
3610 } else {
3611 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3612 return IXGBE_ERR_PARAM;
3613 local_buffer = buffer;
3614 }
3615
3616 /*
3617 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3618 * checksum word itself
3619 */
3620 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3621 if (i != IXGBE_EEPROM_CHECKSUM)
3622 checksum += local_buffer[i];
3623
3624 /*
3625 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3626 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3627 */
3628 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3629 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3630 continue;
3631
3632 pointer = local_buffer[i];
3633
3634 /* Skip pointer section if the pointer is invalid. */
3635 if (pointer == 0xFFFF || pointer == 0 ||
3636 pointer >= hw->eeprom.word_size)
3637 continue;
3638
3639 switch (i) {
3640 case IXGBE_PCIE_GENERAL_PTR:
3641 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3642 break;
3643 case IXGBE_PCIE_CONFIG0_PTR:
3644 case IXGBE_PCIE_CONFIG1_PTR:
3645 size = IXGBE_PCIE_CONFIG_SIZE;
3646 break;
3647 default:
3648 size = 0;
3649 break;
3650 }
3651
3652 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3653 buffer, buffer_size);
3654 if (status)
3655 return status;
3656 }
3657
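	/* The NVM stores the checksum word as the value that makes the
	 * covered words, including the checksum word itself, sum to
	 * IXGBE_EEPROM_SUM (0xBABA); hence the value to program is simply
	 * IXGBE_EEPROM_SUM minus the running sum computed above. For
	 * example, if the covered words sum to 0x1234, the stored checksum
	 * would be 0xA886.
	 */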
3658 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3659
3660 return (s32)checksum;
3661 }
3662
3663 /**
3664 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3665 * @hw: pointer to hardware structure
3666 *
3667 * Returns a negative error code on error, or the 16-bit checksum
3668 **/
3669 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3670 {
3671 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3672 }
3673
3674 /**
3675 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3676 * @hw: pointer to hardware structure
3677 * @checksum_val: calculated checksum
3678 *
3679 * Performs checksum calculation and validates the EEPROM checksum. If the
3680 * caller does not need checksum_val, the value can be NULL.
3681 **/
3682 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3683 {
3684 s32 status;
3685 u16 checksum;
3686 u16 read_checksum = 0;
3687
3688 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3689
3690 /* Read the first word from the EEPROM. If this times out or fails, do
3691 * not continue or we could be in for a very long wait while every
3692 * EEPROM read fails
3693 */
3694 status = hw->eeprom.ops.read(hw, 0, &checksum);
3695 if (status) {
3696 DEBUGOUT("EEPROM read failed\n");
3697 return status;
3698 }
3699
3700 status = hw->eeprom.ops.calc_checksum(hw);
3701 if (status < 0)
3702 return status;
3703
3704 checksum = (u16)(status & 0xffff);
3705
3706 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3707 &read_checksum);
3708 if (status)
3709 return status;
3710
3711 /* Verify read checksum from EEPROM is the same as
3712 * calculated checksum
3713 */
3714 if (read_checksum != checksum) {
3715 status = IXGBE_ERR_EEPROM_CHECKSUM;
3716 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3717 "Invalid EEPROM checksum");
3718 }
3719
3720 /* If the user cares, return the calculated checksum */
3721 if (checksum_val)
3722 *checksum_val = checksum;
3723
3724 return status;
3725 }
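
/*
 * Usage sketch (hypothetical caller, not part of this file): a diagnostic
 * or attach path could validate the image and report the computed value:
 *
 *	u16 csum;
 *
 *	if (ixgbe_validate_eeprom_checksum_X550(hw, &csum) ==
 *	    IXGBE_ERR_EEPROM_CHECKSUM)
 *		DEBUGOUT1("EEPROM checksum mismatch, computed 0x%04x\n", csum);
 */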
3726
3727 /**
3728 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3729 * @hw: pointer to hardware structure
3730 *
3731 * After writing EEPROM to shadow RAM using EEWR register, software calculates
3732 * checksum and updates the EEPROM and instructs the hardware to update
3733 * the flash.
3734 **/
3735 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3736 {
3737 s32 status;
3738 u16 checksum = 0;
3739
3740 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3741
3742 /* Read the first word from the EEPROM. If this times out or fails, do
3743 * not continue or we could be in for a very long wait while every
3744 * EEPROM read fails
3745 */
3746 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3747 if (status) {
3748 DEBUGOUT("EEPROM read failed\n");
3749 return status;
3750 }
3751
3752 status = ixgbe_calc_eeprom_checksum_X550(hw);
3753 if (status < 0)
3754 return status;
3755
3756 checksum = (u16)(status & 0xffff);
3757
3758 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3759 checksum);
3760 if (status)
3761 return status;
3762
3763 status = ixgbe_update_flash_X550(hw);
3764
3765 return status;
3766 }
3767
3768 /**
3769 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3770 * @hw: pointer to hardware structure
3771 *
3772 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3773 **/
3774 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3775 {
3776 s32 status = IXGBE_SUCCESS;
3777 union ixgbe_hic_hdr2 buffer;
3778
3779 DEBUGFUNC("ixgbe_update_flash_X550");
3780
3781 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3782 buffer.req.buf_lenh = 0;
3783 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3784 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3785
3786 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3787 sizeof(buffer),
3788 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3789
3790 return status;
3791 }
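
/*
 * Note: the shadow RAM dump request above consists of the header alone
 * (the buffer is just the ixgbe_hic_hdr2 union), so no payload follows the
 * command; once the command is accepted, firmware copies the shadow RAM
 * contents to the flash on its own.
 */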
3792
3793 /**
3794 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3795 * @hw: pointer to hardware structure
3796 *
3797 * Determines physical layer capabilities of the current configuration.
3798 **/
3799 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3800 {
3801 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3802 u16 ext_ability = 0;
3803
3804 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3805
3806 hw->phy.ops.identify(hw);
3807
3808 switch (hw->phy.type) {
3809 case ixgbe_phy_x550em_kr:
3810 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3811 if (hw->phy.nw_mng_if_sel &
3812 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3813 physical_layer =
3814 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3815 break;
3816 } else if (hw->device_id ==
3817 IXGBE_DEV_ID_X550EM_A_KR_L) {
3818 physical_layer =
3819 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3820 break;
3821 }
3822 }
3823 /* fall through */
3824 case ixgbe_phy_x550em_xfi:
3825 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3826 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3827 break;
3828 case ixgbe_phy_x550em_kx4:
3829 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3830 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3831 break;
3832 case ixgbe_phy_x550em_ext_t:
3833 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3834 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3835 &ext_ability);
3836 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3837 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3838 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3839 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3840 break;
3841 case ixgbe_phy_fw:
3842 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3843 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3844 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3845 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3846 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3847 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3848 break;
3849 case ixgbe_phy_sgmii:
3850 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3851 break;
3852 case ixgbe_phy_ext_1g_t:
3853 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3854 break;
3855 default:
3856 break;
3857 }
3858
3859 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3860 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3861
3862 return physical_layer;
3863 }
3864
3865 /**
3866 * ixgbe_get_bus_info_X550em - Set PCI bus info
3867 * @hw: pointer to hardware structure
3868 *
3869 * Sets bus link width and speed to unknown because X550em is
3870 * not a PCI device.
3871 **/
3872 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3873 {
3874
3875 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3876
3877 hw->bus.width = ixgbe_bus_width_unknown;
3878 hw->bus.speed = ixgbe_bus_speed_unknown;
3879
3880 hw->mac.ops.set_lan_id(hw);
3881
3882 return IXGBE_SUCCESS;
3883 }
3884
3885 /**
3886 * ixgbe_disable_rx_x550 - Disable RX unit
3887 * @hw: pointer to hardware structure
3888 *
3889 * Disables the Rx DMA unit for x550
3890 **/
3891 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3892 {
3893 u32 rxctrl, pfdtxgswc;
3894 s32 status;
3895 struct ixgbe_hic_disable_rxen fw_cmd;
3896
3897 DEBUGFUNC("ixgbe_disable_rx_dma_x550");
3898
3899 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3900 if (rxctrl & IXGBE_RXCTRL_RXEN) {
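		/* If VT loopback is enabled, turn it off and remember that
		 * in hw->mac.set_lben so a later Rx-enable path can restore
		 * it.
		 */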
3901 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3902 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3903 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3904 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3905 hw->mac.set_lben = TRUE;
3906 } else {
3907 hw->mac.set_lben = FALSE;
3908 }
3909
3910 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3911 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3912 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3913 fw_cmd.port_number = (u8)hw->bus.lan_id;
3914
3915 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3916 sizeof(struct ixgbe_hic_disable_rxen),
3917 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3918
3919 /* If we fail - disable RX using register write */
3920 if (status) {
3921 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3922 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3923 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3924 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3925 }
3926 }
3927 }
3928 }
3929
3930 /**
3931 * ixgbe_enter_lplu_t_x550em - Transition to low power states
3932 * @hw: pointer to hardware structure
3933 *
3934 * Configures Low Power Link Up on transition to low power states
3935 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3936 * X557 PHY immediately prior to entering LPLU.
3937 **/
3938 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3939 {
3940 u16 an_10g_cntl_reg, autoneg_reg, speed;
3941 s32 status;
3942 ixgbe_link_speed lcd_speed;
3943 u32 save_autoneg;
3944 bool link_up;
3945
3946 /* SW LPLU not required on later HW revisions. */
3947 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3948 (IXGBE_FUSES0_REV_MASK &
3949 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3950 return IXGBE_SUCCESS;
3951
3952 /* If blocked by MNG FW, then don't restart AN */
3953 if (ixgbe_check_reset_blocked(hw))
3954 return IXGBE_SUCCESS;
3955
3956 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3957 if (status != IXGBE_SUCCESS)
3958 return status;
3959
3960 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3961
3962 if (status != IXGBE_SUCCESS)
3963 return status;
3964
3965 	/* If link is down, LPLU is disabled in the NVM, or neither WoL nor
3966 	 * manageability is enabled, force link down by entering low power mode.
3967 	 */
3968 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3969 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3970 return ixgbe_set_copper_phy_power(hw, FALSE);
3971
3972 /* Determine LCD */
3973 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3974
3975 if (status != IXGBE_SUCCESS)
3976 return status;
3977
3978 /* If no valid LCD link speed, then force link down and exit. */
3979 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3980 return ixgbe_set_copper_phy_power(hw, FALSE);
3981
3982 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3983 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3984 &speed);
3985
3986 if (status != IXGBE_SUCCESS)
3987 return status;
3988
3989 /* If no link now, speed is invalid so take link down */
3990 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3991 if (status != IXGBE_SUCCESS)
3992 return ixgbe_set_copper_phy_power(hw, FALSE);
3993
3994 /* clear everything but the speed bits */
3995 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3996
3997 /* If current speed is already LCD, then exit. */
3998 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3999 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
4000 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
4001 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
4002 return status;
4003
4004 /* Clear AN completed indication */
4005 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
4006 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4007 &autoneg_reg);
4008
4009 if (status != IXGBE_SUCCESS)
4010 return status;
4011
4012 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4013 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4014 &an_10g_cntl_reg);
4015
4016 if (status != IXGBE_SUCCESS)
4017 return status;
4018
4019 status = hw->phy.ops.read_reg(hw,
4020 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4021 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4022 &autoneg_reg);
4023
4024 if (status != IXGBE_SUCCESS)
4025 return status;
4026
4027 save_autoneg = hw->phy.autoneg_advertised;
4028
4029 	/* Set up link at the lowest common link speed */
4030 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4031
4032 /* restore autoneg from before setting lplu speed */
4033 hw->phy.autoneg_advertised = save_autoneg;
4034
4035 return status;
4036 }
4037
4038 /**
4039 * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4040 * @hw: pointer to hardware structure
4041 * @lcd_speed: pointer to lowest common link speed
4042 *
4043 * Determine lowest common link speed with link partner.
4044 **/
4045 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4046 {
4047 u16 an_lp_status;
4048 s32 status;
4049 u16 word = hw->eeprom.ctrl_word_3;
4050
4051 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4052
4053 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4054 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4055 &an_lp_status);
4056
4057 if (status != IXGBE_SUCCESS)
4058 return status;
4059
4060 /* If link partner advertised 1G, return 1G */
4061 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4062 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4063 return status;
4064 }
4065
4066 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4067 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4068 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4069 return status;
4070
4071 /* Link partner not capable of lower speeds, return 10G */
4072 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4073 return status;
4074 }
4075
4076 /**
4077 * ixgbe_setup_fc_X550em - Set up flow control
4078 * @hw: pointer to hardware structure
4079 *
4080 * Called at init time to set up flow control.
4081 **/
4082 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4083 {
4084 s32 ret_val = IXGBE_SUCCESS;
4085 u32 pause, asm_dir, reg_val;
4086
4087 DEBUGFUNC("ixgbe_setup_fc_X550em");
4088
4089 /* Validate the requested mode */
4090 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4091 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4092 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4093 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4094 goto out;
4095 }
4096
4097 /* 10gig parts do not have a word in the EEPROM to determine the
4098 * default flow control setting, so we explicitly set it to full.
4099 */
4100 if (hw->fc.requested_mode == ixgbe_fc_default)
4101 hw->fc.requested_mode = ixgbe_fc_full;
4102
4103 /* Determine PAUSE and ASM_DIR bits. */
4104 switch (hw->fc.requested_mode) {
4105 case ixgbe_fc_none:
4106 pause = 0;
4107 asm_dir = 0;
4108 break;
4109 case ixgbe_fc_tx_pause:
4110 pause = 0;
4111 asm_dir = 1;
4112 break;
4113 case ixgbe_fc_rx_pause:
4114 /* Rx Flow control is enabled and Tx Flow control is
4115 * disabled by software override. Since there really
4116 * isn't a way to advertise that we are capable of RX
4117 * Pause ONLY, we will advertise that we support both
4118 * symmetric and asymmetric Rx PAUSE, as such we fall
4119 * through to the fc_full statement. Later, we will
4120 * disable the adapter's ability to send PAUSE frames.
4121 */
4122 case ixgbe_fc_full:
4123 pause = 1;
4124 asm_dir = 1;
4125 break;
4126 default:
4127 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4128 "Flow control param set incorrectly\n");
4129 ret_val = IXGBE_ERR_CONFIG;
4130 goto out;
4131 }
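
	/* Summary of the advertisement bits chosen above:
	 *
	 *	requested_mode	PAUSE	ASM_DIR
	 *	none		0	0
	 *	tx_pause	0	1
	 *	rx_pause/full	1	1
	 */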
4132
4133 switch (hw->device_id) {
4134 case IXGBE_DEV_ID_X550EM_X_KR:
4135 case IXGBE_DEV_ID_X550EM_A_KR:
4136 case IXGBE_DEV_ID_X550EM_A_KR_L:
4137 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4138 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4139 			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4140 if (ret_val != IXGBE_SUCCESS)
4141 goto out;
4142 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4143 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4144 if (pause)
4145 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4146 if (asm_dir)
4147 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4148 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4149 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4150 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4151
4152 /* This device does not fully support AN. */
4153 hw->fc.disable_fc_autoneg = TRUE;
4154 break;
4155 case IXGBE_DEV_ID_X550EM_X_XFI:
4156 hw->fc.disable_fc_autoneg = TRUE;
4157 break;
4158 default:
4159 break;
4160 }
4161
4162 out:
4163 return ret_val;
4164 }
4165
4166 /**
4167 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4168 * @hw: pointer to hardware structure
4169 *
4170 * Enable flow control according to IEEE clause 37.
4171 **/
4172 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4173 {
4174 u32 link_s1, lp_an_page_low, an_cntl_1;
4175 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4176 ixgbe_link_speed speed;
4177 bool link_up;
4178
4179 /* AN should have completed when the cable was plugged in.
4180 * Look for reasons to bail out. Bail out if:
4181 * - FC autoneg is disabled, or if
4182 * - link is not up.
4183 */
4184 if (hw->fc.disable_fc_autoneg) {
4185 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4186 "Flow control autoneg is disabled");
4187 goto out;
4188 }
4189
4190 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4191 if (!link_up) {
4192 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4193 goto out;
4194 }
4195
4196 	/* Check if auto-negotiation has completed */
4197 status = hw->mac.ops.read_iosf_sb_reg(hw,
4198 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4199 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4200
4201 if (status != IXGBE_SUCCESS ||
4202 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4203 DEBUGOUT("Auto-Negotiation did not complete\n");
4204 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4205 goto out;
4206 }
4207
4208 /* Read the 10g AN autoc and LP ability registers and resolve
4209 * local flow control settings accordingly
4210 */
4211 status = hw->mac.ops.read_iosf_sb_reg(hw,
4212 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4213 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4214
4215 if (status != IXGBE_SUCCESS) {
4216 DEBUGOUT("Auto-Negotiation did not complete\n");
4217 goto out;
4218 }
4219
4220 status = hw->mac.ops.read_iosf_sb_reg(hw,
4221 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4222 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4223
4224 if (status != IXGBE_SUCCESS) {
4225 DEBUGOUT("Auto-Negotiation did not complete\n");
4226 goto out;
4227 }
4228
4229 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4230 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4231 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4232 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4233 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4234
4235 out:
4236 if (status == IXGBE_SUCCESS) {
4237 hw->fc.fc_was_autonegged = TRUE;
4238 } else {
4239 hw->fc.fc_was_autonegged = FALSE;
4240 hw->fc.current_mode = hw->fc.requested_mode;
4241 }
4242 }
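
/*
 * ixgbe_negotiate_fc() (shared code) resolves hw->fc.current_mode from the
 * local and link-partner PAUSE/ASM_DIR advertisement bits passed above,
 * following the usual symmetric/asymmetric pause resolution rules.
 */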
4243
4244 /**
4245 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4246 * @hw: pointer to hardware structure
4247 *
4248 **/
4249 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4250 {
4251 hw->fc.fc_was_autonegged = FALSE;
4252 hw->fc.current_mode = hw->fc.requested_mode;
4253 }
4254
4255 /**
4256 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4257 * @hw: pointer to hardware structure
4258 *
4259 * Enable flow control according to IEEE clause 37.
4260 **/
4261 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4262 {
4263 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4264 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4265 ixgbe_link_speed speed;
4266 bool link_up;
4267
4268 /* AN should have completed when the cable was plugged in.
4269 * Look for reasons to bail out. Bail out if:
4270 * - FC autoneg is disabled, or if
4271 * - link is not up.
4272 */
4273 if (hw->fc.disable_fc_autoneg) {
4274 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4275 "Flow control autoneg is disabled");
4276 goto out;
4277 }
4278
4279 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4280 if (!link_up) {
4281 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4282 goto out;
4283 }
4284
4285 /* Check if auto-negotiation has completed */
4286 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4287 if (status != IXGBE_SUCCESS ||
4288 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4289 DEBUGOUT("Auto-Negotiation did not complete\n");
4290 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4291 goto out;
4292 }
4293
4294 /* Negotiate the flow control */
4295 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4296 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4297 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4298 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4299 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4300
4301 out:
4302 if (status == IXGBE_SUCCESS) {
4303 hw->fc.fc_was_autonegged = TRUE;
4304 } else {
4305 hw->fc.fc_was_autonegged = FALSE;
4306 hw->fc.current_mode = hw->fc.requested_mode;
4307 }
4308 }
4309
4310 /**
4311 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4312 * @hw: pointer to hardware structure
4313 *
4314 * Called at init time to set up flow control.
4315 **/
4316 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4317 {
4318 s32 status = IXGBE_SUCCESS;
4319 u32 an_cntl = 0;
4320
4321 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4322
4323 /* Validate the requested mode */
4324 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4325 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4326 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4327 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4328 }
4329
4330 if (hw->fc.requested_mode == ixgbe_fc_default)
4331 hw->fc.requested_mode = ixgbe_fc_full;
4332
4333 /* Set up the 1G and 10G flow control advertisement registers so the
4334 * HW will be able to do FC autoneg once the cable is plugged in. If
4335 * we link at 10G, the 1G advertisement is harmless and vice versa.
4336 */
4337 status = hw->mac.ops.read_iosf_sb_reg(hw,
4338 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4339 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4340
4341 if (status != IXGBE_SUCCESS) {
4342 DEBUGOUT("Auto-Negotiation did not complete\n");
4343 return status;
4344 }
4345
4346 /* The possible values of fc.requested_mode are:
4347 * 0: Flow control is completely disabled
4348 * 1: Rx flow control is enabled (we can receive pause frames,
4349 * but not send pause frames).
4350 * 2: Tx flow control is enabled (we can send pause frames but
4351 * we do not support receiving pause frames).
4352 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4353 * other: Invalid.
4354 */
4355 switch (hw->fc.requested_mode) {
4356 case ixgbe_fc_none:
4357 /* Flow control completely disabled by software override. */
4358 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4359 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4360 break;
4361 case ixgbe_fc_tx_pause:
4362 /* Tx Flow control is enabled, and Rx Flow control is
4363 * disabled by software override.
4364 */
4365 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4366 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4367 break;
4368 case ixgbe_fc_rx_pause:
4369 /* Rx Flow control is enabled and Tx Flow control is
4370 * disabled by software override. Since there really
4371 * isn't a way to advertise that we are capable of RX
4372 * Pause ONLY, we will advertise that we support both
4373 * symmetric and asymmetric Rx PAUSE, as such we fall
4374 * through to the fc_full statement. Later, we will
4375 * disable the adapter's ability to send PAUSE frames.
4376 */
4377 case ixgbe_fc_full:
4378 /* Flow control (both Rx and Tx) is enabled by SW override. */
4379 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4380 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4381 break;
4382 default:
4383 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4384 "Flow control param set incorrectly\n");
4385 return IXGBE_ERR_CONFIG;
4386 }
4387
4388 status = hw->mac.ops.write_iosf_sb_reg(hw,
4389 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4390 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4391
4392 /* Restart auto-negotiation. */
4393 status = ixgbe_restart_an_internal_phy_x550em(hw);
4394
4395 return status;
4396 }
4397
4398 /**
4399 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4400 * @hw: pointer to hardware structure
4401 * @state: set mux if 1, clear if 0
4402 */
4403 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4404 {
4405 u32 esdp;
4406
4407 if (!hw->bus.lan_id)
4408 return;
4409 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4410 if (state)
4411 esdp |= IXGBE_ESDP_SDP1;
4412 else
4413 esdp &= ~IXGBE_ESDP_SDP1;
4414 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4415 IXGBE_WRITE_FLUSH(hw);
4416 }
4417
4418 /**
4419 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4420 * @hw: pointer to hardware structure
4421 * @mask: Mask to specify which semaphore to acquire
4422 *
4423 * Acquires the SWFW semaphore and sets the I2C MUX
4424 **/
4425 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4426 {
4427 s32 status;
4428
4429 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4430
4431 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4432 if (status)
4433 return status;
4434
4435 if (mask & IXGBE_GSSR_I2C_MASK)
4436 ixgbe_set_mux(hw, 1);
4437
4438 return IXGBE_SUCCESS;
4439 }
4440
4441 /**
4442 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4443 * @hw: pointer to hardware structure
4444 * @mask: Mask to specify which semaphore to release
4445 *
4446 * Releases the SWFW semaphore and clears the I2C MUX
4447 **/
4448 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4449 {
4450 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4451
4452 if (mask & IXGBE_GSSR_I2C_MASK)
4453 ixgbe_set_mux(hw, 0);
4454
4455 ixgbe_release_swfw_sync_X540(hw, mask);
4456 }
4457
4458 /**
4459 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4460 * @hw: pointer to hardware structure
4461 * @mask: Mask to specify which semaphore to acquire
4462 *
4463 * Acquires the SWFW semaphore and gets the shared PHY token as needed
4464 */
4465 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4466 {
4467 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4468 int retries = FW_PHY_TOKEN_RETRIES;
4469 s32 status = IXGBE_SUCCESS;
4470
4471 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4472
4473 while (--retries) {
4474 status = IXGBE_SUCCESS;
4475 if (hmask)
4476 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4477 if (status) {
4478 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4479 status);
4480 return status;
4481 }
4482 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4483 return IXGBE_SUCCESS;
4484
4485 status = ixgbe_get_phy_token(hw);
4486 if (status == IXGBE_ERR_TOKEN_RETRY)
4487 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4488 status);
4489
4490 if (status == IXGBE_SUCCESS)
4491 return IXGBE_SUCCESS;
4492
4493 if (hmask)
4494 ixgbe_release_swfw_sync_X540(hw, hmask);
4495
4496 if (status != IXGBE_ERR_TOKEN_RETRY) {
4497 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4498 status);
4499 return status;
4500 }
4501 }
4502
4503 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4504 hw->phy.id);
4505 return status;
4506 }
4507
4508 /**
4509 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4510 * @hw: pointer to hardware structure
4511 * @mask: Mask to specify which semaphore to release
4512 *
4513 * Releases the SWFW semaphore and puts the shared phy token as needed
4514 */
4515 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4516 {
4517 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4518
4519 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4520
4521 if (mask & IXGBE_GSSR_TOKEN_SM)
4522 ixgbe_put_phy_token(hw);
4523
4524 if (hmask)
4525 ixgbe_release_swfw_sync_X540(hw, hmask);
4526 }
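
/*
 * Usage sketch (illustrative only): paths that touch the MDIO shared
 * between the two MACs add IXGBE_GSSR_TOKEN_SM to the mask, mirroring
 * ixgbe_read_phy_reg_x550a() below:
 *
 *	u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
 *
 *	if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
 *		... MDIO access ...
 *		hw->mac.ops.release_swfw_sync(hw, mask);
 *	}
 */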
4527
4528 /**
4529 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4530 * @hw: pointer to hardware structure
4531 * @reg_addr: 32 bit address of PHY register to read
4532 * @device_type: 5 bit device type
4533 * @phy_data: Pointer to read data from PHY register
4534 *
4535 * Reads a value from a specified PHY register using the SWFW lock and PHY
4536 * Token. The PHY Token is needed since the MDIO is shared between two MAC
4537 * instances.
4538 **/
4539 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4540 u32 device_type, u16 *phy_data)
4541 {
4542 s32 status;
4543 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4544
4545 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4546
4547 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4548 return IXGBE_ERR_SWFW_SYNC;
4549
4550 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4551
4552 hw->mac.ops.release_swfw_sync(hw, mask);
4553
4554 return status;
4555 }
4556
4557 /**
4558 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4559 * @hw: pointer to hardware structure
4560 * @reg_addr: 32 bit PHY register to write
4561 * @device_type: 5 bit device type
4562 * @phy_data: Data to write to the PHY register
4563 *
4564 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4565 * The PHY Token is needed since the MDIO is shared between two MAC instances.
4566 **/
4567 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4568 u32 device_type, u16 phy_data)
4569 {
4570 s32 status;
4571 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4572
4573 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4574
4575 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4576 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4577 phy_data);
4578 hw->mac.ops.release_swfw_sync(hw, mask);
4579 } else {
4580 status = IXGBE_ERR_SWFW_SYNC;
4581 }
4582
4583 return status;
4584 }
4585
4586 /**
4587 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4588 * @hw: pointer to hardware structure
4589 *
4590 * Handle an external Base T PHY interrupt. If it is a high temperature
4591 * failure alarm, return an error; if it is a link status change, set up
4592 * the internal/external PHY link.
4593 *
4594 * Returns IXGBE_ERR_OVERTEMP if the interrupt is a high temperature
4595 * failure alarm, otherwise returns the PHY access status.
4596 */
4597 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4598 {
4599 bool lsc;
4600 u32 status;
4601
4602 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4603
4604 if (status != IXGBE_SUCCESS)
4605 return status;
4606
4607 if (lsc)
4608 return ixgbe_setup_internal_phy(hw);
4609
4610 return IXGBE_SUCCESS;
4611 }
4612
4613 /**
4614 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4615 * @hw: pointer to hardware structure
4616 * @speed: new link speed
4617 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4618 *
4619 * Setup internal/external PHY link speed based on link speed, then set
4620 * external PHY auto advertised link speed.
4621 *
4622 * Returns error status for any failure
4623 **/
4624 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4625 ixgbe_link_speed speed,
4626 bool autoneg_wait_to_complete)
4627 {
4628 s32 status;
4629 ixgbe_link_speed force_speed;
4630
4631 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4632
4633 	/* Set up the internal/external PHY link speed to iXFI (10G), unless
4634 	 * only 1G is auto advertised, in which case set up a KX link.
4635 */
4636 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4637 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4638 else
4639 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4640
4641 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4642 */
4643 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4644 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4645 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4646
4647 if (status != IXGBE_SUCCESS)
4648 return status;
4649 }
4650
4651 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4652 }
4653
4654 /**
4655 * ixgbe_check_link_t_X550em - Determine link and speed status
4656 * @hw: pointer to hardware structure
4657 * @speed: pointer to link speed
4658 * @link_up: TRUE when link is up
4659 * @link_up_wait_to_complete: bool used to wait for link up or not
4660 *
4661 * Check that both the MAC and X557 external PHY have link.
4662 **/
4663 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4664 bool *link_up, bool link_up_wait_to_complete)
4665 {
4666 u32 status;
4667 u16 i, autoneg_status = 0;
4668
4669 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4670 return IXGBE_ERR_CONFIG;
4671
4672 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4673 link_up_wait_to_complete);
4674
4675 /* If check link fails or MAC link is not up, then return */
4676 if (status != IXGBE_SUCCESS || !(*link_up))
4677 return status;
4678
4679 	/* MAC link is up, so check the external X557 PHY link. Its link
4680 	 * status is latching low, so it only indicates that a link drop
4681 	 * occurred; back-to-back reads are needed to get the current link
4682 	 * state.
4683 */
4684 for (i = 0; i < 2; i++) {
4685 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4686 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4687 &autoneg_status);
4688
4689 if (status != IXGBE_SUCCESS)
4690 return status;
4691 }
4692
4693 /* If external PHY link is not up, then indicate link not up */
4694 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4695 *link_up = FALSE;
4696
4697 return IXGBE_SUCCESS;
4698 }
4699
4700 /**
4701 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4702 * @hw: pointer to hardware structure
4703 **/
4704 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4705 {
4706 s32 status;
4707
4708 status = ixgbe_reset_phy_generic(hw);
4709
4710 if (status != IXGBE_SUCCESS)
4711 return status;
4712
4713 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4714 return ixgbe_enable_lasi_ext_t_x550em(hw);
4715 }
4716
4717 /**
4718 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4719 * @hw: pointer to hardware structure
4720 * @led_idx: led number to turn on
4721 **/
4722 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4723 {
4724 u16 phy_data;
4725
4726 DEBUGFUNC("ixgbe_led_on_t_X550em");
4727
4728 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4729 return IXGBE_ERR_PARAM;
4730
4731 /* To turn on the LED, set mode to ON. */
4732 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4733 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4734 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4735 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4736 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4737
4738 /* Some designs have the LEDs wired to the MAC */
4739 return ixgbe_led_on_generic(hw, led_idx);
4740 }
4741
4742 /**
4743 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4744 * @hw: pointer to hardware structure
4745 * @led_idx: led number to turn off
4746 **/
4747 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4748 {
4749 u16 phy_data;
4750
4751 DEBUGFUNC("ixgbe_led_off_t_X550em");
4752
4753 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4754 return IXGBE_ERR_PARAM;
4755
4756 	/* To turn off the LED, set mode to OFF. */
4757 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4758 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4759 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4760 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4761 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4762
4763 /* Some designs have the LEDs wired to the MAC */
4764 return ixgbe_led_off_generic(hw, led_idx);
4765 }
4766
4767 /**
4768 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4769 * @hw: pointer to the HW structure
4770 * @maj: driver version major number
4771 * @min: driver version minor number
4772 * @build: driver version build number
4773 * @sub: driver version sub build number
4774 * @len: length of driver_ver string
4775 * @driver_ver: driver string
4776 *
4777 * Sends the driver version number to firmware through the manageability
4778 * block. Returns IXGBE_SUCCESS on success, IXGBE_ERR_SWFW_SYNC if the
4779 * semaphore cannot be acquired, or IXGBE_ERR_HOST_INTERFACE_COMMAND if
4780 * the command fails.
4781 **/
4782 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4783 u8 build, u8 sub, u16 len, const char *driver_ver)
4784 {
4785 struct ixgbe_hic_drv_info2 fw_cmd;
4786 s32 ret_val = IXGBE_SUCCESS;
4787 int i;
4788
4789 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4790
4791 if ((len == 0) || (driver_ver == NULL) ||
4792 (len > sizeof(fw_cmd.driver_string)))
4793 return IXGBE_ERR_INVALID_ARGUMENT;
4794
4795 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4796 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4797 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4798 fw_cmd.port_num = (u8)hw->bus.func;
4799 fw_cmd.ver_maj = maj;
4800 fw_cmd.ver_min = min;
4801 fw_cmd.ver_build = build;
4802 fw_cmd.ver_sub = sub;
4803 fw_cmd.hdr.checksum = 0;
4804 memcpy(fw_cmd.driver_string, driver_ver, len);
4805 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4806 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4807
4808 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4809 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4810 sizeof(fw_cmd),
4811 IXGBE_HI_COMMAND_TIMEOUT,
4812 TRUE);
4813 if (ret_val != IXGBE_SUCCESS)
4814 continue;
4815
4816 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4817 FW_CEM_RESP_STATUS_SUCCESS)
4818 ret_val = IXGBE_SUCCESS;
4819 else
4820 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4821
4822 break;
4823 }
4824
4825 return ret_val;
4826 }
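
/*
 * Usage sketch (hypothetical version numbers and string): a driver attach
 * path could advertise itself to the management firmware with
 *
 *	static const char drv_ver[] = "1.0.0";
 *
 *	ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
 *	    sizeof(drv_ver), drv_ver);
 *
 * where the length covers the string plus its terminating NUL and must not
 * exceed sizeof(fw_cmd.driver_string), per the check at the top of the
 * function.
 */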
4827
4828 /**
4829 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4830 * @hw: pointer to hardware structure
4831 *
4832 * Returns TRUE if in FW NVM recovery mode.
4833 **/
4834 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4835 {
4836 u32 fwsm;
4837
4838 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4839
4840 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4841 }
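
/*
 * Note: when this reports TRUE the NVM image is being recovered by
 * firmware, so callers typically skip further NVM/PHY initialization
 * rather than issuing host interface or EEPROM commands that would fail.
 */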
4842
4843