ixgbe_x550.c revision 1.12.2.1 1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
34
35 #include "ixgbe_x550.h"
36 #include "ixgbe_x540.h"
37 #include "ixgbe_type.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41 #include <dev/mii/mii.h>
42
43 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
44 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed,
46 bool autoneg_wait_to_complete);
47 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
48 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
49 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
50
51 /**
52 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
53 * @hw: pointer to hardware structure
54 *
55 * Initialize the function pointers and assign the MAC type for X550.
56 * Does not touch the hardware.
57 **/
58 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
59 {
60 struct ixgbe_mac_info *mac = &hw->mac;
61 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
62 s32 ret_val;
63
64 DEBUGFUNC("ixgbe_init_ops_X550");
65
66 ret_val = ixgbe_init_ops_X540(hw);
67 mac->ops.dmac_config = ixgbe_dmac_config_X550;
68 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
69 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
70 mac->ops.setup_eee = NULL;
71 mac->ops.set_source_address_pruning =
72 ixgbe_set_source_address_pruning_X550;
73 mac->ops.set_ethertype_anti_spoofing =
74 ixgbe_set_ethertype_anti_spoofing_X550;
75
76 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
78 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
79 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
80 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
81 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
82 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
83 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
84 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
85
86 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
87 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
88 mac->ops.mdd_event = ixgbe_mdd_event_X550;
89 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
90 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
91 mac->ops.disable_rx = ixgbe_disable_rx_x550;
92 /* Manageability interface */
93 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
94 switch (hw->device_id) {
95 case IXGBE_DEV_ID_X550EM_X_1G_T:
96 hw->mac.ops.led_on = NULL;
97 hw->mac.ops.led_off = NULL;
98 break;
99 case IXGBE_DEV_ID_X550EM_X_10G_T:
100 case IXGBE_DEV_ID_X550EM_A_10G_T:
101 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
102 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
103 break;
104 default:
105 break;
106 }
107 return ret_val;
108 }
109
110 /**
111 * ixgbe_read_cs4227 - Read CS4227 register
112 * @hw: pointer to hardware structure
113 * @reg: register number to write
114 * @value: pointer to receive value read
115 *
116 * Returns status code
117 **/
118 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
119 {
120 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
121 }
122
123 /**
124 * ixgbe_write_cs4227 - Write CS4227 register
125 * @hw: pointer to hardware structure
126 * @reg: register number to write
127 * @value: value to write to register
128 *
129 * Returns status code
130 **/
131 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
132 {
133 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
134 }
135
136 /**
137 * ixgbe_read_pe - Read register from port expander
138 * @hw: pointer to hardware structure
139 * @reg: register number to read
140 * @value: pointer to receive read value
141 *
142 * Returns status code
143 **/
144 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
145 {
146 s32 status;
147
148 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
149 if (status != IXGBE_SUCCESS)
150 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
151 "port expander access failed with %d\n", status);
152 return status;
153 }
154
155 /**
156 * ixgbe_write_pe - Write register to port expander
157 * @hw: pointer to hardware structure
158 * @reg: register number to write
159 * @value: value to write
160 *
161 * Returns status code
162 **/
163 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
164 {
165 s32 status;
166
167 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
168 if (status != IXGBE_SUCCESS)
169 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
170 "port expander access failed with %d\n", status);
171 return status;
172 }
173
174 /**
175 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
176 * @hw: pointer to hardware structure
177 *
178 * This function assumes that the caller has acquired the proper semaphore.
179 * Returns error code
180 **/
181 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
182 {
183 s32 status;
184 u32 retry;
185 u16 value;
186 u8 reg;
187
188 /* Trigger hard reset. */
189 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®);
190 if (status != IXGBE_SUCCESS)
191 return status;
192 reg |= IXGBE_PE_BIT1;
193 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
194 if (status != IXGBE_SUCCESS)
195 return status;
196
197 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, ®);
198 if (status != IXGBE_SUCCESS)
199 return status;
200 reg &= ~IXGBE_PE_BIT1;
201 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
202 if (status != IXGBE_SUCCESS)
203 return status;
204
205 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®);
206 if (status != IXGBE_SUCCESS)
207 return status;
208 reg &= ~IXGBE_PE_BIT1;
209 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
210 if (status != IXGBE_SUCCESS)
211 return status;
212
213 usec_delay(IXGBE_CS4227_RESET_HOLD);
214
215 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®);
216 if (status != IXGBE_SUCCESS)
217 return status;
218 reg |= IXGBE_PE_BIT1;
219 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
220 if (status != IXGBE_SUCCESS)
221 return status;
222
223 /* Wait for the reset to complete. */
224 msec_delay(IXGBE_CS4227_RESET_DELAY);
225 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
226 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
227 &value);
228 if (status == IXGBE_SUCCESS &&
229 value == IXGBE_CS4227_EEPROM_LOAD_OK)
230 break;
231 msec_delay(IXGBE_CS4227_CHECK_DELAY);
232 }
233 if (retry == IXGBE_CS4227_RETRIES) {
234 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
235 "CS4227 reset did not complete.");
236 return IXGBE_ERR_PHY;
237 }
238
239 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
240 if (status != IXGBE_SUCCESS ||
241 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
242 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
243 "CS4227 EEPROM did not load successfully.");
244 return IXGBE_ERR_PHY;
245 }
246
247 return IXGBE_SUCCESS;
248 }
249
250 /**
251 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
252 * @hw: pointer to hardware structure
253 **/
254 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
255 {
256 s32 status = IXGBE_SUCCESS;
257 u32 swfw_mask = hw->phy.phy_semaphore_mask;
258 u16 value = 0;
259 u8 retry;
260
261 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
262 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
263 if (status != IXGBE_SUCCESS) {
264 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
265 "semaphore failed with %d", status);
266 msec_delay(IXGBE_CS4227_CHECK_DELAY);
267 continue;
268 }
269
270 /* Get status of reset flow. */
271 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
272
273 if (status == IXGBE_SUCCESS &&
274 value == IXGBE_CS4227_RESET_COMPLETE)
275 goto out;
276
277 if (status != IXGBE_SUCCESS ||
278 value != IXGBE_CS4227_RESET_PENDING)
279 break;
280
281 /* Reset is pending. Wait and check again. */
282 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
283 msec_delay(IXGBE_CS4227_CHECK_DELAY);
284 }
285
286 /* If still pending, assume other instance failed. */
287 if (retry == IXGBE_CS4227_RETRIES) {
288 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
289 if (status != IXGBE_SUCCESS) {
290 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
291 "semaphore failed with %d", status);
292 return;
293 }
294 }
295
296 /* Reset the CS4227. */
297 status = ixgbe_reset_cs4227(hw);
298 if (status != IXGBE_SUCCESS) {
299 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
300 "CS4227 reset failed: %d", status);
301 goto out;
302 }
303
304 /* Reset takes so long, temporarily release semaphore in case the
305 * other driver instance is waiting for the reset indication.
306 */
307 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
308 IXGBE_CS4227_RESET_PENDING);
309 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
310 msec_delay(10);
311 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
312 if (status != IXGBE_SUCCESS) {
313 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
314 "semaphore failed with %d", status);
315 return;
316 }
317
318 /* Record completion for next time. */
319 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
320 IXGBE_CS4227_RESET_COMPLETE);
321
322 out:
323 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
324 msec_delay(hw->eeprom.semaphore_delay);
325 }
326
327 /**
328 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
329 * @hw: pointer to hardware structure
330 **/
331 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
332 {
333 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
334
335 if (hw->bus.lan_id) {
336 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
337 esdp |= IXGBE_ESDP_SDP1_DIR;
338 }
339 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
340 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
341 IXGBE_WRITE_FLUSH(hw);
342 }
343
344 /**
345 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
346 * @hw: pointer to hardware structure
347 * @reg_addr: 32 bit address of PHY register to read
348 * @dev_type: always unused
349 * @phy_data: Pointer to read data from PHY register
350 */
351 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
352 u32 dev_type, u16 *phy_data)
353 {
354 u32 i, data, command;
355 UNREFERENCED_1PARAMETER(dev_type);
356
357 /* Setup and write the read command */
358 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
359 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
360 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
361 IXGBE_MSCA_MDI_COMMAND;
362
363 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
364
365 /* Check every 10 usec to see if the access completed.
366 * The MDI Command bit will clear when the operation is
367 * complete
368 */
369 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
370 usec_delay(10);
371
372 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
373 if (!(command & IXGBE_MSCA_MDI_COMMAND))
374 break;
375 }
376
377 if (command & IXGBE_MSCA_MDI_COMMAND) {
378 ERROR_REPORT1(IXGBE_ERROR_POLLING,
379 "PHY read command did not complete.\n");
380 return IXGBE_ERR_PHY;
381 }
382
383 /* Read operation is complete. Get the data from MSRWD */
384 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
385 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
386 *phy_data = (u16)data;
387
388 return IXGBE_SUCCESS;
389 }
390
391 /**
392 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
393 * @hw: pointer to hardware structure
394 * @reg_addr: 32 bit PHY register to write
395 * @dev_type: always unused
396 * @phy_data: Data to write to the PHY register
397 */
398 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
399 u32 dev_type, u16 phy_data)
400 {
401 u32 i, command;
402 UNREFERENCED_1PARAMETER(dev_type);
403
404 /* Put the data in the MDI single read and write data register*/
405 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
406
407 /* Setup and write the write command */
408 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
409 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
410 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
411 IXGBE_MSCA_MDI_COMMAND;
412
413 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
414
415 /* Check every 10 usec to see if the access completed.
416 * The MDI Command bit will clear when the operation is
417 * complete
418 */
419 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
420 usec_delay(10);
421
422 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
423 if (!(command & IXGBE_MSCA_MDI_COMMAND))
424 break;
425 }
426
427 if (command & IXGBE_MSCA_MDI_COMMAND) {
428 ERROR_REPORT1(IXGBE_ERROR_POLLING,
429 "PHY write cmd didn't complete\n");
430 return IXGBE_ERR_PHY;
431 }
432
433 return IXGBE_SUCCESS;
434 }
435
436 /**
437 * ixgbe_identify_phy_x550em - Get PHY type based on device id
438 * @hw: pointer to hardware structure
439 *
440 * Returns error code
441 */
442 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
443 {
444 hw->mac.ops.set_lan_id(hw);
445
446 ixgbe_read_mng_if_sel_x550em(hw);
447
448 switch (hw->device_id) {
449 case IXGBE_DEV_ID_X550EM_A_SFP:
450 return ixgbe_identify_sfp_module_X550em(hw);
451 case IXGBE_DEV_ID_X550EM_X_SFP:
452 /* set up for CS4227 usage */
453 ixgbe_setup_mux_ctl(hw);
454 ixgbe_check_cs4227(hw);
455 /* Fallthrough */
456
457 case IXGBE_DEV_ID_X550EM_A_SFP_N:
458 return ixgbe_identify_sfp_module_X550em(hw);
459 break;
460 case IXGBE_DEV_ID_X550EM_X_KX4:
461 hw->phy.type = ixgbe_phy_x550em_kx4;
462 break;
463 case IXGBE_DEV_ID_X550EM_X_XFI:
464 hw->phy.type = ixgbe_phy_x550em_xfi;
465 break;
466 case IXGBE_DEV_ID_X550EM_X_KR:
467 case IXGBE_DEV_ID_X550EM_A_KR:
468 case IXGBE_DEV_ID_X550EM_A_KR_L:
469 hw->phy.type = ixgbe_phy_x550em_kr;
470 break;
471 case IXGBE_DEV_ID_X550EM_A_10G_T:
472 case IXGBE_DEV_ID_X550EM_X_10G_T:
473 return ixgbe_identify_phy_generic(hw);
474 case IXGBE_DEV_ID_X550EM_X_1G_T:
475 hw->phy.type = ixgbe_phy_ext_1g_t;
476 break;
477 case IXGBE_DEV_ID_X550EM_A_1G_T:
478 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
479 hw->phy.type = ixgbe_phy_fw;
480 if (hw->bus.lan_id)
481 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
482 else
483 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
484 break;
485 default:
486 break;
487 }
488 return IXGBE_SUCCESS;
489 }
490
491 /**
492 * ixgbe_fw_phy_activity - Perform an activity on a PHY
493 * @hw: pointer to hardware structure
494 * @activity: activity to perform
495 * @data: Pointer to 4 32-bit words of data
496 */
497 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
498 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
499 {
500 union {
501 struct ixgbe_hic_phy_activity_req cmd;
502 struct ixgbe_hic_phy_activity_resp rsp;
503 } hic;
504 u16 retries = FW_PHY_ACT_RETRIES;
505 s32 rc;
506 u16 i;
507
508 do {
509 memset(&hic, 0, sizeof(hic));
510 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
511 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
512 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
513 hic.cmd.port_number = hw->bus.lan_id;
514 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
515 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
516 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
517
518 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
519 sizeof(hic.cmd),
520 IXGBE_HI_COMMAND_TIMEOUT,
521 TRUE);
522 if (rc != IXGBE_SUCCESS)
523 return rc;
524 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
525 FW_CEM_RESP_STATUS_SUCCESS) {
526 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
527 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
528 return IXGBE_SUCCESS;
529 }
530 usec_delay(20);
531 --retries;
532 } while (retries > 0);
533
534 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
535 }
536
537 static const struct {
538 u16 fw_speed;
539 ixgbe_link_speed phy_speed;
540 } ixgbe_fw_map[] = {
541 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
542 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
543 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
544 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
545 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
546 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
547 };
548
549 /**
550 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
551 * @hw: pointer to hardware structure
552 *
553 * Returns error code
554 */
555 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
556 {
557 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
558 u16 phy_speeds;
559 u16 phy_id_lo;
560 s32 rc;
561 u16 i;
562
563 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
564 if (rc)
565 return rc;
566
567 hw->phy.speeds_supported = 0;
568 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
569 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
570 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
571 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
572 }
573
574 #if 0
575 /*
576 * Don't set autoneg_advertised here to not to be inconsistent with
577 * if_media value.
578 */
579 if (!hw->phy.autoneg_advertised)
580 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
581 #endif
582
583 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
584 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
585 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
586 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
587 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
588 return IXGBE_ERR_PHY_ADDR_INVALID;
589 return IXGBE_SUCCESS;
590 }
591
592 /**
593 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
594 * @hw: pointer to hardware structure
595 *
596 * Returns error code
597 */
598 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
599 {
600 if (hw->bus.lan_id)
601 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
602 else
603 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
604
605 hw->phy.type = ixgbe_phy_fw;
606 hw->phy.ops.read_reg = NULL;
607 hw->phy.ops.write_reg = NULL;
608 return ixgbe_get_phy_id_fw(hw);
609 }
610
611 /**
612 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
613 * @hw: pointer to hardware structure
614 *
615 * Returns error code
616 */
617 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
618 {
619 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
620
621 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
622 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
623 }
624
625 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
626 u32 device_type, u16 *phy_data)
627 {
628 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
629 return IXGBE_NOT_IMPLEMENTED;
630 }
631
632 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
633 u32 device_type, u16 phy_data)
634 {
635 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
636 return IXGBE_NOT_IMPLEMENTED;
637 }
638
639 /**
640 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
641 * @hw: pointer to the hardware structure
642 * @addr: I2C bus address to read from
643 * @reg: I2C device register to read from
644 * @val: pointer to location to receive read value
645 *
646 * Returns an error code on error.
647 **/
648 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
649 u16 reg, u16 *val)
650 {
651 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
652 }
653
654 /**
655 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
656 * @hw: pointer to the hardware structure
657 * @addr: I2C bus address to read from
658 * @reg: I2C device register to read from
659 * @val: pointer to location to receive read value
660 *
661 * Returns an error code on error.
662 **/
663 static s32
664 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
665 u16 reg, u16 *val)
666 {
667 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
668 }
669
670 /**
671 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
672 * @hw: pointer to the hardware structure
673 * @addr: I2C bus address to write to
674 * @reg: I2C device register to write to
675 * @val: value to write
676 *
677 * Returns an error code on error.
678 **/
679 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
680 u8 addr, u16 reg, u16 val)
681 {
682 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
683 }
684
685 /**
686 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
687 * @hw: pointer to the hardware structure
688 * @addr: I2C bus address to write to
689 * @reg: I2C device register to write to
690 * @val: value to write
691 *
692 * Returns an error code on error.
693 **/
694 static s32
695 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
696 u8 addr, u16 reg, u16 val)
697 {
698 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
699 }
700
701 /**
702 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
703 * @hw: pointer to hardware structure
704 *
705 * Initialize the function pointers and for MAC type X550EM.
706 * Does not touch the hardware.
707 **/
708 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
709 {
710 struct ixgbe_mac_info *mac = &hw->mac;
711 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
712 struct ixgbe_phy_info *phy = &hw->phy;
713 s32 ret_val;
714
715 DEBUGFUNC("ixgbe_init_ops_X550EM");
716
717 /* Similar to X550 so start there. */
718 ret_val = ixgbe_init_ops_X550(hw);
719
720 /* Since this function eventually calls
721 * ixgbe_init_ops_540 by design, we are setting
722 * the pointers to NULL explicitly here to overwrite
723 * the values being set in the x540 function.
724 */
725
726 /* Bypass not supported in x550EM */
727 mac->ops.bypass_rw = NULL;
728 mac->ops.bypass_valid_rd = NULL;
729 mac->ops.bypass_set = NULL;
730 mac->ops.bypass_rd_eep = NULL;
731
732 /* FCOE not supported in x550EM */
733 mac->ops.get_san_mac_addr = NULL;
734 mac->ops.set_san_mac_addr = NULL;
735 mac->ops.get_wwn_prefix = NULL;
736 mac->ops.get_fcoe_boot_status = NULL;
737
738 /* IPsec not supported in x550EM */
739 mac->ops.disable_sec_rx_path = NULL;
740 mac->ops.enable_sec_rx_path = NULL;
741
742 /* AUTOC register is not present in x550EM. */
743 mac->ops.prot_autoc_read = NULL;
744 mac->ops.prot_autoc_write = NULL;
745
746 /* X550EM bus type is internal*/
747 hw->bus.type = ixgbe_bus_type_internal;
748 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
749
750
751 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
752 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
753 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
754 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
755 mac->ops.get_supported_physical_layer =
756 ixgbe_get_supported_physical_layer_X550em;
757
758 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
759 mac->ops.setup_fc = ixgbe_setup_fc_generic;
760 else
761 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
762
763 /* PHY */
764 phy->ops.init = ixgbe_init_phy_ops_X550em;
765 switch (hw->device_id) {
766 case IXGBE_DEV_ID_X550EM_A_1G_T:
767 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
768 mac->ops.setup_fc = NULL;
769 phy->ops.identify = ixgbe_identify_phy_fw;
770 phy->ops.set_phy_power = NULL;
771 phy->ops.get_firmware_version = NULL;
772 break;
773 case IXGBE_DEV_ID_X550EM_X_1G_T:
774 mac->ops.setup_fc = NULL;
775 phy->ops.identify = ixgbe_identify_phy_x550em;
776 phy->ops.set_phy_power = NULL;
777 break;
778 default:
779 phy->ops.identify = ixgbe_identify_phy_x550em;
780 }
781
782 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
783 phy->ops.set_phy_power = NULL;
784
785
786 /* EEPROM */
787 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
788 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
789 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
790 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
791 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
792 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
793 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
794 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
795
796 return ret_val;
797 }
798
799 #define IXGBE_DENVERTON_WA 1
800
801 /**
802 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
803 * @hw: pointer to hardware structure
804 */
805 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
806 {
807 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
808 s32 rc;
809 #ifdef IXGBE_DENVERTON_WA
810 s32 ret_val;
811 u16 phydata;
812 #endif
813 u16 i;
814
815 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
816 return 0;
817
818 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
819 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
820 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
821 return IXGBE_ERR_INVALID_LINK_SETTINGS;
822 }
823
824 switch (hw->fc.requested_mode) {
825 case ixgbe_fc_full:
826 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
827 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
828 break;
829 case ixgbe_fc_rx_pause:
830 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
831 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
832 break;
833 case ixgbe_fc_tx_pause:
834 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
835 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
836 break;
837 default:
838 break;
839 }
840
841 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
842 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
843 setup[0] |= ixgbe_fw_map[i].fw_speed;
844 }
845 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
846
847 if (hw->phy.eee_speeds_advertised)
848 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
849
850 #ifdef IXGBE_DENVERTON_WA
851 if ((hw->phy.force_10_100_autonego == false)
852 && ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
853 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL))) {
854 /* Don't use auto-nego for 10/100Mbps */
855 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
856 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
857 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
858 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
859 }
860 #endif
861
862 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
863 if (rc)
864 return rc;
865
866 #ifdef IXGBE_DENVERTON_WA
867 if (hw->phy.force_10_100_autonego == true)
868 goto out;
869
870 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
871 if (ret_val != 0)
872 goto out;
873
874 /*
875 * Broken firmware sets BMCR register incorrectly if
876 * FW_PHY_ACT_SETUP_LINK_AN isn't set.
877 * a) FDX may not be set.
878 * b) BMCR_SPEED1 (bit 6) is always cleard.
879 * + -------+------+-----------+-----+--------------------------+
880 * |request | BMCR | BMCR spd | BMCR | |
881 * | | (HEX)| (in bits)| FDX | |
882 * +--------+------+----------+------+--------------------------+
883 * | 10M | 0000 | 10M(00) | 0 | |
884 * | 10M | 2000 | 100M(01) | 0 |(I've never observed this)|
885 * | 10M | 2100 | 100M(01) | 1 | |
886 * | 100M | 0000 | 10M(00) | 0 | |
887 * | 100M | 0100 | 10M(00) | 1 | |
888 * +--------------------------+------+--------------------------+
889 */
890 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
891 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
892 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
893 && (((phydata & BMCR_FDX) == 0)
894 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
895 phydata = BMCR_FDX;
896 switch (hw->phy.autoneg_advertised) {
897 case IXGBE_LINK_SPEED_10_FULL:
898 phydata |= BMCR_S10;
899 break;
900 case IXGBE_LINK_SPEED_100_FULL:
901 phydata |= BMCR_S100;
902 break;
903 case IXGBE_LINK_SPEED_1GB_FULL:
904 panic("%s: 1GB_FULL is set", __func__);
905 break;
906 default:
907 break;
908 }
909 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
910 if (ret_val != 0)
911 return ret_val;
912 }
913 out:
914 #endif
915 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
916 return IXGBE_ERR_OVERTEMP;
917 return IXGBE_SUCCESS;
918 }
919
920 /**
921 * ixgbe_fc_autoneg_fw _ Set up flow control for FW-controlled PHYs
922 * @hw: pointer to hardware structure
923 *
924 * Called at init time to set up flow control.
925 */
926 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
927 {
928 if (hw->fc.requested_mode == ixgbe_fc_default)
929 hw->fc.requested_mode = ixgbe_fc_full;
930
931 return ixgbe_setup_fw_link(hw);
932 }
933
934 /**
935 * ixgbe_setup_eee_fw - Enable/disable EEE support
936 * @hw: pointer to the HW structure
937 * @enable_eee: boolean flag to enable EEE
938 *
939 * Enable/disable EEE based on enable_eee flag.
940 * This function controls EEE for firmware-based PHY implementations.
941 */
942 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
943 {
944 if (!!hw->phy.eee_speeds_advertised == enable_eee)
945 return IXGBE_SUCCESS;
946 if (enable_eee)
947 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
948 else
949 hw->phy.eee_speeds_advertised = 0;
950 return hw->phy.ops.setup_link(hw);
951 }
952
953 /**
954 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
955 * @hw: pointer to hardware structure
956 *
957 * Initialize the function pointers and for MAC type X550EM_a.
958 * Does not touch the hardware.
959 **/
960 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
961 {
962 struct ixgbe_mac_info *mac = &hw->mac;
963 s32 ret_val;
964
965 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
966
967 /* Start with generic X550EM init */
968 ret_val = ixgbe_init_ops_X550EM(hw);
969
970 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
971 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
972 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
973 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
974 } else {
975 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
976 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
977 }
978 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
979 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
980
981 switch (mac->ops.get_media_type(hw)) {
982 case ixgbe_media_type_fiber:
983 mac->ops.setup_fc = NULL;
984 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
985 break;
986 case ixgbe_media_type_backplane:
987 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
988 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
989 break;
990 default:
991 break;
992 }
993
994 switch (hw->device_id) {
995 case IXGBE_DEV_ID_X550EM_A_1G_T:
996 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
997 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
998 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
999 mac->ops.setup_eee = ixgbe_setup_eee_fw;
1000 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
1001 IXGBE_LINK_SPEED_1GB_FULL;
1002 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1003 break;
1004 default:
1005 break;
1006 }
1007
1008 return ret_val;
1009 }
1010
1011 /**
1012 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1013 * @hw: pointer to hardware structure
1014 *
1015 * Initialize the function pointers and for MAC type X550EM_x.
1016 * Does not touch the hardware.
1017 **/
1018 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1019 {
1020 struct ixgbe_mac_info *mac = &hw->mac;
1021 struct ixgbe_link_info *link = &hw->link;
1022 s32 ret_val;
1023
1024 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1025
1026 /* Start with generic X550EM init */
1027 ret_val = ixgbe_init_ops_X550EM(hw);
1028
1029 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1030 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1031 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1032 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1033 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1034 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1035 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1036 link->ops.write_link_unlocked =
1037 ixgbe_write_i2c_combined_generic_unlocked;
1038 link->addr = IXGBE_CS4227;
1039
1040 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1041 mac->ops.setup_fc = NULL;
1042 mac->ops.setup_eee = NULL;
1043 mac->ops.init_led_link_act = NULL;
1044 }
1045
1046 return ret_val;
1047 }
1048
1049 /**
1050 * ixgbe_dmac_config_X550
1051 * @hw: pointer to hardware structure
1052 *
1053 * Configure DMA coalescing. If enabling dmac, dmac is activated.
1054 * When disabling dmac, dmac enable dmac bit is cleared.
1055 **/
1056 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1057 {
1058 u32 reg, high_pri_tc;
1059
1060 DEBUGFUNC("ixgbe_dmac_config_X550");
1061
1062 /* Disable DMA coalescing before configuring */
1063 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1064 reg &= ~IXGBE_DMACR_DMAC_EN;
1065 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1066
1067 /* Disable DMA Coalescing if the watchdog timer is 0 */
1068 if (!hw->mac.dmac_config.watchdog_timer)
1069 goto out;
1070
1071 ixgbe_dmac_config_tcs_X550(hw);
1072
1073 /* Configure DMA Coalescing Control Register */
1074 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1075
1076 /* Set the watchdog timer in units of 40.96 usec */
1077 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1078 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1079
1080 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1081 /* If fcoe is enabled, set high priority traffic class */
1082 if (hw->mac.dmac_config.fcoe_en) {
1083 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1084 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1085 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1086 }
1087 reg |= IXGBE_DMACR_EN_MNG_IND;
1088
1089 /* Enable DMA coalescing after configuration */
1090 reg |= IXGBE_DMACR_DMAC_EN;
1091 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1092
1093 out:
1094 return IXGBE_SUCCESS;
1095 }
1096
1097 /**
1098 * ixgbe_dmac_config_tcs_X550
1099 * @hw: pointer to hardware structure
1100 *
1101 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1102 * be cleared before configuring.
1103 **/
1104 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1105 {
1106 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1107
1108 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1109
1110 /* Configure DMA coalescing enabled */
1111 switch (hw->mac.dmac_config.link_speed) {
1112 case IXGBE_LINK_SPEED_10_FULL:
1113 case IXGBE_LINK_SPEED_100_FULL:
1114 pb_headroom = IXGBE_DMACRXT_100M;
1115 break;
1116 case IXGBE_LINK_SPEED_1GB_FULL:
1117 pb_headroom = IXGBE_DMACRXT_1G;
1118 break;
1119 default:
1120 pb_headroom = IXGBE_DMACRXT_10G;
1121 break;
1122 }
1123
1124 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1125 IXGBE_MHADD_MFS_SHIFT) / 1024);
1126
1127 /* Set the per Rx packet buffer receive threshold */
1128 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1129 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1130 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1131
1132 if (tc < hw->mac.dmac_config.num_tcs) {
1133 /* Get Rx PB size */
1134 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1135 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1136 IXGBE_RXPBSIZE_SHIFT;
1137
1138 /* Calculate receive buffer threshold in kilobytes */
1139 if (rx_pb_size > pb_headroom)
1140 rx_pb_size = rx_pb_size - pb_headroom;
1141 else
1142 rx_pb_size = 0;
1143
1144 /* Minimum of MFS shall be set for DMCTH */
1145 reg |= (rx_pb_size > maxframe_size_kb) ?
1146 rx_pb_size : maxframe_size_kb;
1147 }
1148 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1149 }
1150 return IXGBE_SUCCESS;
1151 }
1152
1153 /**
1154 * ixgbe_dmac_update_tcs_X550
1155 * @hw: pointer to hardware structure
1156 *
1157 * Disables dmac, updates per TC settings, and then enables dmac.
1158 **/
1159 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1160 {
1161 u32 reg;
1162
1163 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1164
1165 /* Disable DMA coalescing before configuring */
1166 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1167 reg &= ~IXGBE_DMACR_DMAC_EN;
1168 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1169
1170 ixgbe_dmac_config_tcs_X550(hw);
1171
1172 /* Enable DMA coalescing after configuration */
1173 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1174 reg |= IXGBE_DMACR_DMAC_EN;
1175 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1176
1177 return IXGBE_SUCCESS;
1178 }
1179
1180 /**
1181 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1182 * @hw: pointer to hardware structure
1183 *
1184 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1185 * ixgbe_hw struct in order to set up EEPROM access.
1186 **/
1187 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1188 {
1189 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1190 u32 eec;
1191 u16 eeprom_size;
1192
1193 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1194
1195 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1196 eeprom->semaphore_delay = 10;
1197 eeprom->type = ixgbe_flash;
1198
1199 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1200 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1201 IXGBE_EEC_SIZE_SHIFT);
1202 eeprom->word_size = 1 << (eeprom_size +
1203 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1204
1205 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1206 eeprom->type, eeprom->word_size);
1207 }
1208
1209 return IXGBE_SUCCESS;
1210 }
1211
1212 /**
1213 * ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning
1214 * @hw: pointer to hardware structure
1215 * @enable: enable or disable source address pruning
1216 * @pool: Rx pool to set source address pruning for
1217 **/
1218 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1219 unsigned int pool)
1220 {
1221 u64 pfflp;
1222
1223 /* max rx pool is 63 */
1224 if (pool > 63)
1225 return;
1226
1227 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1228 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1229
1230 if (enable)
1231 pfflp |= (1ULL << pool);
1232 else
1233 pfflp &= ~(1ULL << pool);
1234
1235 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1236 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1237 }
1238
1239 /**
1240 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1241 * @hw: pointer to hardware structure
1242 * @enable: enable or disable switch for Ethertype anti-spoofing
1243 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1244 *
1245 **/
1246 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1247 bool enable, int vf)
1248 {
1249 int vf_target_reg = vf >> 3;
1250 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1251 u32 pfvfspoof;
1252
1253 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1254
1255 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1256 if (enable)
1257 pfvfspoof |= (1 << vf_target_shift);
1258 else
1259 pfvfspoof &= ~(1 << vf_target_shift);
1260
1261 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1262 }
1263
1264 /**
1265 * ixgbe_iosf_wait - Wait for IOSF command completion
1266 * @hw: pointer to hardware structure
1267 * @ctrl: pointer to location to receive final IOSF control value
1268 *
1269 * Returns failing status on timeout
1270 *
1271 * Note: ctrl can be NULL if the IOSF control register value is not needed
1272 **/
1273 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1274 {
1275 u32 i, command = 0;
1276
1277 /* Check every 10 usec to see if the address cycle completed.
1278 * The SB IOSF BUSY bit will clear when the operation is
1279 * complete
1280 */
1281 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1282 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1283 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1284 break;
1285 usec_delay(10);
1286 }
1287 if (ctrl)
1288 *ctrl = command;
1289 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1290 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1291 return IXGBE_ERR_PHY;
1292 }
1293
1294 return IXGBE_SUCCESS;
1295 }
1296
1297 /**
1298 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1299 * of the IOSF device
1300 * @hw: pointer to hardware structure
1301 * @reg_addr: 32 bit PHY register to write
1302 * @device_type: 3 bit device type
1303 * @data: Data to write to the register
1304 **/
1305 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1306 u32 device_type, u32 data)
1307 {
1308 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1309 u32 command, error __unused;
1310 s32 ret;
1311
1312 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1313 if (ret != IXGBE_SUCCESS)
1314 return ret;
1315
1316 ret = ixgbe_iosf_wait(hw, NULL);
1317 if (ret != IXGBE_SUCCESS)
1318 goto out;
1319
1320 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1321 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1322
1323 /* Write IOSF control register */
1324 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1325
1326 /* Write IOSF data register */
1327 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1328
1329 ret = ixgbe_iosf_wait(hw, &command);
1330
1331 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1332 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1333 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1334 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1335 "Failed to write, error %x\n", error);
1336 ret = IXGBE_ERR_PHY;
1337 }
1338
1339 out:
1340 ixgbe_release_swfw_semaphore(hw, gssr);
1341 return ret;
1342 }
1343
1344 /**
1345 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1346 * @hw: pointer to hardware structure
1347 * @reg_addr: 32 bit PHY register to write
1348 * @device_type: 3 bit device type
1349 * @data: Pointer to read data from the register
1350 **/
1351 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1352 u32 device_type, u32 *data)
1353 {
1354 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1355 u32 command, error __unused;
1356 s32 ret;
1357
1358 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1359 if (ret != IXGBE_SUCCESS)
1360 return ret;
1361
1362 ret = ixgbe_iosf_wait(hw, NULL);
1363 if (ret != IXGBE_SUCCESS)
1364 goto out;
1365
1366 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1367 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1368
1369 /* Write IOSF control register */
1370 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1371
1372 ret = ixgbe_iosf_wait(hw, &command);
1373
1374 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1375 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1376 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1377 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1378 "Failed to read, error %x\n", error);
1379 ret = IXGBE_ERR_PHY;
1380 }
1381
1382 if (ret == IXGBE_SUCCESS)
1383 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1384
1385 out:
1386 ixgbe_release_swfw_semaphore(hw, gssr);
1387 return ret;
1388 }
1389
1390 /**
1391 * ixgbe_get_phy_token - Get the token for shared phy access
1392 * @hw: Pointer to hardware structure
1393 */
1394
1395 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1396 {
1397 struct ixgbe_hic_phy_token_req token_cmd;
1398 s32 status;
1399
1400 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1401 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1402 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1403 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1404 token_cmd.port_number = hw->bus.lan_id;
1405 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1406 token_cmd.pad = 0;
1407 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1408 sizeof(token_cmd),
1409 IXGBE_HI_COMMAND_TIMEOUT,
1410 TRUE);
1411 if (status) {
1412 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1413 status);
1414 return status;
1415 }
1416 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1417 return IXGBE_SUCCESS;
1418 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1419 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1420 token_cmd.hdr.cmd_or_resp.ret_status);
1421 return IXGBE_ERR_FW_RESP_INVALID;
1422 }
1423
1424 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1425 return IXGBE_ERR_TOKEN_RETRY;
1426 }
1427
1428 /**
1429 * ixgbe_put_phy_token - Put the token for shared phy access
1430 * @hw: Pointer to hardware structure
1431 */
1432
1433 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1434 {
1435 struct ixgbe_hic_phy_token_req token_cmd;
1436 s32 status;
1437
1438 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1439 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1440 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1441 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1442 token_cmd.port_number = hw->bus.lan_id;
1443 token_cmd.command_type = FW_PHY_TOKEN_REL;
1444 token_cmd.pad = 0;
1445 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1446 sizeof(token_cmd),
1447 IXGBE_HI_COMMAND_TIMEOUT,
1448 TRUE);
1449 if (status)
1450 return status;
1451 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1452 return IXGBE_SUCCESS;
1453
1454 DEBUGOUT("Put PHY Token host interface command failed");
1455 return IXGBE_ERR_FW_RESP_INVALID;
1456 }
1457
1458 /**
1459 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1460 * of the IOSF device
1461 * @hw: pointer to hardware structure
1462 * @reg_addr: 32 bit PHY register to write
1463 * @device_type: 3 bit device type
1464 * @data: Data to write to the register
1465 **/
1466 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1467 u32 device_type, u32 data)
1468 {
1469 struct ixgbe_hic_internal_phy_req write_cmd;
1470 s32 status;
1471 UNREFERENCED_1PARAMETER(device_type);
1472
1473 memset(&write_cmd, 0, sizeof(write_cmd));
1474 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1475 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1476 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1477 write_cmd.port_number = hw->bus.lan_id;
1478 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1479 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1480 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1481
1482 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1483 sizeof(write_cmd),
1484 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1485
1486 return status;
1487 }
1488
1489 /**
1490 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1491 * @hw: pointer to hardware structure
1492 * @reg_addr: 32 bit PHY register to write
1493 * @device_type: 3 bit device type
1494 * @data: Pointer to read data from the register
1495 **/
1496 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1497 u32 device_type, u32 *data)
1498 {
1499 union {
1500 struct ixgbe_hic_internal_phy_req cmd;
1501 struct ixgbe_hic_internal_phy_resp rsp;
1502 } hic;
1503 s32 status;
1504 UNREFERENCED_1PARAMETER(device_type);
1505
1506 memset(&hic, 0, sizeof(hic));
1507 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1508 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1509 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1510 hic.cmd.port_number = hw->bus.lan_id;
1511 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1512 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1513
1514 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1515 sizeof(hic.cmd),
1516 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1517
1518 /* Extract the register value from the response. */
1519 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1520
1521 return status;
1522 }
1523
1524 /**
1525 * ixgbe_disable_mdd_X550
1526 * @hw: pointer to hardware structure
1527 *
1528 * Disable malicious driver detection
1529 **/
1530 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1531 {
1532 u32 reg;
1533
1534 DEBUGFUNC("ixgbe_disable_mdd_X550");
1535
1536 /* Disable MDD for TX DMA and interrupt */
1537 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1538 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1539 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1540
1541 /* Disable MDD for RX and interrupt */
1542 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1543 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1544 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1545 }
1546
1547 /**
1548 * ixgbe_enable_mdd_X550
1549 * @hw: pointer to hardware structure
1550 *
1551 * Enable malicious driver detection
1552 **/
1553 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1554 {
1555 u32 reg;
1556
1557 DEBUGFUNC("ixgbe_enable_mdd_X550");
1558
1559 /* Enable MDD for TX DMA and interrupt */
1560 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1561 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1562 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1563
1564 /* Enable MDD for RX and interrupt */
1565 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1566 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1567 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1568 }
1569
1570 /**
1571 * ixgbe_restore_mdd_vf_X550
1572 * @hw: pointer to hardware structure
1573 * @vf: vf index
1574 *
1575 * Restore VF that was disabled during malicious driver detection event
1576 **/
1577 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1578 {
1579 u32 idx, reg, num_qs, start_q, bitmask;
1580
1581 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1582
1583 /* Map VF to queues */
1584 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1585 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1586 case IXGBE_MRQC_VMDQRT8TCEN:
1587 num_qs = 8; /* 16 VFs / pools */
1588 bitmask = 0x000000FF;
1589 break;
1590 case IXGBE_MRQC_VMDQRSS32EN:
1591 case IXGBE_MRQC_VMDQRT4TCEN:
1592 num_qs = 4; /* 32 VFs / pools */
1593 bitmask = 0x0000000F;
1594 break;
1595 default: /* 64 VFs / pools */
1596 num_qs = 2;
1597 bitmask = 0x00000003;
1598 break;
1599 }
1600 start_q = vf * num_qs;
1601
1602 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1603 idx = start_q / 32;
1604 reg = 0;
1605 reg |= (bitmask << (start_q % 32));
1606 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1607 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1608 }
1609
1610 /**
1611 * ixgbe_mdd_event_X550
1612 * @hw: pointer to hardware structure
1613 * @vf_bitmap: vf bitmap of malicious vfs
1614 *
1615 * Handle malicious driver detection event.
1616 **/
1617 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1618 {
1619 u32 wqbr;
1620 u32 i, j, reg, q, shift, vf, idx;
1621
1622 DEBUGFUNC("ixgbe_mdd_event_X550");
1623
1624 /* figure out pool size for mapping to vf's */
1625 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1626 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1627 case IXGBE_MRQC_VMDQRT8TCEN:
1628 shift = 3; /* 16 VFs / pools */
1629 break;
1630 case IXGBE_MRQC_VMDQRSS32EN:
1631 case IXGBE_MRQC_VMDQRT4TCEN:
1632 shift = 2; /* 32 VFs / pools */
1633 break;
1634 default:
1635 shift = 1; /* 64 VFs / pools */
1636 break;
1637 }
1638
1639 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1640 for (i = 0; i < 4; i++) {
1641 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1642 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1643
1644 if (!wqbr)
1645 continue;
1646
1647 /* Get malicious queue */
1648 for (j = 0; j < 32 && wqbr; j++) {
1649
1650 if (!(wqbr & (1 << j)))
1651 continue;
1652
1653 /* Get queue from bitmask */
1654 q = j + (i * 32);
1655
1656 /* Map queue to vf */
1657 vf = (q >> shift);
1658
1659 /* Set vf bit in vf_bitmap */
1660 idx = vf / 32;
1661 vf_bitmap[idx] |= (1 << (vf % 32));
1662 wqbr &= ~(1 << j);
1663 }
1664 }
1665 }
1666
1667 /**
1668 * ixgbe_get_media_type_X550em - Get media type
1669 * @hw: pointer to hardware structure
1670 *
1671 * Returns the media type (fiber, copper, backplane)
1672 */
1673 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1674 {
1675 enum ixgbe_media_type media_type;
1676
1677 DEBUGFUNC("ixgbe_get_media_type_X550em");
1678
1679 /* Detect if there is a copper PHY attached. */
1680 switch (hw->device_id) {
1681 case IXGBE_DEV_ID_X550EM_X_KR:
1682 case IXGBE_DEV_ID_X550EM_X_KX4:
1683 case IXGBE_DEV_ID_X550EM_X_XFI:
1684 case IXGBE_DEV_ID_X550EM_A_KR:
1685 case IXGBE_DEV_ID_X550EM_A_KR_L:
1686 media_type = ixgbe_media_type_backplane;
1687 break;
1688 case IXGBE_DEV_ID_X550EM_X_SFP:
1689 case IXGBE_DEV_ID_X550EM_A_SFP:
1690 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1691 case IXGBE_DEV_ID_X550EM_A_QSFP:
1692 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1693 media_type = ixgbe_media_type_fiber;
1694 break;
1695 case IXGBE_DEV_ID_X550EM_X_1G_T:
1696 case IXGBE_DEV_ID_X550EM_X_10G_T:
1697 case IXGBE_DEV_ID_X550EM_A_10G_T:
1698 media_type = ixgbe_media_type_copper;
1699 break;
1700 case IXGBE_DEV_ID_X550EM_A_SGMII:
1701 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1702 media_type = ixgbe_media_type_backplane;
1703 hw->phy.type = ixgbe_phy_sgmii;
1704 break;
1705 case IXGBE_DEV_ID_X550EM_A_1G_T:
1706 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1707 media_type = ixgbe_media_type_copper;
1708 break;
1709 default:
1710 media_type = ixgbe_media_type_unknown;
1711 break;
1712 }
1713 return media_type;
1714 }
1715
1716 /**
1717 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1718 * @hw: pointer to hardware structure
1719 * @linear: TRUE if SFP module is linear
1720 */
1721 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1722 {
1723 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1724
1725 switch (hw->phy.sfp_type) {
1726 case ixgbe_sfp_type_not_present:
1727 return IXGBE_ERR_SFP_NOT_PRESENT;
1728 case ixgbe_sfp_type_da_cu_core0:
1729 case ixgbe_sfp_type_da_cu_core1:
1730 *linear = TRUE;
1731 break;
1732 case ixgbe_sfp_type_srlr_core0:
1733 case ixgbe_sfp_type_srlr_core1:
1734 case ixgbe_sfp_type_da_act_lmt_core0:
1735 case ixgbe_sfp_type_da_act_lmt_core1:
1736 case ixgbe_sfp_type_1g_sx_core0:
1737 case ixgbe_sfp_type_1g_sx_core1:
1738 case ixgbe_sfp_type_1g_lx_core0:
1739 case ixgbe_sfp_type_1g_lx_core1:
1740 *linear = FALSE;
1741 break;
1742 case ixgbe_sfp_type_unknown:
1743 case ixgbe_sfp_type_1g_cu_core0:
1744 case ixgbe_sfp_type_1g_cu_core1:
1745 default:
1746 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1747 }
1748
1749 return IXGBE_SUCCESS;
1750 }
1751
1752 /**
1753 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1754 * @hw: pointer to hardware structure
1755 *
1756 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1757 **/
1758 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1759 {
1760 s32 status;
1761 bool linear;
1762
1763 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1764
1765 status = ixgbe_identify_module_generic(hw);
1766
1767 if (status != IXGBE_SUCCESS)
1768 return status;
1769
1770 /* Check if SFP module is supported */
1771 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1772
1773 return status;
1774 }
1775
1776 /**
1777 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1778 * @hw: pointer to hardware structure
1779 */
1780 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1781 {
1782 s32 status;
1783 bool linear;
1784
1785 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1786
1787 /* Check if SFP module is supported */
1788 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1789
1790 if (status != IXGBE_SUCCESS)
1791 return status;
1792
1793 ixgbe_init_mac_link_ops_X550em(hw);
1794 hw->phy.ops.reset = NULL;
1795
1796 return IXGBE_SUCCESS;
1797 }
1798
1799 /**
1800 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1801 * internal PHY
1802 * @hw: pointer to hardware structure
1803 **/
1804 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1805 {
1806 s32 status;
1807 u32 link_ctrl;
1808
1809 /* Restart auto-negotiation. */
1810 status = hw->mac.ops.read_iosf_sb_reg(hw,
1811 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1812 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1813
1814 if (status) {
1815 DEBUGOUT("Auto-negotiation did not complete\n");
1816 return status;
1817 }
1818
1819 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1820 status = hw->mac.ops.write_iosf_sb_reg(hw,
1821 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1822 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1823
1824 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1825 u32 flx_mask_st20;
1826
1827 /* Indicate to FW that AN restart has been asserted */
1828 status = hw->mac.ops.read_iosf_sb_reg(hw,
1829 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1830 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1831
1832 if (status) {
1833 DEBUGOUT("Auto-negotiation did not complete\n");
1834 return status;
1835 }
1836
1837 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1838 status = hw->mac.ops.write_iosf_sb_reg(hw,
1839 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1840 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1841 }
1842
1843 return status;
1844 }
1845
1846 /**
1847 * ixgbe_setup_sgmii - Set up link for sgmii
1848 * @hw: pointer to hardware structure
1849 * @speed: new link speed
1850 * @autoneg_wait: TRUE when waiting for completion is needed
1851 */
1852 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1853 bool autoneg_wait)
1854 {
1855 struct ixgbe_mac_info *mac = &hw->mac;
1856 u32 lval, sval, flx_val;
1857 s32 rc;
1858
1859 rc = mac->ops.read_iosf_sb_reg(hw,
1860 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1861 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1862 if (rc)
1863 return rc;
1864
1865 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1866 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1867 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1868 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1869 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1870 rc = mac->ops.write_iosf_sb_reg(hw,
1871 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1872 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1873 if (rc)
1874 return rc;
1875
1876 rc = mac->ops.read_iosf_sb_reg(hw,
1877 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1878 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1879 if (rc)
1880 return rc;
1881
1882 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1883 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1884 rc = mac->ops.write_iosf_sb_reg(hw,
1885 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1886 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1887 if (rc)
1888 return rc;
1889
1890 rc = mac->ops.read_iosf_sb_reg(hw,
1891 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1892 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1893 if (rc)
1894 return rc;
1895
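/* Put the flexible PMD lane into fixed 1G SGMII mode: force the 1G
 * data rate, keep hardware (backplane) auto-negotiation disabled and
 * enable Clause 37 auto-negotiation, matching the link control setup
 * above.
 */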
1896 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1897 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1898 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1899 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1900 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1901
1902 rc = mac->ops.write_iosf_sb_reg(hw,
1903 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1904 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1905 if (rc)
1906 return rc;
1907
1908 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1909 if (rc)
1910 return rc;
1911
1912 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1913 }
1914
1915 /**
1916 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1917 * @hw: pointer to hardware structure
1918 * @speed: new link speed
1919 * @autoneg_wait: TRUE when waiting for completion is needed
1920 */
1921 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1922 bool autoneg_wait)
1923 {
1924 struct ixgbe_mac_info *mac = &hw->mac;
1925 u32 lval, sval, flx_val;
1926 s32 rc;
1927
1928 rc = mac->ops.read_iosf_sb_reg(hw,
1929 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1930 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1931 if (rc)
1932 return rc;
1933
1934 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1935 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1936 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1937 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1938 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1939 rc = mac->ops.write_iosf_sb_reg(hw,
1940 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1941 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1942 if (rc)
1943 return rc;
1944
1945 rc = mac->ops.read_iosf_sb_reg(hw,
1946 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1947 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1948 if (rc)
1949 return rc;
1950
1951 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1952 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1953 rc = mac->ops.write_iosf_sb_reg(hw,
1954 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1955 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1956 if (rc)
1957 return rc;
1958
1959 rc = mac->ops.write_iosf_sb_reg(hw,
1960 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1961 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1962 if (rc)
1963 return rc;
1964
1965 rc = mac->ops.read_iosf_sb_reg(hw,
1966 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1967 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1968 if (rc)
1969 return rc;
1970
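/* Unlike ixgbe_setup_sgmii(), leave the lane speed in auto-negotiation
 * mode (no forced 1G) so the rate follows whatever the firmware-managed
 * external PHY negotiates.
 */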
1971 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1972 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1973 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1974 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1975 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1976
1977 rc = mac->ops.write_iosf_sb_reg(hw,
1978 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1979 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1980 if (rc)
1981 return rc;
1982
1983 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1984
1985 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1986 }
1987
1988 /**
1989 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1990 * @hw: pointer to hardware structure
1991 */
1992 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1993 {
1994 struct ixgbe_mac_info *mac = &hw->mac;
1995
1996 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1997
1998 switch (hw->mac.ops.get_media_type(hw)) {
1999 case ixgbe_media_type_fiber:
2000 /* CS4227 does not support autoneg, so disable the laser control
2001 * functions for SFP+ fiber
2002 */
2003 mac->ops.disable_tx_laser = NULL;
2004 mac->ops.enable_tx_laser = NULL;
2005 mac->ops.flap_tx_laser = NULL;
2006 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2007 mac->ops.set_rate_select_speed =
2008 ixgbe_set_soft_rate_select_speed;
2009
2010 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2011 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2012 mac->ops.setup_mac_link =
2013 ixgbe_setup_mac_link_sfp_x550a;
2014 else
2015 mac->ops.setup_mac_link =
2016 ixgbe_setup_mac_link_sfp_x550em;
2017 break;
2018 case ixgbe_media_type_copper:
2019 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2020 break;
2021 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2022 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2023 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2024 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2025 mac->ops.check_link =
2026 ixgbe_check_mac_link_generic;
2027 } else {
2028 mac->ops.setup_link =
2029 ixgbe_setup_mac_link_t_X550em;
2030 }
2031 } else {
2032 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2033 mac->ops.check_link = ixgbe_check_link_t_X550em;
2034 }
2035 break;
2036 case ixgbe_media_type_backplane:
2037 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2038 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2039 mac->ops.setup_link = ixgbe_setup_sgmii;
2040 break;
2041 default:
2042 break;
2043 }
2044 }
2045
2046 /**
2047 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2048 * @hw: pointer to hardware structure
2049 * @speed: pointer to link speed
2050 * @autoneg: TRUE when autoneg or autotry is enabled
2051 */
2052 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2053 ixgbe_link_speed *speed,
2054 bool *autoneg)
2055 {
2056 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2057
2058
2059 if (hw->phy.type == ixgbe_phy_fw) {
2060 *autoneg = TRUE;
2061 *speed = hw->phy.speeds_supported;
2062 return 0;
2063 }
2064
2065 /* SFP */
2066 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2067
2068 /* CS4227 SFP must not enable auto-negotiation */
2069 *autoneg = FALSE;
2070
2071 /* Check if 1G SFP module. */
2072 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2073 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2074 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2075 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2076 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2077 return IXGBE_SUCCESS;
2078 }
2079
2080 /* Link capabilities are based on SFP */
2081 if (hw->phy.multispeed_fiber)
2082 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2083 IXGBE_LINK_SPEED_1GB_FULL;
2084 else
2085 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2086 } else {
2087 switch (hw->phy.type) {
2088 case ixgbe_phy_ext_1g_t:
2089 case ixgbe_phy_sgmii:
2090 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2091 break;
2092 case ixgbe_phy_x550em_kr:
2093 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2094 /* check different backplane modes */
2095 if (hw->phy.nw_mng_if_sel &
2096 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2097 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2098 break;
2099 } else if (hw->device_id ==
2100 IXGBE_DEV_ID_X550EM_A_KR_L) {
2101 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2102 break;
2103 }
2104 }
2105 /* fall through */
2106 default:
2107 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2108 IXGBE_LINK_SPEED_1GB_FULL;
2109 break;
2110 }
2111 *autoneg = TRUE;
2112 }
2113
2114 return IXGBE_SUCCESS;
2115 }
2116
2117 /**
2118 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2119 * @hw: pointer to hardware structure
2120 * @lsc: pointer to boolean flag which indicates whether external Base T
2121 * PHY interrupt is lsc
2122 *
2123 * Determine if external Base T PHY interrupt cause is high temperature
2124 * failure alarm or link status change.
2125 *
2126 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2127 * failure alarm, else return PHY access status.
2128 */
2129 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2130 {
2131 u32 status;
2132 u16 reg;
2133
2134 *lsc = FALSE;
2135
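/* Walk down the PHY alarm hierarchy: chip-level standard flags first,
 * then the vendor/global alarm 1 path (high temperature and device
 * fault), and finally vendor alarm 2 for link status change events.
 */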
2136 /* Vendor alarm triggered */
2137 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2138 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2139 &reg);
2140
2141 if (status != IXGBE_SUCCESS ||
2142 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2143 return status;
2144
2145 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2146 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2147 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2148 &reg);
2149
2150 if (status != IXGBE_SUCCESS ||
2151 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2152 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2153 return status;
2154
2155 /* Global alarm triggered */
2156 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2157 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2158 &reg);
2159
2160 if (status != IXGBE_SUCCESS)
2161 return status;
2162
2163 /* If high temperature failure, then return over temp error and exit */
2164 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2165 /* power down the PHY in case the PHY FW didn't already */
2166 ixgbe_set_copper_phy_power(hw, FALSE);
2167 return IXGBE_ERR_OVERTEMP;
2168 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2169 /* device fault alarm triggered */
2170 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2171 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2172 &reg);
2173
2174 if (status != IXGBE_SUCCESS)
2175 return status;
2176
2177 /* if device fault was due to high temp alarm handle and exit */
2178 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2179 /* power down the PHY in case the PHY FW didn't */
2180 ixgbe_set_copper_phy_power(hw, FALSE);
2181 return IXGBE_ERR_OVERTEMP;
2182 }
2183 }
2184
2185 /* Vendor alarm 2 triggered */
2186 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2187 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2188
2189 if (status != IXGBE_SUCCESS ||
2190 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2191 return status;
2192
2193 /* link connect/disconnect event occurred */
2194 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2195 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2196
2197 if (status != IXGBE_SUCCESS)
2198 return status;
2199
2200 /* Indicate LSC */
2201 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2202 *lsc = TRUE;
2203
2204 return IXGBE_SUCCESS;
2205 }
2206
2207 /**
2208 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2209 * @hw: pointer to hardware structure
2210 *
2211 * Enable link status change and temperature failure alarm for the external
2212 * Base T PHY
2213 *
2214 * Returns PHY access status
2215 */
2216 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2217 {
2218 u32 status;
2219 u16 reg;
2220 bool lsc;
2221
2222 /* Clear interrupt flags */
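/* The return value of this initial read is not checked; the read only
 * serves to clear any alarm flags that are already pending.
 */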
2223 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2224
2225 /* Enable link status change alarm */
2226
2227 /* Enable the LASI interrupts on X552 devices to receive notifications
2228 * of the link configurations of the external PHY and correspondingly
2229 * support the configuration of the internal iXFI link, since iXFI does
2230 * not support auto-negotiation. This is not required for X553 devices,
2231 * whose KR interface performs auto-negotiation and serves as the
2232 * internal link to the external PHY, so the check below skips enabling
2233 * LASI interrupts on X553 devices.
2234 */
2235 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2236 status = hw->phy.ops.read_reg(hw,
2237 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2238 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2239
2240 if (status != IXGBE_SUCCESS)
2241 return status;
2242
2243 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2244
2245 status = hw->phy.ops.write_reg(hw,
2246 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2247 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2248
2249 if (status != IXGBE_SUCCESS)
2250 return status;
2251 }
2252
2253 /* Enable high temperature failure and global fault alarms */
2254 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2255 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2256 &reg);
2257
2258 if (status != IXGBE_SUCCESS)
2259 return status;
2260
2261 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2262 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2263
2264 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2265 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2266 reg);
2267
2268 if (status != IXGBE_SUCCESS)
2269 return status;
2270
2271 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2272 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2273 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2274 &reg);
2275
2276 if (status != IXGBE_SUCCESS)
2277 return status;
2278
2279 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2280 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2281
2282 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2283 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2284 reg);
2285
2286 if (status != IXGBE_SUCCESS)
2287 return status;
2288
2289 /* Enable chip-wide vendor alarm */
2290 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2291 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2292 &reg);
2293
2294 if (status != IXGBE_SUCCESS)
2295 return status;
2296
2297 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2298
2299 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2300 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2301 reg);
2302
2303 return status;
2304 }
2305
2306 /**
2307 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2308 * @hw: pointer to hardware structure
2309 * @speed: link speed
2310 *
2311 * Configures the integrated KR PHY.
2312 **/
2313 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2314 ixgbe_link_speed speed)
2315 {
2316 s32 status;
2317 u32 reg_val;
2318
2319 status = hw->mac.ops.read_iosf_sb_reg(hw,
2320 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2321 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2322 if (status)
2323 return status;
2324
2325 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2326 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2327 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2328
2329 /* Advertise 10G support. */
2330 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2331 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2332
2333 /* Advertise 1G support. */
2334 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2335 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2336
2337 status = hw->mac.ops.write_iosf_sb_reg(hw,
2338 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2339 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2340
2341 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2342 /* Set lane mode to KR auto negotiation */
2343 status = hw->mac.ops.read_iosf_sb_reg(hw,
2344 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2345 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2346
2347 if (status)
2348 return status;
2349
2350 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2351 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2352 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2353 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2354 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2355
2356 status = hw->mac.ops.write_iosf_sb_reg(hw,
2357 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2358 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2359 }
2360
2361 return ixgbe_restart_an_internal_phy_x550em(hw);
2362 }
2363
2364 /**
2365 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2366 * @hw: pointer to hardware structure
2367 */
2368 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2369 {
2370 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2371 s32 rc;
2372
2373 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2374 return IXGBE_SUCCESS;
2375
2376 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2377 if (rc)
2378 return rc;
2379 memset(store, 0, sizeof(store));
2380
2381 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2382 if (rc)
2383 return rc;
2384
2385 return ixgbe_setup_fw_link(hw);
2386 }
2387
2388 /**
2389 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2390 * @hw: pointer to hardware structure
2391 */
2392 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2393 {
2394 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2395 s32 rc;
2396
2397 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2398 if (rc)
2399 return rc;
2400
2401 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2402 ixgbe_shutdown_fw_phy(hw);
2403 return IXGBE_ERR_OVERTEMP;
2404 }
2405 return IXGBE_SUCCESS;
2406 }
2407
2408 /**
2409 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2410 * @hw: pointer to hardware structure
2411 *
2412 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2413 * values.
2414 **/
2415 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2416 {
2417 /* Save NW management interface connected on board. This is used
2418 * to determine internal PHY mode.
2419 */
2420 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2421
2422 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2423 * PHY address. This register field has only been used for X552.
2424 */
2425 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2426 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2427 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2428 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2429 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2430 }
2431
2432 return IXGBE_SUCCESS;
2433 }
2434
2435 /**
2436 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2437 * @hw: pointer to hardware structure
2438 *
2439 * Initialize any function pointers that were not able to be
2440 * set during init_shared_code because the PHY/SFP type was
2441 * not known. Perform the SFP init if necessary.
2442 */
2443 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2444 {
2445 struct ixgbe_phy_info *phy = &hw->phy;
2446 s32 ret_val;
2447
2448 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2449
2450 hw->mac.ops.set_lan_id(hw);
2451 ixgbe_read_mng_if_sel_x550em(hw);
2452
2453 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2454 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2455 ixgbe_setup_mux_ctl(hw);
2456 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2457 }
2458
2459 switch (hw->device_id) {
2460 case IXGBE_DEV_ID_X550EM_A_1G_T:
2461 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2462 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2463 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2464 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2465 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2466 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2467 if (hw->bus.lan_id)
2468 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2469 else
2470 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2471
2472 break;
2473 case IXGBE_DEV_ID_X550EM_A_10G_T:
2474 case IXGBE_DEV_ID_X550EM_A_SFP:
2475 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2476 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2477 if (hw->bus.lan_id)
2478 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2479 else
2480 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2481 break;
2482 case IXGBE_DEV_ID_X550EM_X_SFP:
2483 /* set up for CS4227 usage */
2484 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2485 break;
2486 case IXGBE_DEV_ID_X550EM_X_1G_T:
2487 phy->ops.read_reg_mdi = NULL;
2488 phy->ops.write_reg_mdi = NULL;
2489 break;
2490 default:
2491 break;
2492 }
2493
2494 /* Identify the PHY or SFP module */
2495 ret_val = phy->ops.identify(hw);
2496 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2497 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2498 return ret_val;
2499
2500 /* Setup function pointers based on detected hardware */
2501 ixgbe_init_mac_link_ops_X550em(hw);
2502 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2503 phy->ops.reset = NULL;
2504
2505 /* Set functions pointers based on phy type */
2506 switch (hw->phy.type) {
2507 case ixgbe_phy_x550em_kx4:
2508 phy->ops.setup_link = NULL;
2509 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2510 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2511 break;
2512 case ixgbe_phy_x550em_kr:
2513 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2514 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2515 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2516 break;
2517 case ixgbe_phy_ext_1g_t:
2518 /* link is managed by FW */
2519 phy->ops.setup_link = NULL;
2520 phy->ops.reset = NULL;
2521 break;
2522 case ixgbe_phy_x550em_xfi:
2523 /* link is managed by HW */
2524 phy->ops.setup_link = NULL;
2525 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2526 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2527 break;
2528 case ixgbe_phy_x550em_ext_t:
2529 /* If internal link mode is XFI, then setup iXFI internal link,
2530 * else setup KR now.
2531 */
2532 phy->ops.setup_internal_link =
2533 ixgbe_setup_internal_phy_t_x550em;
2534
2535 /* setup SW LPLU only for first revision of X550EM_x */
2536 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2537 !(IXGBE_FUSES0_REV_MASK &
2538 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2539 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2540
2541 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2542 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2543 break;
2544 case ixgbe_phy_sgmii:
2545 phy->ops.setup_link = NULL;
2546 break;
2547 case ixgbe_phy_fw:
2548 phy->ops.setup_link = ixgbe_setup_fw_link;
2549 phy->ops.reset = ixgbe_reset_phy_fw;
2550 break;
2551 default:
2552 break;
2553 }
2554 return ret_val;
2555 }
2556
2557 /**
2558 * ixgbe_set_mdio_speed - Set MDIO clock speed
2559 * @hw: pointer to hardware structure
2560 */
2561 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2562 {
2563 u32 hlreg0;
2564
2565 switch (hw->device_id) {
2566 case IXGBE_DEV_ID_X550EM_X_10G_T:
2567 case IXGBE_DEV_ID_X550EM_A_SGMII:
2568 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2569 case IXGBE_DEV_ID_X550EM_A_10G_T:
2570 case IXGBE_DEV_ID_X550EM_A_SFP:
2571 case IXGBE_DEV_ID_X550EM_A_QSFP:
2572 /* Config MDIO clock speed before the first MDIO PHY access */
2573 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2574 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2575 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2576 break;
2577 case IXGBE_DEV_ID_X550EM_A_1G_T:
2578 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2579 /* Select fast MDIO clock speed for these devices */
2580 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2581 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2582 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2583 break;
2584 default:
2585 break;
2586 }
2587 }
2588
2589 /**
2590 * ixgbe_reset_hw_X550em - Perform hardware reset
2591 * @hw: pointer to hardware structure
2592 *
2593 * Resets the hardware by resetting the transmit and receive units, masks
2594 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
2595 * reset.
2596 */
2597 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2598 {
2599 ixgbe_link_speed link_speed;
2600 s32 status;
2601 u32 ctrl = 0;
2602 u32 i;
2603 bool link_up = FALSE;
2604 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2605
2606 DEBUGFUNC("ixgbe_reset_hw_X550em");
2607
2608 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2609 status = hw->mac.ops.stop_adapter(hw);
2610 if (status != IXGBE_SUCCESS) {
2611 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2612 return status;
2613 }
2614 /* flush pending Tx transactions */
2615 ixgbe_clear_tx_pending(hw);
2616
2617 ixgbe_set_mdio_speed(hw);
2618
2619 /* PHY ops must be identified and initialized prior to reset */
2620 status = hw->phy.ops.init(hw);
2621
2622 if (status)
2623 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2624 status);
2625
2626 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2627 status == IXGBE_ERR_PHY_ADDR_INVALID) {
2628 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2629 return status;
2630 }
2631
2632 /* start the external PHY */
2633 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2634 status = ixgbe_init_ext_t_x550em(hw);
2635 if (status) {
2636 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2637 status);
2638 return status;
2639 }
2640 }
2641
2642 /* Setup SFP module if there is one present. */
2643 if (hw->phy.sfp_setup_needed) {
2644 status = hw->mac.ops.setup_sfp(hw);
2645 hw->phy.sfp_setup_needed = FALSE;
2646 }
2647
2648 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2649 return status;
2650
2651 /* Reset PHY */
2652 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2653 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2654 return IXGBE_ERR_OVERTEMP;
2655 }
2656
2657 mac_reset_top:
2658 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2659 * If link reset is used when link is up, it might reset the PHY when
2660 * mng is using it. If link is down or the flag to force full link
2661 * reset is set, then perform link reset.
2662 */
2663 ctrl = IXGBE_CTRL_LNK_RST;
2664 if (!hw->force_full_reset) {
2665 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2666 if (link_up)
2667 ctrl = IXGBE_CTRL_RST;
2668 }
2669
2670 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2671 if (status != IXGBE_SUCCESS) {
2672 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2673 "semaphore failed with %d", status);
2674 return IXGBE_ERR_SWFW_SYNC;
2675 }
2676 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2677 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2678 IXGBE_WRITE_FLUSH(hw);
2679 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2680
2681 /* Poll for reset bit to self-clear meaning reset is complete */
2682 for (i = 0; i < 10; i++) {
2683 usec_delay(1);
2684 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2685 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2686 break;
2687 }
2688
2689 if (ctrl & IXGBE_CTRL_RST_MASK) {
2690 status = IXGBE_ERR_RESET_FAILED;
2691 DEBUGOUT("Reset polling failed to complete.\n");
2692 }
2693
2694 msec_delay(50);
2695
2696 /* Double resets are required for recovery from certain error
2697 * conditions. Between resets, it is necessary to stall to
2698 * allow time for any pending HW events to complete.
2699 */
2700 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2701 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2702 goto mac_reset_top;
2703 }
2704
2705 /* Store the permanent mac address */
2706 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2707
2708 /* Store MAC address from RAR0, clear receive address registers, and
2709 * clear the multicast table. Also reset num_rar_entries to 128,
2710 * since we modify this value when programming the SAN MAC address.
2711 */
2712 hw->mac.num_rar_entries = 128;
2713 hw->mac.ops.init_rx_addrs(hw);
2714
2715 ixgbe_set_mdio_speed(hw);
2716
2717 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2718 ixgbe_setup_mux_ctl(hw);
2719
2720 if (status != IXGBE_SUCCESS)
2721 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2722
2723 return status;
2724 }
2725
2726 /**
2727 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2728 * @hw: pointer to hardware structure
2729 */
2730 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2731 {
2732 u32 status;
2733 u16 reg;
2734
2735 status = hw->phy.ops.read_reg(hw,
2736 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2737 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2738 &reg);
2739
2740 if (status != IXGBE_SUCCESS)
2741 return status;
2742
2743 /* If PHY FW reset completed bit is set then this is the first
2744 * SW instance after a power on so the PHY FW must be un-stalled.
2745 */
2746 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2747 status = hw->phy.ops.read_reg(hw,
2748 IXGBE_MDIO_GLOBAL_RES_PR_10,
2749 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2750 &reg);
2751
2752 if (status != IXGBE_SUCCESS)
2753 return status;
2754
2755 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2756
2757 status = hw->phy.ops.write_reg(hw,
2758 IXGBE_MDIO_GLOBAL_RES_PR_10,
2759 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2760 reg);
2761
2762 if (status != IXGBE_SUCCESS)
2763 return status;
2764 }
2765
2766 return status;
2767 }
2768
2769 /**
2770 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2771 * @hw: pointer to hardware structure
2772 **/
2773 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2774 {
2775 /* leave link alone for 2.5G */
2776 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2777 return IXGBE_SUCCESS;
2778
2779 if (ixgbe_check_reset_blocked(hw))
2780 return 0;
2781
2782 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2783 }
2784
2785 /**
2786 * ixgbe_setup_mac_link_sfp_x550em - Setup internal/external PHYs for SFP
2787 * @hw: pointer to hardware structure
2788 * @speed: new link speed
2789 * @autoneg_wait_to_complete: unused
2790 *
2791 * Configure the external PHY and the integrated KR PHY for SFP support.
2792 **/
2793 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2794 ixgbe_link_speed speed,
2795 bool autoneg_wait_to_complete)
2796 {
2797 s32 ret_val;
2798 u16 reg_slice, reg_val;
2799 bool setup_linear = FALSE;
2800 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2801
2802 /* Check if SFP module is supported and linear */
2803 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2804
2805 /* If no SFP module is present, return success: there is no reason to
2806 * configure the CS4227, and an SFP-not-present error is not expected
2807 * in the setup MAC link flow.
2808 */
2809 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2810 return IXGBE_SUCCESS;
2811
2812 if (ret_val != IXGBE_SUCCESS)
2813 return ret_val;
2814
2815 /* Configure internal PHY for KR/KX. */
2816 ixgbe_setup_kr_speed_x550em(hw, speed);
2817
2818 /* Configure CS4227 LINE side to proper mode. */
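/* Each LAN port addresses its own slice of the CS4227 register space,
 * selected by lan_id in the upper bits of the register offset.
 */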
2819 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2820 (hw->bus.lan_id << 12);
2821 if (setup_linear)
2822 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2823 else
2824 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2825 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2826 reg_val);
2827 return ret_val;
2828 }
2829
2830 /**
2831 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2832 * @hw: pointer to hardware structure
2833 * @speed: the link speed to force
2834 *
2835 * Configures the integrated PHY for native SFI mode. Used to connect the
2836 * internal PHY directly to an SFP cage, without autonegotiation.
2837 **/
2838 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2839 {
2840 struct ixgbe_mac_info *mac = &hw->mac;
2841 s32 status;
2842 u32 reg_val;
2843
2844 /* Disable all AN and force speed to 10G Serial. */
2845 status = mac->ops.read_iosf_sb_reg(hw,
2846 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2847 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2848 if (status != IXGBE_SUCCESS)
2849 return status;
2850
2851 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2852 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2853 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2854 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2855
2856 /* Select forced link speed for internal PHY. */
2857 switch (*speed) {
2858 case IXGBE_LINK_SPEED_10GB_FULL:
2859 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2860 break;
2861 case IXGBE_LINK_SPEED_1GB_FULL:
2862 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2863 break;
2864 case 0:
2865 /* media none (linkdown) */
2866 break;
2867 default:
2868 /* Other link speeds are not supported by internal PHY. */
2869 return IXGBE_ERR_LINK_SETUP;
2870 }
2871
2872 status = mac->ops.write_iosf_sb_reg(hw,
2873 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2874 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2875
2876 /* Toggle port SW reset by AN reset. */
2877 status = ixgbe_restart_an_internal_phy_x550em(hw);
2878
2879 return status;
2880 }
2881
2882 /**
2883 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2884 * @hw: pointer to hardware structure
2885 * @speed: new link speed
2886 * @autoneg_wait_to_complete: unused
2887 *
2888 * Configure the integrated PHY for SFP support.
2889 **/
2890 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2891 ixgbe_link_speed speed,
2892 bool autoneg_wait_to_complete)
2893 {
2894 s32 ret_val;
2895 u16 reg_phy_ext;
2896 bool setup_linear = FALSE;
2897 u32 reg_slice, reg_phy_int, slice_offset;
2898
2899 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2900
2901 /* Check if SFP module is supported and linear */
2902 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2903
2904 /* If no SFP module is present, return success: an SFP-not-present
2905 * error is not expected in the setup MAC link flow.
2906 */
2907 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2908 return IXGBE_SUCCESS;
2909
2910 if (ret_val != IXGBE_SUCCESS)
2911 return ret_val;
2912
2913 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2914 /* Configure internal PHY for native SFI based on module type */
2915 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2916 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2917 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2918
2919 if (ret_val != IXGBE_SUCCESS)
2920 return ret_val;
2921
2922 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2923 if (!setup_linear)
2924 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2925
2926 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2927 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2928 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2929
2930 if (ret_val != IXGBE_SUCCESS)
2931 return ret_val;
2932
2933 /* Setup SFI internal link. */
2934 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2935 } else {
2936 /* Configure internal PHY for KR/KX. */
2937 ixgbe_setup_kr_speed_x550em(hw, speed);
2938
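/* In this path (X550EM_A_SFP) the external CS4227/CS4223 is reached
 * over MDIO, so a valid PHY address must already have been read from
 * NW_MNG_IF_SEL by ixgbe_read_mng_if_sel_x550em().
 */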
2939 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2940 /* Find Address */
2941 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2942 return IXGBE_ERR_PHY_ADDR_INVALID;
2943 }
2944
2945 /* Get external PHY SKU id */
2946 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2947 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2948
2949 if (ret_val != IXGBE_SUCCESS)
2950 return ret_val;
2951
2952 /* When configuring quad port CS4223, the MAC instance is part
2953 * of the slice offset.
2954 */
2955 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2956 slice_offset = (hw->bus.lan_id +
2957 (hw->bus.instance_id << 1)) << 12;
2958 else
2959 slice_offset = hw->bus.lan_id << 12;
2960
2961 /* Configure CS4227/CS4223 LINE side to proper mode. */
2962 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2963
2964 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2965 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2966
2967 if (ret_val != IXGBE_SUCCESS)
2968 return ret_val;
2969
2970 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2971 (IXGBE_CS4227_EDC_MODE_SR << 1));
2972
2973 if (setup_linear)
2974 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2975 else
2976 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2977 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2978 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2979
2980 /* Flush previous write with a read */
2981 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2982 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2983 }
2984 return ret_val;
2985 }
2986
2987 /**
2988 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2989 * @hw: pointer to hardware structure
2990 *
2991 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2992 **/
2993 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
2994 {
2995 struct ixgbe_mac_info *mac = &hw->mac;
2996 s32 status;
2997 u32 reg_val;
2998
2999 /* Disable training protocol FSM. */
3000 status = mac->ops.read_iosf_sb_reg(hw,
3001 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3002 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3003 if (status != IXGBE_SUCCESS)
3004 return status;
3005 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3006 status = mac->ops.write_iosf_sb_reg(hw,
3007 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3008 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3009 if (status != IXGBE_SUCCESS)
3010 return status;
3011
3012 /* Disable Flex from training TXFFE. */
3013 status = mac->ops.read_iosf_sb_reg(hw,
3014 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3015 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3016 if (status != IXGBE_SUCCESS)
3017 return status;
3018 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3019 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3020 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3021 status = mac->ops.write_iosf_sb_reg(hw,
3022 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3023 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3024 if (status != IXGBE_SUCCESS)
3025 return status;
3026 status = mac->ops.read_iosf_sb_reg(hw,
3027 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3028 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3029 if (status != IXGBE_SUCCESS)
3030 return status;
3031 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3032 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3033 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3034 status = mac->ops.write_iosf_sb_reg(hw,
3035 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3036 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3037 if (status != IXGBE_SUCCESS)
3038 return status;
3039
3040 /* Enable override for coefficients. */
3041 status = mac->ops.read_iosf_sb_reg(hw,
3042 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3043 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3044 if (status != IXGBE_SUCCESS)
3045 return status;
3046 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3047 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3048 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3049 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3050 status = mac->ops.write_iosf_sb_reg(hw,
3051 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3052 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3053 return status;
3054 }
3055
3056 /**
3057 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3058 * @hw: pointer to hardware structure
3059 * @speed: the link speed to force
3060 *
3061 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3062 * internal and external PHY at a specific speed, without autonegotiation.
3063 **/
3064 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3065 {
3066 struct ixgbe_mac_info *mac = &hw->mac;
3067 s32 status;
3068 u32 reg_val;
3069
3070 /* iXFI is only supported with X552 */
3071 if (mac->type != ixgbe_mac_X550EM_x)
3072 return IXGBE_ERR_LINK_SETUP;
3073
3074 /* Disable AN and force speed to 10G Serial. */
3075 status = mac->ops.read_iosf_sb_reg(hw,
3076 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3077 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3078 if (status != IXGBE_SUCCESS)
3079 return status;
3080
3081 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3082 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3083
3084 /* Select forced link speed for internal PHY. */
3085 switch (*speed) {
3086 case IXGBE_LINK_SPEED_10GB_FULL:
3087 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3088 break;
3089 case IXGBE_LINK_SPEED_1GB_FULL:
3090 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3091 break;
3092 default:
3093 /* Other link speeds are not supported by internal KR PHY. */
3094 return IXGBE_ERR_LINK_SETUP;
3095 }
3096
3097 status = mac->ops.write_iosf_sb_reg(hw,
3098 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3099 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3100 if (status != IXGBE_SUCCESS)
3101 return status;
3102
3103 /* Additional configuration needed for x550em_x */
3104 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3105 status = ixgbe_setup_ixfi_x550em_x(hw);
3106 if (status != IXGBE_SUCCESS)
3107 return status;
3108 }
3109
3110 /* Toggle port SW reset by AN reset. */
3111 status = ixgbe_restart_an_internal_phy_x550em(hw);
3112
3113 return status;
3114 }
3115
3116 /**
3117 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3118 * @hw: address of hardware structure
3119 * @link_up: address of boolean to indicate link status
3120 *
3121 * Returns error code if unable to get link status.
3122 */
3123 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3124 {
3125 u32 ret;
3126 u16 autoneg_status;
3127
3128 *link_up = FALSE;
3129
3130 /* The link status bit latches low, so read the register twice; the second read reflects the current link state. */
3131 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3132 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3133 &autoneg_status);
3134 if (ret != IXGBE_SUCCESS)
3135 return ret;
3136
3137 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3138 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3139 &autoneg_status);
3140 if (ret != IXGBE_SUCCESS)
3141 return ret;
3142
3143 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3144
3145 return IXGBE_SUCCESS;
3146 }
3147
3148 /**
3149 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3150 * @hw: pointer to hardware structure
3151 *
3152 * Configures the link between the integrated KR PHY and the external X557 PHY
3153 * The driver will call this function when it gets a link status change
3154 * interrupt from the X557 PHY. This function configures the link speed
3155 * between the PHYs to match the link speed of the BASE-T link.
3156 *
3157 * A return of a non-zero value indicates an error, and the base driver should
3158 * not report link up.
3159 */
3160 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3161 {
3162 ixgbe_link_speed force_speed;
3163 bool link_up;
3164 u32 status;
3165 u16 speed;
3166
3167 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3168 return IXGBE_ERR_CONFIG;
3169
3170 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3171 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3172 /* If link is down, there is no setup necessary so return */
3173 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3174 if (status != IXGBE_SUCCESS)
3175 return status;
3176
3177 if (!link_up)
3178 return IXGBE_SUCCESS;
3179
3180 status = hw->phy.ops.read_reg(hw,
3181 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3182 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3183 &speed);
3184 if (status != IXGBE_SUCCESS)
3185 return status;
3186
3187 /* If link is still down - no setup is required so return */
3188 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3189 if (status != IXGBE_SUCCESS)
3190 return status;
3191 if (!link_up)
3192 return IXGBE_SUCCESS;
3193
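/* Link is checked again after reading the negotiated speed so that a
 * link drop during the read does not result in forcing a stale speed.
 */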
3194 /* clear everything but the speed and duplex bits */
3195 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3196
3197 switch (speed) {
3198 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3199 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3200 break;
3201 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3202 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3203 break;
3204 default:
3205 /* Internal PHY does not support anything else */
3206 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3207 }
3208
3209 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3210 } else {
3211 speed = IXGBE_LINK_SPEED_10GB_FULL |
3212 IXGBE_LINK_SPEED_1GB_FULL;
3213 return ixgbe_setup_kr_speed_x550em(hw, speed);
3214 }
3215 }
3216
3217 /**
3218 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3219 * @hw: pointer to hardware structure
3220 *
3221 * Configures the integrated KR PHY to use internal loopback mode.
3222 **/
3223 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3224 {
3225 s32 status;
3226 u32 reg_val;
3227
3228 /* Disable AN and force speed to 10G Serial. */
3229 status = hw->mac.ops.read_iosf_sb_reg(hw,
3230 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3231 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3232 if (status != IXGBE_SUCCESS)
3233 return status;
3234 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3235 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3236 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3237 status = hw->mac.ops.write_iosf_sb_reg(hw,
3238 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3239 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3240 if (status != IXGBE_SUCCESS)
3241 return status;
3242
3243 /* Set near-end loopback clocks. */
3244 status = hw->mac.ops.read_iosf_sb_reg(hw,
3245 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3246 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3247 if (status != IXGBE_SUCCESS)
3248 return status;
3249 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3250 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3251 status = hw->mac.ops.write_iosf_sb_reg(hw,
3252 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3253 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3254 if (status != IXGBE_SUCCESS)
3255 return status;
3256
3257 /* Set loopback enable. */
3258 status = hw->mac.ops.read_iosf_sb_reg(hw,
3259 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3260 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3261 if (status != IXGBE_SUCCESS)
3262 return status;
3263 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3264 status = hw->mac.ops.write_iosf_sb_reg(hw,
3265 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3266 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3267 if (status != IXGBE_SUCCESS)
3268 return status;
3269
3270 /* Training bypass. */
3271 status = hw->mac.ops.read_iosf_sb_reg(hw,
3272 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3273 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3274 if (status != IXGBE_SUCCESS)
3275 return status;
3276 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3277 status = hw->mac.ops.write_iosf_sb_reg(hw,
3278 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3279 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3280
3281 return status;
3282 }
3283
3284 /**
3285 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
3287 * @hw: pointer to hardware structure
3288 * @offset: offset of word in the EEPROM to read
3289 * @data: word read from the EEPROM
3290 *
3291 * Reads a 16 bit word from the EEPROM using the hostif.
3292 **/
3293 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3294 {
3295 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3296 struct ixgbe_hic_read_shadow_ram buffer;
3297 s32 status;
3298
3299 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3300 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3301 buffer.hdr.req.buf_lenh = 0;
3302 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3303 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3304
3305 /* convert offset from words to bytes */
3306 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3307 /* one word */
3308 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3309 buffer.pad2 = 0;
3310 buffer.pad3 = 0;
3311
3312 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3313 if (status)
3314 return status;
3315
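/* Issue the shadow RAM read with the semaphore held; on success the
 * requested word is returned through the FLEX_MNG response area.
 */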
3316 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3317 IXGBE_HI_COMMAND_TIMEOUT);
3318 if (!status) {
3319 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3320 FW_NVM_DATA_OFFSET);
3321 }
3322
3323 hw->mac.ops.release_swfw_sync(hw, mask);
3324 return status;
3325 }
3326
3327 /**
3328 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3329 * @hw: pointer to hardware structure
3330 * @offset: offset of word in the EEPROM to read
3331 * @words: number of words
3332 * @data: word(s) read from the EEPROM
3333 *
3334 * Reads one or more 16-bit words from the EEPROM using the hostif.
3335 **/
3336 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3337 u16 offset, u16 words, u16 *data)
3338 {
3339 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3340 struct ixgbe_hic_read_shadow_ram buffer;
3341 u32 current_word = 0;
3342 u16 words_to_read;
3343 s32 status;
3344 u32 i;
3345
3346 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3347
3348 /* Take semaphore for the entire operation. */
3349 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3350 if (status) {
3351 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3352 return status;
3353 }
3354
3355 while (words) {
3356 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3357 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3358 else
3359 words_to_read = words;
3360
3361 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3362 buffer.hdr.req.buf_lenh = 0;
3363 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3364 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3365
3366 /* convert offset from words to bytes */
3367 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3368 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3369 buffer.pad2 = 0;
3370 buffer.pad3 = 0;
3371
3372 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3373 IXGBE_HI_COMMAND_TIMEOUT);
3374
3375 if (status) {
3376 DEBUGOUT("Host interface command failed\n");
3377 goto out;
3378 }
3379
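/* The response packs two 16-bit EEPROM words into each 32-bit
 * FLEX_MNG register: the low half holds the first word and the high
 * half the next one, which is why the loop below consumes two words
 * per register read.
 */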
3380 for (i = 0; i < words_to_read; i++) {
3381 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3382 2 * i;
3383 u32 value = IXGBE_READ_REG(hw, reg);
3384
3385 data[current_word] = (u16)(value & 0xffff);
3386 current_word++;
3387 i++;
3388 if (i < words_to_read) {
3389 value >>= 16;
3390 data[current_word] = (u16)(value & 0xffff);
3391 current_word++;
3392 }
3393 }
3394 words -= words_to_read;
3395 }
3396
3397 out:
3398 hw->mac.ops.release_swfw_sync(hw, mask);
3399 return status;
3400 }
3401
3402 /**
3403 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3404 * @hw: pointer to hardware structure
3405 * @offset: offset of word in the EEPROM to write
3406 * @data: word to write to the EEPROM
3407 *
3408 * Write a 16 bit word to the EEPROM using the hostif.
3409 **/
3410 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3411 u16 data)
3412 {
3413 s32 status;
3414 struct ixgbe_hic_write_shadow_ram buffer;
3415
3416 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3417
3418 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3419 buffer.hdr.req.buf_lenh = 0;
3420 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3421 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3422
3423 /* one word */
3424 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3425 buffer.data = data;
3426 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3427
3428 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3429 sizeof(buffer),
3430 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3431
3432 return status;
3433 }
3434
3435 /**
3436 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3437 * @hw: pointer to hardware structure
3438 * @offset: offset of word in the EEPROM to write
3439 * @data: word to write to the EEPROM
3440 *
3441 * Write a 16 bit word to the EEPROM using the hostif.
3442 **/
3443 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3444 u16 data)
3445 {
3446 s32 status = IXGBE_SUCCESS;
3447
3448 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3449
3450 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3451 IXGBE_SUCCESS) {
3452 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3453 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3454 } else {
3455 DEBUGOUT("write ee hostif failed to get semaphore");
3456 status = IXGBE_ERR_SWFW_SYNC;
3457 }
3458
3459 return status;
3460 }
3461
3462 /**
3463 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3464 * @hw: pointer to hardware structure
3465 * @offset: offset of word in the EEPROM to write
3466 * @words: number of words
3467 * @data: word(s) to write to the EEPROM
3468 *
3469 * Writes one or more 16-bit words to the EEPROM using the hostif.
3470 **/
3471 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3472 u16 offset, u16 words, u16 *data)
3473 {
3474 s32 status = IXGBE_SUCCESS;
3475 u32 i = 0;
3476
3477 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3478
3479 /* Take semaphore for the entire operation. */
3480 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3481 if (status != IXGBE_SUCCESS) {
3482 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3483 goto out;
3484 }
3485
3486 for (i = 0; i < words; i++) {
3487 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3488 data[i]);
3489
3490 if (status != IXGBE_SUCCESS) {
3491 DEBUGOUT("Eeprom buffered write failed\n");
3492 break;
3493 }
3494 }
3495
3496 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3497 out:
3498
3499 return status;
3500 }
3501
3502 /**
3503 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3504 * @hw: pointer to hardware structure
3505 * @ptr: pointer offset in eeprom
3506 * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
3507 * @csum: address of checksum to update
3508 * @buffer: optional pointer to a caller-supplied EEPROM image; if NULL, data is read from the EEPROM
3509 * @buffer_size: size of buffer
3510 *
3511 * Returns error status for any failure
3512 */
3513 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3514 u16 size, u16 *csum, u16 *buffer,
3515 u32 buffer_size)
3516 {
3517 u16 buf[256];
3518 s32 status;
3519 u16 length, bufsz, i, start;
3520 u16 *local_buffer;
3521
3522 bufsz = sizeof(buf) / sizeof(buf[0]);
3523
3524 /* Read a chunk at the pointer location */
3525 if (!buffer) {
3526 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3527 if (status) {
3528 DEBUGOUT("Failed to read EEPROM image\n");
3529 return status;
3530 }
3531 local_buffer = buf;
3532 } else {
3533 if (buffer_size < ptr)
3534 return IXGBE_ERR_PARAM;
3535 local_buffer = &buffer[ptr];
3536 }
3537
3538 if (size) {
3539 start = 0;
3540 length = size;
3541 } else {
3542 start = 1;
3543 length = local_buffer[0];
3544
3545 /* Skip pointer section if length is invalid. */
3546 if (length == 0xFFFF || length == 0 ||
3547 (ptr + length) >= hw->eeprom.word_size)
3548 return IXGBE_SUCCESS;
3549 }
3550
3551 if (buffer && ((u32)start + (u32)length > buffer_size))
3552 return IXGBE_ERR_PARAM;
3553
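/* Sum the section word by word. When no caller-supplied image is
 * available, refill buf in bufsz-sized chunks from the EEPROM as the
 * index runs past the end of the current chunk.
 */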
3554 for (i = start; length; i++, length--) {
3555 if (i == bufsz && !buffer) {
3556 ptr += bufsz;
3557 i = 0;
3558 if (length < bufsz)
3559 bufsz = length;
3560
3561 /* Read a chunk at the pointer location */
3562 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3563 bufsz, buf);
3564 if (status) {
3565 DEBUGOUT("Failed to read EEPROM image\n");
3566 return status;
3567 }
3568 }
3569 *csum += local_buffer[i];
3570 }
3571 return IXGBE_SUCCESS;
3572 }
3573
3574 /**
3575 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3576 * @hw: pointer to hardware structure
3577 * @buffer: optional pointer to a caller-supplied EEPROM image to checksum; if NULL, data is read from the EEPROM
3578 * @buffer_size: size of buffer
3579 *
3580 * Returns a negative error code on error, or the 16-bit checksum
3581 **/
3582 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3583 {
3584 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3585 u16 *local_buffer;
3586 s32 status;
3587 u16 checksum = 0;
3588 u16 pointer, i, size;
3589
3590 DEBUGFUNC("ixgbe_calc_checksum_X550");
3591
3592 hw->eeprom.ops.init_params(hw);
3593
3594 if (!buffer) {
3595 /* Read pointer area */
3596 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3597 IXGBE_EEPROM_LAST_WORD + 1,
3598 eeprom_ptrs);
3599 if (status) {
3600 DEBUGOUT("Failed to read EEPROM image\n");
3601 return status;
3602 }
3603 local_buffer = eeprom_ptrs;
3604 } else {
3605 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3606 return IXGBE_ERR_PARAM;
3607 local_buffer = buffer;
3608 }
3609
3610 /*
3611 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3612 * checksum word itself
3613 */
3614 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3615 if (i != IXGBE_EEPROM_CHECKSUM)
3616 checksum += local_buffer[i];
3617
3618 /*
3619 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3620 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3621 */
3622 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3623 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3624 continue;
3625
3626 pointer = local_buffer[i];
3627
3628 /* Skip pointer section if the pointer is invalid. */
3629 if (pointer == 0xFFFF || pointer == 0 ||
3630 pointer >= hw->eeprom.word_size)
3631 continue;
3632
3633 switch (i) {
3634 case IXGBE_PCIE_GENERAL_PTR:
3635 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3636 break;
3637 case IXGBE_PCIE_CONFIG0_PTR:
3638 case IXGBE_PCIE_CONFIG1_PTR:
3639 size = IXGBE_PCIE_CONFIG_SIZE;
3640 break;
3641 default:
3642 size = 0;
3643 break;
3644 }
3645
3646 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3647 buffer, buffer_size);
3648 if (status)
3649 return status;
3650 }
3651
3652 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3653
3654 return (s32)checksum;
3655 }
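
/*
 * Worked example of the checksum relation used above. All covered words are
 * summed with 16-bit wraparound, and the checksum is defined as the value
 * that makes the total equal IXGBE_EEPROM_SUM (0xBABA in this driver family):
 *
 *	sum(covered words)   = S
 *	checksum             = (u16)(IXGBE_EEPROM_SUM - S)
 *	(u16)(S + checksum)  = IXGBE_EEPROM_SUM
 *
 * For example, if S were 0x1234, the word stored at IXGBE_EEPROM_CHECKSUM
 * would be 0xBABA - 0x1234 = 0xA886.
 */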
3656
3657 /**
3658 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3659 * @hw: pointer to hardware structure
3660 *
3661 * Returns a negative error code on error, or the 16-bit checksum
3662 **/
3663 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3664 {
3665 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3666 }
3667
3668 /**
3669 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3670 * @hw: pointer to hardware structure
3671 * @checksum_val: calculated checksum
3672 *
3673 * Performs checksum calculation and validates the EEPROM checksum. If the
3674 * caller does not need checksum_val, the value can be NULL.
3675 **/
3676 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3677 {
3678 s32 status;
3679 u16 checksum;
3680 u16 read_checksum = 0;
3681
3682 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3683
3684 /* Read the first word from the EEPROM. If this times out or fails, do
3685 * not continue or we could be in for a very long wait while every
3686 * EEPROM read fails
3687 */
3688 status = hw->eeprom.ops.read(hw, 0, &checksum);
3689 if (status) {
3690 DEBUGOUT("EEPROM read failed\n");
3691 return status;
3692 }
3693
3694 status = hw->eeprom.ops.calc_checksum(hw);
3695 if (status < 0)
3696 return status;
3697
3698 checksum = (u16)(status & 0xffff);
3699
3700 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3701 &read_checksum);
3702 if (status)
3703 return status;
3704
3705 /* Verify read checksum from EEPROM is the same as
3706 * calculated checksum
3707 */
3708 if (read_checksum != checksum) {
3709 status = IXGBE_ERR_EEPROM_CHECKSUM;
3710 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3711 "Invalid EEPROM checksum");
3712 }
3713
3714 /* If the user cares, return the calculated checksum */
3715 if (checksum_val)
3716 *checksum_val = checksum;
3717
3718 return status;
3719 }
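
/*
 * Usage sketch (hypothetical caller): an attach-time sanity check of the NVM
 * image might look like the following; the DEBUGOUT1 format string is
 * illustrative only. Note that *checksum_val is filled in even on mismatch.
 *
 *	u16 csum_val;
 *	s32 err;
 *
 *	err = ixgbe_validate_eeprom_checksum_X550(hw, &csum_val);
 *	if (err == IXGBE_ERR_EEPROM_CHECKSUM)
 *		DEBUGOUT1("EEPROM checksum mismatch, calculated 0x%04x\n",
 *			  csum_val);
 */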
3720
3721 /**
3722 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3723 * @hw: pointer to hardware structure
3724 *
3725 * After writing EEPROM to shadow RAM using EEWR register, software calculates
3726 * checksum and updates the EEPROM and instructs the hardware to update
3727 * the flash.
3728 **/
3729 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3730 {
3731 s32 status;
3732 u16 checksum = 0;
3733
3734 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3735
3736 /* Read the first word from the EEPROM. If this times out or fails, do
3737 * not continue or we could be in for a very long wait while every
3738 * EEPROM read fails
3739 */
3740 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3741 if (status) {
3742 DEBUGOUT("EEPROM read failed\n");
3743 return status;
3744 }
3745
3746 status = ixgbe_calc_eeprom_checksum_X550(hw);
3747 if (status < 0)
3748 return status;
3749
3750 checksum = (u16)(status & 0xffff);
3751
3752 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3753 checksum);
3754 if (status)
3755 return status;
3756
3757 status = ixgbe_update_flash_X550(hw);
3758
3759 return status;
3760 }
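
/*
 * Usage sketch (hypothetical caller): after changing a shadow RAM word the
 * checksum and the flash copy must be refreshed; "offset" and "new_word"
 * below are placeholders.
 *
 *	status = ixgbe_write_ee_hostif_X550(hw, offset, new_word);
 *	if (status == IXGBE_SUCCESS)
 *		status = ixgbe_update_eeprom_checksum_X550(hw);
 */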
3761
3762 /**
3763 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3764 * @hw: pointer to hardware structure
3765 *
3766 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3767 **/
3768 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3769 {
3770 s32 status = IXGBE_SUCCESS;
3771 union ixgbe_hic_hdr2 buffer;
3772
3773 DEBUGFUNC("ixgbe_update_flash_X550");
3774
3775 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3776 buffer.req.buf_lenh = 0;
3777 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3778 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3779
3780 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3781 sizeof(buffer),
3782 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3783
3784 return status;
3785 }
3786
3787 /**
3788 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3789 * @hw: pointer to hardware structure
3790 *
3791 * Determines physical layer capabilities of the current configuration.
3792 **/
3793 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3794 {
3795 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3796 u16 ext_ability = 0;
3797
3798 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3799
3800 hw->phy.ops.identify(hw);
3801
3802 switch (hw->phy.type) {
3803 case ixgbe_phy_x550em_kr:
3804 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3805 if (hw->phy.nw_mng_if_sel &
3806 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3807 physical_layer =
3808 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3809 break;
3810 } else if (hw->device_id ==
3811 IXGBE_DEV_ID_X550EM_A_KR_L) {
3812 physical_layer =
3813 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3814 break;
3815 }
3816 }
3817 /* fall through */
3818 case ixgbe_phy_x550em_xfi:
3819 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3820 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3821 break;
3822 case ixgbe_phy_x550em_kx4:
3823 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3824 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3825 break;
3826 case ixgbe_phy_x550em_ext_t:
3827 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3828 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3829 &ext_ability);
3830 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3831 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3832 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3833 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3834 break;
3835 case ixgbe_phy_fw:
3836 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3837 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3838 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3839 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3840 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3841 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3842 break;
3843 case ixgbe_phy_sgmii:
3844 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3845 break;
3846 case ixgbe_phy_ext_1g_t:
3847 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3848 break;
3849 default:
3850 break;
3851 }
3852
3853 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3854 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3855
3856 return physical_layer;
3857 }
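
/*
 * Usage sketch (hypothetical caller): the returned value is a bit mask, so a
 * caller reporting supported media can test individual layer bits.
 *
 *	u64 layer = ixgbe_get_supported_physical_layer_X550em(hw);
 *
 *	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
 *		DEBUGOUT("10GBASE-T supported\n");
 *	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
 *		DEBUGOUT("1000BASE-T supported\n");
 */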
3858
3859 /**
3860 * ixgbe_get_bus_info_X550em - Set PCI bus info
3861 * @hw: pointer to hardware structure
3862 *
3863 * Sets bus link width and speed to unknown because X550em is
3864 * not a PCI device.
3865 **/
3866 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3867 {
3868
3869 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3870
3871 hw->bus.width = ixgbe_bus_width_unknown;
3872 hw->bus.speed = ixgbe_bus_speed_unknown;
3873
3874 hw->mac.ops.set_lan_id(hw);
3875
3876 return IXGBE_SUCCESS;
3877 }
3878
3879 /**
3880 * ixgbe_disable_rx_x550 - Disable RX unit
3881 * @hw: pointer to hardware structure
3882 *
3883 * Disables the Rx unit for X550, using a firmware command with a register write fallback.
3884 **/
3885 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3886 {
3887 u32 rxctrl, pfdtxgswc;
3888 s32 status;
3889 struct ixgbe_hic_disable_rxen fw_cmd;
3890
3891 DEBUGFUNC("ixgbe_disable_rx_x550");
3892
3893 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3894 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3895 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3896 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3897 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3898 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3899 hw->mac.set_lben = TRUE;
3900 } else {
3901 hw->mac.set_lben = FALSE;
3902 }
3903
3904 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3905 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3906 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3907 fw_cmd.port_number = (u8)hw->bus.lan_id;
3908
3909 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3910 sizeof(struct ixgbe_hic_disable_rxen),
3911 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3912
3913 /* If we fail - disable RX using register write */
3914 if (status) {
3915 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3916 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3917 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3918 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3919 }
3920 }
3921 }
3922 }
3923
3924 /**
3925 * ixgbe_enter_lplu_t_x550em - Transition to low power states
3926 * @hw: pointer to hardware structure
3927 *
3928 * Configures Low Power Link Up on transition to low power states
3929 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3930 * X557 PHY immediately prior to entering LPLU.
3931 **/
3932 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3933 {
3934 u16 an_10g_cntl_reg, autoneg_reg, speed;
3935 s32 status;
3936 ixgbe_link_speed lcd_speed;
3937 u32 save_autoneg;
3938 bool link_up;
3939
3940 /* SW LPLU not required on later HW revisions. */
3941 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3942 (IXGBE_FUSES0_REV_MASK &
3943 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3944 return IXGBE_SUCCESS;
3945
3946 /* If blocked by MNG FW, then don't restart AN */
3947 if (ixgbe_check_reset_blocked(hw))
3948 return IXGBE_SUCCESS;
3949
3950 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3951 if (status != IXGBE_SUCCESS)
3952 return status;
3953
3954 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3955
3956 if (status != IXGBE_SUCCESS)
3957 return status;
3958
3959 /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability
3960 * disabled, then force link down by entering low power mode.
3961 */
3962 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3963 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3964 return ixgbe_set_copper_phy_power(hw, FALSE);
3965
3966 /* Determine LCD */
3967 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3968
3969 if (status != IXGBE_SUCCESS)
3970 return status;
3971
3972 /* If no valid LCD link speed, then force link down and exit. */
3973 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3974 return ixgbe_set_copper_phy_power(hw, FALSE);
3975
3976 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3977 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3978 &speed);
3979
3980 if (status != IXGBE_SUCCESS)
3981 return status;
3982
3983 /* If no link now, speed is invalid so take link down */
3984 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3985 if (status != IXGBE_SUCCESS)
3986 return ixgbe_set_copper_phy_power(hw, FALSE);
3987
3988 /* clear everything but the speed bits */
3989 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3990
3991 /* If current speed is already LCD, then exit. */
3992 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3993 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3994 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3995 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3996 return status;
3997
3998 /* Clear AN completed indication */
3999 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
4000 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4001 &autoneg_reg);
4002
4003 if (status != IXGBE_SUCCESS)
4004 return status;
4005
4006 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4007 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4008 &an_10g_cntl_reg);
4009
4010 if (status != IXGBE_SUCCESS)
4011 return status;
4012
4013 status = hw->phy.ops.read_reg(hw,
4014 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4015 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4016 &autoneg_reg);
4017
4018 if (status != IXGBE_SUCCESS)
4019 return status;
4020
4021 save_autoneg = hw->phy.autoneg_advertised;
4022
4023 /* Setup link at least common link speed */
4024 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4025
4026 /* restore autoneg from before setting lplu speed */
4027 hw->phy.autoneg_advertised = save_autoneg;
4028
4029 return status;
4030 }
4031
4032 /**
4033 * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4034 * @hw: pointer to hardware structure
4035 * @lcd_speed: pointer to lowest common link speed
4036 *
4037 * Determine lowest common link speed with link partner.
4038 **/
4039 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4040 {
4041 u16 an_lp_status;
4042 s32 status;
4043 u16 word = hw->eeprom.ctrl_word_3;
4044
4045 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4046
4047 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4048 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4049 &an_lp_status);
4050
4051 if (status != IXGBE_SUCCESS)
4052 return status;
4053
4054 /* If link partner advertised 1G, return 1G */
4055 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4056 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4057 return status;
4058 }
4059
4060 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4061 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4062 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4063 return status;
4064
4065 /* Link partner not capable of lower speeds, return 10G */
4066 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4067 return status;
4068 }
4069
4070 /**
4071 * ixgbe_setup_fc_X550em - Set up flow control
4072 * @hw: pointer to hardware structure
4073 *
4074 * Called at init time to set up flow control.
4075 **/
4076 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4077 {
4078 s32 ret_val = IXGBE_SUCCESS;
4079 u32 pause, asm_dir, reg_val;
4080
4081 DEBUGFUNC("ixgbe_setup_fc_X550em");
4082
4083 /* Validate the requested mode */
4084 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4085 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4086 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4087 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4088 goto out;
4089 }
4090
4091 /* 10gig parts do not have a word in the EEPROM to determine the
4092 * default flow control setting, so we explicitly set it to full.
4093 */
4094 if (hw->fc.requested_mode == ixgbe_fc_default)
4095 hw->fc.requested_mode = ixgbe_fc_full;
4096
4097 /* Determine PAUSE and ASM_DIR bits. */
4098 switch (hw->fc.requested_mode) {
4099 case ixgbe_fc_none:
4100 pause = 0;
4101 asm_dir = 0;
4102 break;
4103 case ixgbe_fc_tx_pause:
4104 pause = 0;
4105 asm_dir = 1;
4106 break;
4107 case ixgbe_fc_rx_pause:
4108 /* Rx Flow control is enabled and Tx Flow control is
4109 * disabled by software override. Since there really
4110 * isn't a way to advertise that we are capable of RX
4111 * Pause ONLY, we will advertise that we support both
4112 * symmetric and asymmetric Rx PAUSE, as such we fall
4113 * through to the fc_full statement. Later, we will
4114 * disable the adapter's ability to send PAUSE frames.
4115 */
4116 case ixgbe_fc_full:
4117 pause = 1;
4118 asm_dir = 1;
4119 break;
4120 default:
4121 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4122 "Flow control param set incorrectly\n");
4123 ret_val = IXGBE_ERR_CONFIG;
4124 goto out;
4125 }
4126
4127 switch (hw->device_id) {
4128 case IXGBE_DEV_ID_X550EM_X_KR:
4129 case IXGBE_DEV_ID_X550EM_A_KR:
4130 case IXGBE_DEV_ID_X550EM_A_KR_L:
4131 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4132 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4133 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4134 if (ret_val != IXGBE_SUCCESS)
4135 goto out;
4136 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4137 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4138 if (pause)
4139 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4140 if (asm_dir)
4141 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4142 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4143 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4144 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4145
4146 /* This device does not fully support AN. */
4147 hw->fc.disable_fc_autoneg = TRUE;
4148 break;
4149 case IXGBE_DEV_ID_X550EM_X_XFI:
4150 hw->fc.disable_fc_autoneg = TRUE;
4151 break;
4152 default:
4153 break;
4154 }
4155
4156 out:
4157 return ret_val;
4158 }
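
/*
 * Summary of the PAUSE/ASM_DIR advertisement chosen above (IEEE 802.3
 * Annex 28B encoding); ixgbe_fc_rx_pause is advertised the same way as
 * ixgbe_fc_full, and the ability to send PAUSE frames is restricted later.
 *
 *	requested_mode      PAUSE  ASM_DIR
 *	ixgbe_fc_none         0       0
 *	ixgbe_fc_tx_pause     0       1
 *	ixgbe_fc_rx_pause     1       1
 *	ixgbe_fc_full         1       1
 */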
4159
4160 /**
4161 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4162 * @hw: pointer to hardware structure
4163 *
4164 * Enable flow control according to IEEE clause 37.
4165 **/
4166 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4167 {
4168 u32 link_s1, lp_an_page_low, an_cntl_1;
4169 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4170 ixgbe_link_speed speed;
4171 bool link_up;
4172
4173 /* AN should have completed when the cable was plugged in.
4174 * Look for reasons to bail out. Bail out if:
4175 * - FC autoneg is disabled, or if
4176 * - link is not up.
4177 */
4178 if (hw->fc.disable_fc_autoneg) {
4179 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4180 "Flow control autoneg is disabled");
4181 goto out;
4182 }
4183
4184 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4185 if (!link_up) {
4186 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4187 goto out;
4188 }
4189
4190 /* Check that auto-negotiation has completed */
4191 status = hw->mac.ops.read_iosf_sb_reg(hw,
4192 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4193 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4194
4195 if (status != IXGBE_SUCCESS ||
4196 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4197 DEBUGOUT("Auto-Negotiation did not complete\n");
4198 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4199 goto out;
4200 }
4201
4202 /* Read the 10g AN autoc and LP ability registers and resolve
4203 * local flow control settings accordingly
4204 */
4205 status = hw->mac.ops.read_iosf_sb_reg(hw,
4206 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4207 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4208
4209 if (status != IXGBE_SUCCESS) {
4210 DEBUGOUT("Failed to read the AN control register\n");
4211 goto out;
4212 }
4213
4214 status = hw->mac.ops.read_iosf_sb_reg(hw,
4215 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4216 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4217
4218 if (status != IXGBE_SUCCESS) {
4219 DEBUGOUT("Failed to read the link partner base page\n");
4220 goto out;
4221 }
4222
4223 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4224 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4225 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4226 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4227 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4228
4229 out:
4230 if (status == IXGBE_SUCCESS) {
4231 hw->fc.fc_was_autonegged = TRUE;
4232 } else {
4233 hw->fc.fc_was_autonegged = FALSE;
4234 hw->fc.current_mode = hw->fc.requested_mode;
4235 }
4236 }
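
/*
 * For reference, ixgbe_negotiate_fc() (ixgbe_common.c) resolves
 * hw->fc.current_mode from the local and link-partner PAUSE/ASM_DIR bits
 * roughly per IEEE 802.3 Annex 28B:
 *
 *	local SYM  local ASM  LP SYM  LP ASM   resolved mode
 *	    1          -         1       -     full (rx_pause if the
 *	                                       requested mode was not full)
 *	    0          1         1       1     tx_pause
 *	    1          1         0       1     rx_pause
 *	    otherwise                          none
 */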
4237
4238 /**
4239 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4240 * @hw: pointer to hardware structure
4241 *
4242 **/
4243 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4244 {
4245 hw->fc.fc_was_autonegged = FALSE;
4246 hw->fc.current_mode = hw->fc.requested_mode;
4247 }
4248
4249 /**
4250 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4251 * @hw: pointer to hardware structure
4252 *
4253 * Enable flow control according to IEEE clause 37.
4254 **/
4255 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4256 {
4257 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4258 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4259 ixgbe_link_speed speed;
4260 bool link_up;
4261
4262 /* AN should have completed when the cable was plugged in.
4263 * Look for reasons to bail out. Bail out if:
4264 * - FC autoneg is disabled, or if
4265 * - link is not up.
4266 */
4267 if (hw->fc.disable_fc_autoneg) {
4268 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4269 "Flow control autoneg is disabled");
4270 goto out;
4271 }
4272
4273 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4274 if (!link_up) {
4275 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4276 goto out;
4277 }
4278
4279 /* Check if auto-negotiation has completed */
4280 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4281 if (status != IXGBE_SUCCESS ||
4282 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4283 DEBUGOUT("Auto-Negotiation did not complete\n");
4284 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4285 goto out;
4286 }
4287
4288 /* Negotiate the flow control */
4289 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4290 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4291 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4292 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4293 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4294
4295 out:
4296 if (status == IXGBE_SUCCESS) {
4297 hw->fc.fc_was_autonegged = TRUE;
4298 } else {
4299 hw->fc.fc_was_autonegged = FALSE;
4300 hw->fc.current_mode = hw->fc.requested_mode;
4301 }
4302 }
4303
4304 /**
4305 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4306 * @hw: pointer to hardware structure
4307 *
4308 * Called at init time to set up flow control.
4309 **/
4310 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4311 {
4312 s32 status = IXGBE_SUCCESS;
4313 u32 an_cntl = 0;
4314
4315 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4316
4317 /* Validate the requested mode */
4318 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4319 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4320 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4321 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4322 }
4323
4324 if (hw->fc.requested_mode == ixgbe_fc_default)
4325 hw->fc.requested_mode = ixgbe_fc_full;
4326
4327 /* Set up the 1G and 10G flow control advertisement registers so the
4328 * HW will be able to do FC autoneg once the cable is plugged in. If
4329 * we link at 10G, the 1G advertisement is harmless and vice versa.
4330 */
4331 status = hw->mac.ops.read_iosf_sb_reg(hw,
4332 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4333 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4334
4335 if (status != IXGBE_SUCCESS) {
4336 DEBUGOUT("Failed to read the AN control register\n");
4337 return status;
4338 }
4339
4340 /* The possible values of fc.requested_mode are:
4341 * 0: Flow control is completely disabled
4342 * 1: Rx flow control is enabled (we can receive pause frames,
4343 * but not send pause frames).
4344 * 2: Tx flow control is enabled (we can send pause frames but
4345 * we do not support receiving pause frames).
4346 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4347 * other: Invalid.
4348 */
4349 switch (hw->fc.requested_mode) {
4350 case ixgbe_fc_none:
4351 /* Flow control completely disabled by software override. */
4352 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4353 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4354 break;
4355 case ixgbe_fc_tx_pause:
4356 /* Tx Flow control is enabled, and Rx Flow control is
4357 * disabled by software override.
4358 */
4359 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4360 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4361 break;
4362 case ixgbe_fc_rx_pause:
4363 /* Rx Flow control is enabled and Tx Flow control is
4364 * disabled by software override. Since there really
4365 * isn't a way to advertise that we are capable of RX
4366 * Pause ONLY, we will advertise that we support both
4367 * symmetric and asymmetric Rx PAUSE, as such we fall
4368 * through to the fc_full statement. Later, we will
4369 * disable the adapter's ability to send PAUSE frames.
4370 */
4371 case ixgbe_fc_full:
4372 /* Flow control (both Rx and Tx) is enabled by SW override. */
4373 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4374 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4375 break;
4376 default:
4377 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4378 "Flow control param set incorrectly\n");
4379 return IXGBE_ERR_CONFIG;
4380 }
4381
4382 status = hw->mac.ops.write_iosf_sb_reg(hw,
4383 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4384 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4385
4386 /* Restart auto-negotiation. */
4387 status = ixgbe_restart_an_internal_phy_x550em(hw);
4388
4389 return status;
4390 }
4391
4392 /**
4393 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4394 * @hw: pointer to hardware structure
4395 * @state: set mux if 1, clear if 0
4396 */
4397 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4398 {
4399 u32 esdp;
4400
4401 if (!hw->bus.lan_id)
4402 return;
4403 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4404 if (state)
4405 esdp |= IXGBE_ESDP_SDP1;
4406 else
4407 esdp &= ~IXGBE_ESDP_SDP1;
4408 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4409 IXGBE_WRITE_FLUSH(hw);
4410 }
4411
4412 /**
4413 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4414 * @hw: pointer to hardware structure
4415 * @mask: Mask to specify which semaphore to acquire
4416 *
4417 * Acquires the SWFW semaphore and sets the I2C MUX
4418 **/
4419 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4420 {
4421 s32 status;
4422
4423 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4424
4425 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4426 if (status)
4427 return status;
4428
4429 if (mask & IXGBE_GSSR_I2C_MASK)
4430 ixgbe_set_mux(hw, 1);
4431
4432 return IXGBE_SUCCESS;
4433 }
4434
4435 /**
4436 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4437 * @hw: pointer to hardware structure
4438 * @mask: Mask to specify which semaphore to release
4439 *
4440 * Releases the SWFW semaphore and sets the I2C MUX
4441 **/
4442 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4443 {
4444 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4445
4446 if (mask & IXGBE_GSSR_I2C_MASK)
4447 ixgbe_set_mux(hw, 0);
4448
4449 ixgbe_release_swfw_sync_X540(hw, mask);
4450 }
4451
4452 /**
4453 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4454 * @hw: pointer to hardware structure
4455 * @mask: Mask to specify which semaphore to acquire
4456 *
4457 * Acquires the SWFW semaphore and gets the shared PHY token as needed
4458 */
4459 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4460 {
4461 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4462 int retries = FW_PHY_TOKEN_RETRIES;
4463 s32 status = IXGBE_SUCCESS;
4464
4465 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4466
4467 while (--retries) {
4468 status = IXGBE_SUCCESS;
4469 if (hmask)
4470 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4471 if (status) {
4472 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4473 status);
4474 return status;
4475 }
4476 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4477 return IXGBE_SUCCESS;
4478
4479 status = ixgbe_get_phy_token(hw);
4480 if (status == IXGBE_ERR_TOKEN_RETRY)
4481 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4482 status);
4483
4484 if (status == IXGBE_SUCCESS)
4485 return IXGBE_SUCCESS;
4486
4487 if (hmask)
4488 ixgbe_release_swfw_sync_X540(hw, hmask);
4489
4490 if (status != IXGBE_ERR_TOKEN_RETRY) {
4491 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4492 status);
4493 return status;
4494 }
4495 }
4496
4497 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4498 hw->phy.id);
4499 return status;
4500 }
4501
4502 /**
4503 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4504 * @hw: pointer to hardware structure
4505 * @mask: Mask to specify which semaphore to release
4506 *
4507 * Releases the SWFW semaphore and puts back the shared PHY token as needed
4508 */
4509 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4510 {
4511 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4512
4513 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4514
4515 if (mask & IXGBE_GSSR_TOKEN_SM)
4516 ixgbe_put_phy_token(hw);
4517
4518 if (hmask)
4519 ixgbe_release_swfw_sync_X540(hw, hmask);
4520 }
4521
4522 /**
4523 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4524 * @hw: pointer to hardware structure
4525 * @reg_addr: 32 bit address of PHY register to read
4526 * @device_type: 5 bit device type
4527 * @phy_data: Pointer to read data from PHY register
4528 *
4529 * Reads a value from a specified PHY register using the SWFW lock and PHY
4530 * Token. The PHY Token is needed since the MDIO is shared between two MAC
4531 * instances.
4532 **/
4533 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4534 u32 device_type, u16 *phy_data)
4535 {
4536 s32 status;
4537 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4538
4539 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4540
4541 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4542 return IXGBE_ERR_SWFW_SYNC;
4543
4544 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4545
4546 hw->mac.ops.release_swfw_sync(hw, mask);
4547
4548 return status;
4549 }
4550
4551 /**
4552 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4553 * @hw: pointer to hardware structure
4554 * @reg_addr: 32 bit PHY register to write
4555 * @device_type: 5 bit device type
4556 * @phy_data: Data to write to the PHY register
4557 *
4558 * Writes a value to a specified PHY register using the SWFW lock and PHY Token.
4559 * The PHY Token is needed since the MDIO is shared between two MAC instances.
4560 **/
4561 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4562 u32 device_type, u16 phy_data)
4563 {
4564 s32 status;
4565 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4566
4567 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4568
4569 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4570 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4571 phy_data);
4572 hw->mac.ops.release_swfw_sync(hw, mask);
4573 } else {
4574 status = IXGBE_ERR_SWFW_SYNC;
4575 }
4576
4577 return status;
4578 }
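
/*
 * Usage sketch (hypothetical caller): a read-modify-write of an external PHY
 * register through the token-protected accessors above; "reg_addr",
 * "dev_type" and "some_bit" are placeholders.
 *
 *	u16 val;
 *
 *	status = ixgbe_read_phy_reg_x550a(hw, reg_addr, dev_type, &val);
 *	if (status == IXGBE_SUCCESS) {
 *		val |= some_bit;
 *		status = ixgbe_write_phy_reg_x550a(hw, reg_addr, dev_type,
 *						   val);
 *	}
 */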
4579
4580 /**
4581 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4582 * @hw: pointer to hardware structure
4583 *
4584 * Handles the external Base T PHY interrupt. If the interrupt is a high
4585 * temperature failure alarm, an error is returned; if it is a link status
4586 * change, the internal/external PHY link is set up.
4587 *
4588 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
4589 * failure alarm, else return PHY access status.
4590 */
4591 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4592 {
4593 bool lsc;
4594 u32 status;
4595
4596 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4597
4598 if (status != IXGBE_SUCCESS)
4599 return status;
4600
4601 if (lsc)
4602 return ixgbe_setup_internal_phy(hw);
4603
4604 return IXGBE_SUCCESS;
4605 }
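
/*
 * Usage sketch (hypothetical caller): an interrupt service routine would
 * typically treat an over-temperature indication as fatal for the port.
 *
 *	status = ixgbe_handle_lasi_ext_t_x550em(hw);
 *	if (status == IXGBE_ERR_OVERTEMP)
 *		return status;
 */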
4606
4607 /**
4608 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4609 * @hw: pointer to hardware structure
4610 * @speed: new link speed
4611 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4612 *
4613 * Setup internal/external PHY link speed based on link speed, then set
4614 * external PHY auto advertised link speed.
4615 *
4616 * Returns error status for any failure
4617 **/
4618 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4619 ixgbe_link_speed speed,
4620 bool autoneg_wait_to_complete)
4621 {
4622 s32 status;
4623 ixgbe_link_speed force_speed;
4624
4625 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4626
4627 /* Set up the internal/external PHY link speed to iXFI (10G), unless
4628 * only 1G is auto advertised, in which case set up a KX link.
4629 */
4630 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4631 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4632 else
4633 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4634
4635 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4636 */
4637 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4638 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4639 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4640
4641 if (status != IXGBE_SUCCESS)
4642 return status;
4643 }
4644
4645 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4646 }
4647
4648 /**
4649 * ixgbe_check_link_t_X550em - Determine link and speed status
4650 * @hw: pointer to hardware structure
4651 * @speed: pointer to link speed
4652 * @link_up: TRUE when link is up
4653 * @link_up_wait_to_complete: bool used to wait for link up or not
4654 *
4655 * Check that both the MAC and X557 external PHY have link.
4656 **/
4657 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4658 bool *link_up, bool link_up_wait_to_complete)
4659 {
4660 u32 status;
4661 u16 i, autoneg_status = 0;
4662
4663 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4664 return IXGBE_ERR_CONFIG;
4665
4666 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4667 link_up_wait_to_complete);
4668
4669 /* If check link fails or MAC link is not up, then return */
4670 if (status != IXGBE_SUCCESS || !(*link_up))
4671 return status;
4672
4673 /* MAC link is up, so check the external X557 PHY link.
4674 * The X557 PHY link status bit latches low, so a single read can only
4675 * detect a link drop, not the current state of the link; back-to-back
4676 * reads are needed for that.
4677 */
4678 for (i = 0; i < 2; i++) {
4679 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4680 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4681 &autoneg_status);
4682
4683 if (status != IXGBE_SUCCESS)
4684 return status;
4685 }
4686
4687 /* If external PHY link is not up, then indicate link not up */
4688 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4689 *link_up = FALSE;
4690
4691 return IXGBE_SUCCESS;
4692 }
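
/*
 * Usage sketch (hypothetical caller): polling the copper link state through
 * the combined MAC/X557 check above.
 *
 *	ixgbe_link_speed speed;
 *	bool link_up;
 *
 *	status = ixgbe_check_link_t_X550em(hw, &speed, &link_up, FALSE);
 *	if (status == IXGBE_SUCCESS && link_up &&
 *	    speed == IXGBE_LINK_SPEED_10GB_FULL)
 *		DEBUGOUT("10G copper link is up\n");
 */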
4693
4694 /**
4695 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4696 * @hw: pointer to hardware structure
4697 **/
4698 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4699 {
4700 s32 status;
4701
4702 status = ixgbe_reset_phy_generic(hw);
4703
4704 if (status != IXGBE_SUCCESS)
4705 return status;
4706
4707 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4708 return ixgbe_enable_lasi_ext_t_x550em(hw);
4709 }
4710
4711 /**
4712 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4713 * @hw: pointer to hardware structure
4714 * @led_idx: led number to turn on
4715 **/
4716 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4717 {
4718 u16 phy_data;
4719
4720 DEBUGFUNC("ixgbe_led_on_t_X550em");
4721
4722 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4723 return IXGBE_ERR_PARAM;
4724
4725 /* To turn on the LED, set mode to ON. */
4726 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4727 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4728 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4729 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4730 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4731
4732 /* Some designs have the LEDs wired to the MAC */
4733 return ixgbe_led_on_generic(hw, led_idx);
4734 }
4735
4736 /**
4737 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4738 * @hw: pointer to hardware structure
4739 * @led_idx: led number to turn off
4740 **/
4741 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4742 {
4743 u16 phy_data;
4744
4745 DEBUGFUNC("ixgbe_led_off_t_X550em");
4746
4747 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4748 return IXGBE_ERR_PARAM;
4749
4750 /* To turn off the LED, clear the manual ON mode. */
4751 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4752 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4753 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4754 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4755 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4756
4757 /* Some designs have the LEDs wired to the MAC */
4758 return ixgbe_led_off_generic(hw, led_idx);
4759 }
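
/*
 * Usage sketch (hypothetical caller): blinking a PHY LED once to identify
 * the port; LED index 0 is assumed to be a valid, board-specific choice.
 *
 *	ixgbe_led_on_t_X550em(hw, 0);
 *	msec_delay(500);
 *	ixgbe_led_off_t_X550em(hw, 0);
 */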
4760
4761 /**
4762 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4763 * @hw: pointer to the HW structure
4764 * @maj: driver version major number
4765 * @min: driver version minor number
4766 * @build: driver version build number
4767 * @sub: driver version sub build number
4768 * @len: length of driver_ver string
4769 * @driver_ver: driver string
4770 *
4771 * Sends the driver version number to firmware through the manageability
4772 * block. Returns IXGBE_SUCCESS on success, IXGBE_ERR_SWFW_SYNC if the
4773 * semaphore cannot be acquired, or IXGBE_ERR_HOST_INTERFACE_COMMAND if the
4774 * command fails.
4775 **/
4776 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4777 u8 build, u8 sub, u16 len, const char *driver_ver)
4778 {
4779 struct ixgbe_hic_drv_info2 fw_cmd;
4780 s32 ret_val = IXGBE_SUCCESS;
4781 int i;
4782
4783 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4784
4785 if ((len == 0) || (driver_ver == NULL) ||
4786 (len > sizeof(fw_cmd.driver_string)))
4787 return IXGBE_ERR_INVALID_ARGUMENT;
4788
4789 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4790 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4791 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4792 fw_cmd.port_num = (u8)hw->bus.func;
4793 fw_cmd.ver_maj = maj;
4794 fw_cmd.ver_min = min;
4795 fw_cmd.ver_build = build;
4796 fw_cmd.ver_sub = sub;
4797 fw_cmd.hdr.checksum = 0;
4798 memcpy(fw_cmd.driver_string, driver_ver, len);
4799 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4800 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4801
4802 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4803 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4804 sizeof(fw_cmd),
4805 IXGBE_HI_COMMAND_TIMEOUT,
4806 TRUE);
4807 if (ret_val != IXGBE_SUCCESS)
4808 continue;
4809
4810 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4811 FW_CEM_RESP_STATUS_SUCCESS)
4812 ret_val = IXGBE_SUCCESS;
4813 else
4814 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4815
4816 break;
4817 }
4818
4819 return ret_val;
4820 }
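
/*
 * Usage sketch (hypothetical caller): reporting a driver version string to
 * the firmware; the version numbers and string below are placeholders.
 *
 *	static const char drv_ver[] = "4.0.1-k";
 *
 *	status = ixgbe_set_fw_drv_ver_x550(hw, 4, 0, 1, 0,
 *					   sizeof(drv_ver), drv_ver);
 */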
4821
4822 /**
4823 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4824 * @hw: pointer to hardware structure
4825 *
4826 * Returns TRUE if in FW NVM recovery mode.
4827 **/
4828 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4829 {
4830 u32 fwsm;
4831
4832 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4833
4834 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4835 }
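
/*
 * Usage sketch (hypothetical caller): NVM update paths are expected to bail
 * out while the firmware is recovering its NVM image; the error code chosen
 * here is illustrative only.
 *
 *	if (ixgbe_fw_recovery_mode_X550(hw)) {
 *		DEBUGOUT("FW NVM recovery mode; skipping NVM access\n");
 *		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
 *	}
 */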
4836
4837