1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
34
35 #include "ixgbe_x550.h"
36 #include "ixgbe_x540.h"
37 #include "ixgbe_type.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41 #include <dev/mii/mii.h>
42
43 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
44 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed,
46 bool autoneg_wait_to_complete);
47 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
48 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
49 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
50
51 /**
52 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
53 * @hw: pointer to hardware structure
54 *
55 * Initialize the function pointers and assign the MAC type for X550.
56 * Does not touch the hardware.
57 **/
58 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
59 {
60 struct ixgbe_mac_info *mac = &hw->mac;
61 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
62 s32 ret_val;
63
64 DEBUGFUNC("ixgbe_init_ops_X550");
65
66 ret_val = ixgbe_init_ops_X540(hw);
67 mac->ops.dmac_config = ixgbe_dmac_config_X550;
68 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
69 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
70 mac->ops.setup_eee = NULL;
71 mac->ops.set_source_address_pruning =
72 ixgbe_set_source_address_pruning_X550;
73 mac->ops.set_ethertype_anti_spoofing =
74 ixgbe_set_ethertype_anti_spoofing_X550;
75
76 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
78 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
79 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
80 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
81 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
82 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
83 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
84 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
85
86 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
87 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
88 mac->ops.mdd_event = ixgbe_mdd_event_X550;
89 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
90 mac->ops.disable_rx = ixgbe_disable_rx_x550;
91 /* Manageability interface */
92 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
93 switch (hw->device_id) {
94 case IXGBE_DEV_ID_X550EM_X_1G_T:
95 hw->mac.ops.led_on = NULL;
96 hw->mac.ops.led_off = NULL;
97 break;
98 case IXGBE_DEV_ID_X550EM_X_10G_T:
99 case IXGBE_DEV_ID_X550EM_A_10G_T:
100 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
101 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
102 break;
103 default:
104 break;
105 }
106 return ret_val;
107 }
108
109 /**
110 * ixgbe_read_cs4227 - Read CS4227 register
111 * @hw: pointer to hardware structure
112 * @reg: register number to read
113 * @value: pointer to receive value read
114 *
115 * Returns status code
116 **/
117 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
118 {
119 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
120 }
121
122 /**
123 * ixgbe_write_cs4227 - Write CS4227 register
124 * @hw: pointer to hardware structure
125 * @reg: register number to write
126 * @value: value to write to register
127 *
128 * Returns status code
129 **/
130 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
131 {
132 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
133 }
134
135 /**
136 * ixgbe_read_pe - Read register from port expander
137 * @hw: pointer to hardware structure
138 * @reg: register number to read
139 * @value: pointer to receive read value
140 *
141 * Returns status code
142 **/
143 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
144 {
145 s32 status;
146
147 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
148 if (status != IXGBE_SUCCESS)
149 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
150 "port expander access failed with %d\n", status);
151 return status;
152 }
153
154 /**
155 * ixgbe_write_pe - Write register to port expander
156 * @hw: pointer to hardware structure
157 * @reg: register number to write
158 * @value: value to write
159 *
160 * Returns status code
161 **/
162 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
163 {
164 s32 status;
165
166 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
167 if (status != IXGBE_SUCCESS)
168 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
169 "port expander access failed with %d\n", status);
170 return status;
171 }
172
173 /**
174 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
175 * @hw: pointer to hardware structure
176 *
177 * This function assumes that the caller has acquired the proper semaphore.
178 * Returns error code
179 **/
180 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
181 {
182 s32 status;
183 u32 retry;
184 u16 value;
185 u8 reg;
186
187 /* Trigger hard reset. */
188 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
189 if (status != IXGBE_SUCCESS)
190 return status;
191 reg |= IXGBE_PE_BIT1;
192 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
193 if (status != IXGBE_SUCCESS)
194 return status;
195
196 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
197 if (status != IXGBE_SUCCESS)
198 return status;
199 reg &= ~IXGBE_PE_BIT1;
200 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
201 if (status != IXGBE_SUCCESS)
202 return status;
203
204 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
205 if (status != IXGBE_SUCCESS)
206 return status;
207 reg &= ~IXGBE_PE_BIT1;
208 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
209 if (status != IXGBE_SUCCESS)
210 return status;
211
212 usec_delay(IXGBE_CS4227_RESET_HOLD);
213
214 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
215 if (status != IXGBE_SUCCESS)
216 return status;
217 reg |= IXGBE_PE_BIT1;
218 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
219 if (status != IXGBE_SUCCESS)
220 return status;
221
222 /* Wait for the reset to complete. */
223 msec_delay(IXGBE_CS4227_RESET_DELAY);
224 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
225 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
226 &value);
227 if (status == IXGBE_SUCCESS &&
228 value == IXGBE_CS4227_EEPROM_LOAD_OK)
229 break;
230 msec_delay(IXGBE_CS4227_CHECK_DELAY);
231 }
232 if (retry == IXGBE_CS4227_RETRIES) {
233 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
234 "CS4227 reset did not complete.");
235 return IXGBE_ERR_PHY;
236 }
237
238 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
239 if (status != IXGBE_SUCCESS ||
240 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
241 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
242 "CS4227 EEPROM did not load successfully.");
243 return IXGBE_ERR_PHY;
244 }
245
246 return IXGBE_SUCCESS;
247 }
248
249 /**
250 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
251 * @hw: pointer to hardware structure
252 **/
253 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
254 {
255 s32 status = IXGBE_SUCCESS;
256 u32 swfw_mask = hw->phy.phy_semaphore_mask;
257 u16 value = 0;
258 u8 retry;
259
260 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
261 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
262 if (status != IXGBE_SUCCESS) {
263 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
264 "semaphore failed with %d", status);
265 msec_delay(IXGBE_CS4227_CHECK_DELAY);
266 continue;
267 }
268
269 /* Get status of reset flow. */
270 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
271
272 if (status == IXGBE_SUCCESS &&
273 value == IXGBE_CS4227_RESET_COMPLETE)
274 goto out;
275
276 if (status != IXGBE_SUCCESS ||
277 value != IXGBE_CS4227_RESET_PENDING)
278 break;
279
280 /* Reset is pending. Wait and check again. */
281 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
282 msec_delay(IXGBE_CS4227_CHECK_DELAY);
283 }
284
285 /* If still pending, assume other instance failed. */
286 if (retry == IXGBE_CS4227_RETRIES) {
287 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
288 if (status != IXGBE_SUCCESS) {
289 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
290 "semaphore failed with %d", status);
291 return;
292 }
293 }
294
295 /* Reset the CS4227. */
296 status = ixgbe_reset_cs4227(hw);
297 if (status != IXGBE_SUCCESS) {
298 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
299 "CS4227 reset failed: %d", status);
300 goto out;
301 }
302
303 /* The reset takes a long time, so temporarily release the semaphore
304 * in case the other driver instance is waiting for the reset indication.
305 */
306 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
307 IXGBE_CS4227_RESET_PENDING);
308 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
309 msec_delay(10);
310 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
311 if (status != IXGBE_SUCCESS) {
312 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
313 "semaphore failed with %d", status);
314 return;
315 }
316
317 /* Record completion for next time. */
318 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
319 IXGBE_CS4227_RESET_COMPLETE);
320
321 out:
322 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
323 msec_delay(hw->eeprom.semaphore_delay);
324 }
325
326 /**
327 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
328 * @hw: pointer to hardware structure
329 **/
330 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
331 {
332 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
333
334 if (hw->bus.lan_id) {
335 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
336 esdp |= IXGBE_ESDP_SDP1_DIR;
337 }
338 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
339 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
340 IXGBE_WRITE_FLUSH(hw);
341 }
342
343 /**
344 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
345 * @hw: pointer to hardware structure
346 * @reg_addr: 32 bit address of PHY register to read
347 * @dev_type: always unused
348 * @phy_data: Pointer to read data from PHY register
349 */
350 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
351 u32 dev_type, u16 *phy_data)
352 {
353 u32 i, data, command;
354 UNREFERENCED_1PARAMETER(dev_type);
355
356 /* Setup and write the read command */
357 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
358 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
359 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
360 IXGBE_MSCA_MDI_COMMAND;
361
362 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
363
364 /* Check every 10 usec to see if the access completed.
365 * The MDI Command bit will clear when the operation is
366 * complete
367 */
368 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
369 usec_delay(10);
370
371 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
372 if (!(command & IXGBE_MSCA_MDI_COMMAND))
373 break;
374 }
375
376 if (command & IXGBE_MSCA_MDI_COMMAND) {
377 ERROR_REPORT1(IXGBE_ERROR_POLLING,
378 "PHY read command did not complete.\n");
379 return IXGBE_ERR_PHY;
380 }
381
382 /* Read operation is complete. Get the data from MSRWD */
383 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
384 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
385 *phy_data = (u16)data;
386
387 return IXGBE_SUCCESS;
388 }
389
390 /**
391 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
392 * @hw: pointer to hardware structure
393 * @reg_addr: 32 bit PHY register to write
394 * @dev_type: always unused
395 * @phy_data: Data to write to the PHY register
396 */
397 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
398 u32 dev_type, u16 phy_data)
399 {
400 u32 i, command;
401 UNREFERENCED_1PARAMETER(dev_type);
402
403 /* Put the data in the MDI single read and write data register*/
404 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
405
406 /* Setup and write the write command */
407 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
408 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
409 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
410 IXGBE_MSCA_MDI_COMMAND;
411
412 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
413
414 /* Check every 10 usec to see if the access completed.
415 * The MDI Command bit will clear when the operation is
416 * complete
417 */
418 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
419 usec_delay(10);
420
421 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
422 if (!(command & IXGBE_MSCA_MDI_COMMAND))
423 break;
424 }
425
426 if (command & IXGBE_MSCA_MDI_COMMAND) {
427 ERROR_REPORT1(IXGBE_ERROR_POLLING,
428 "PHY write cmd didn't complete\n");
429 return IXGBE_ERR_PHY;
430 }
431
432 return IXGBE_SUCCESS;
433 }
434
435 /**
436 * ixgbe_identify_phy_x550em - Get PHY type based on device id
437 * @hw: pointer to hardware structure
438 *
439 * Returns error code
440 */
441 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
442 {
443 hw->mac.ops.set_lan_id(hw);
444
445 ixgbe_read_mng_if_sel_x550em(hw);
446
447 switch (hw->device_id) {
448 case IXGBE_DEV_ID_X550EM_A_SFP:
449 return ixgbe_identify_module_generic(hw);
450 case IXGBE_DEV_ID_X550EM_X_SFP:
451 /* set up for CS4227 usage */
452 ixgbe_setup_mux_ctl(hw);
453 ixgbe_check_cs4227(hw);
454 /* Fallthrough */
455
456 case IXGBE_DEV_ID_X550EM_A_SFP_N:
457 return ixgbe_identify_module_generic(hw);
458 break;
459 case IXGBE_DEV_ID_X550EM_X_KX4:
460 hw->phy.type = ixgbe_phy_x550em_kx4;
461 break;
462 case IXGBE_DEV_ID_X550EM_X_XFI:
463 hw->phy.type = ixgbe_phy_x550em_xfi;
464 break;
465 case IXGBE_DEV_ID_X550EM_X_KR:
466 case IXGBE_DEV_ID_X550EM_A_KR:
467 case IXGBE_DEV_ID_X550EM_A_KR_L:
468 hw->phy.type = ixgbe_phy_x550em_kr;
469 break;
470 case IXGBE_DEV_ID_X550EM_A_10G_T:
471 case IXGBE_DEV_ID_X550EM_X_10G_T:
472 return ixgbe_identify_phy_generic(hw);
473 case IXGBE_DEV_ID_X550EM_X_1G_T:
474 hw->phy.type = ixgbe_phy_ext_1g_t;
475 break;
476 case IXGBE_DEV_ID_X550EM_A_1G_T:
477 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
478 hw->phy.type = ixgbe_phy_fw;
479 if (hw->bus.lan_id)
480 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
481 else
482 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
483 break;
484 default:
485 break;
486 }
487 return IXGBE_SUCCESS;
488 }
489
490 /**
491 * ixgbe_fw_phy_activity - Perform an activity on a PHY
492 * @hw: pointer to hardware structure
493 * @activity: activity to perform
494 * @data: Pointer to 4 32-bit words of data
495 */
496 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
497 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
498 {
499 union {
500 struct ixgbe_hic_phy_activity_req cmd;
501 struct ixgbe_hic_phy_activity_resp rsp;
502 } hic;
503 u16 retries = FW_PHY_ACT_RETRIES;
504 s32 rc;
505 u16 i;
506
507 do {
508 memset(&hic, 0, sizeof(hic));
509 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
510 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
511 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
512 hic.cmd.port_number = hw->bus.lan_id;
513 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
514 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
515 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
516
517 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
518 sizeof(hic.cmd),
519 IXGBE_HI_COMMAND_TIMEOUT,
520 TRUE);
521 if (rc != IXGBE_SUCCESS)
522 return rc;
523 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
524 FW_CEM_RESP_STATUS_SUCCESS) {
525 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
526 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
527 return IXGBE_SUCCESS;
528 }
529 usec_delay(20);
530 --retries;
531 } while (retries > 0);
532
533 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
534 }
535
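/* Map each firmware link-speed bit (FW_PHY_ACT_LINK_SPEED_*) to the
 * corresponding ixgbe_link_speed value.
 */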
536 static const struct {
537 u16 fw_speed;
538 ixgbe_link_speed phy_speed;
539 } ixgbe_fw_map[] = {
540 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
541 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
542 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
543 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
544 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
545 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
546 };
547
548 /**
549 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
550 * @hw: pointer to hardware structure
551 *
552 * Returns error code
553 */
554 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
555 {
556 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
557 u16 phy_speeds;
558 u16 phy_id_lo;
559 s32 rc;
560 u16 i;
561
562 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
563 if (rc)
564 return rc;
565
566 hw->phy.speeds_supported = 0;
567 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
568 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
569 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
570 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
571 }
572
573 #if 0
574 /*
575 * Don't set autoneg_advertised here, to avoid being inconsistent
576 * with the if_media value.
577 */
578 if (!hw->phy.autoneg_advertised)
579 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
580 #endif
581
582 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
583 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
584 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
585 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
586 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
587 return IXGBE_ERR_PHY_ADDR_INVALID;
588 return IXGBE_SUCCESS;
589 }
590
591 /**
592 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
593 * @hw: pointer to hardware structure
594 *
595 * Returns error code
596 */
597 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
598 {
599 if (hw->bus.lan_id)
600 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
601 else
602 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
603
604 hw->phy.type = ixgbe_phy_fw;
605 hw->phy.ops.read_reg = NULL;
606 hw->phy.ops.write_reg = NULL;
607 return ixgbe_get_phy_id_fw(hw);
608 }
609
610 /**
611 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
612 * @hw: pointer to hardware structure
613 *
614 * Returns error code
615 */
616 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
617 {
618 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
619
620 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
621 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
622 }
623
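/**
 * ixgbe_read_phy_reg_x550em - Stub PHY register read
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to read
 * @device_type: unused
 * @phy_data: Pointer to read data from PHY register
 *
 * Always returns IXGBE_NOT_IMPLEMENTED; all arguments are unreferenced.
 */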
624 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
625 u32 device_type, u16 *phy_data)
626 {
627 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
628 return IXGBE_NOT_IMPLEMENTED;
629 }
630
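/**
 * ixgbe_write_phy_reg_x550em - Stub PHY register write
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to write
 * @device_type: unused
 * @phy_data: Data to write to the PHY register
 *
 * Always returns IXGBE_NOT_IMPLEMENTED; all arguments are unreferenced.
 */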
631 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
632 u32 device_type, u16 phy_data)
633 {
634 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
635 return IXGBE_NOT_IMPLEMENTED;
636 }
637
638 /**
639 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
640 * @hw: pointer to the hardware structure
641 * @addr: I2C bus address to read from
642 * @reg: I2C device register to read from
643 * @val: pointer to location to receive read value
644 *
645 * Returns an error code on error.
646 **/
647 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
648 u16 reg, u16 *val)
649 {
650 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
651 }
652
653 /**
654 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
655 * @hw: pointer to the hardware structure
656 * @addr: I2C bus address to read from
657 * @reg: I2C device register to read from
658 * @val: pointer to location to receive read value
659 *
660 * Returns an error code on error.
661 **/
662 static s32
663 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
664 u16 reg, u16 *val)
665 {
666 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
667 }
668
669 /**
670 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
671 * @hw: pointer to the hardware structure
672 * @addr: I2C bus address to write to
673 * @reg: I2C device register to write to
674 * @val: value to write
675 *
676 * Returns an error code on error.
677 **/
678 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
679 u8 addr, u16 reg, u16 val)
680 {
681 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
682 }
683
684 /**
685 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
686 * @hw: pointer to the hardware structure
687 * @addr: I2C bus address to write to
688 * @reg: I2C device register to write to
689 * @val: value to write
690 *
691 * Returns an error code on error.
692 **/
693 static s32
694 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
695 u8 addr, u16 reg, u16 val)
696 {
697 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
698 }
699
700 /**
701 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
702 * @hw: pointer to hardware structure
703 *
704 * Initialize the function pointers and assign the MAC type for X550EM.
705 * Does not touch the hardware.
706 **/
707 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
708 {
709 struct ixgbe_mac_info *mac = &hw->mac;
710 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
711 struct ixgbe_phy_info *phy = &hw->phy;
712 s32 ret_val;
713
714 DEBUGFUNC("ixgbe_init_ops_X550EM");
715
716 /* Similar to X550 so start there. */
717 ret_val = ixgbe_init_ops_X550(hw);
718
719 /* Since this function eventually calls
720 * ixgbe_init_ops_X540 by design, we are setting
721 * the pointers to NULL explicitly here to overwrite
722 * the values being set in the x540 function.
723 */
724
725 /* Bypass not supported in x550EM */
726 mac->ops.bypass_rw = NULL;
727 mac->ops.bypass_valid_rd = NULL;
728 mac->ops.bypass_set = NULL;
729 mac->ops.bypass_rd_eep = NULL;
730
731 /* FCOE not supported in x550EM */
732 mac->ops.get_san_mac_addr = NULL;
733 mac->ops.set_san_mac_addr = NULL;
734 mac->ops.get_wwn_prefix = NULL;
735 mac->ops.get_fcoe_boot_status = NULL;
736
737 /* IPsec not supported in x550EM */
738 mac->ops.disable_sec_rx_path = NULL;
739 mac->ops.enable_sec_rx_path = NULL;
740
741 /* AUTOC register is not present in x550EM. */
742 mac->ops.prot_autoc_read = NULL;
743 mac->ops.prot_autoc_write = NULL;
744
745 /* X550EM bus type is internal */
746 hw->bus.type = ixgbe_bus_type_internal;
747 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
748
749
750 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
751 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
752 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
753 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
754 mac->ops.get_supported_physical_layer =
755 ixgbe_get_supported_physical_layer_X550em;
756
757 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
758 mac->ops.setup_fc = ixgbe_setup_fc_generic;
759 else
760 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
761
762 /* PHY */
763 phy->ops.init = ixgbe_init_phy_ops_X550em;
764 switch (hw->device_id) {
765 case IXGBE_DEV_ID_X550EM_A_1G_T:
766 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
767 mac->ops.setup_fc = NULL;
768 phy->ops.identify = ixgbe_identify_phy_fw;
769 phy->ops.set_phy_power = NULL;
770 phy->ops.get_firmware_version = NULL;
771 break;
772 case IXGBE_DEV_ID_X550EM_X_1G_T:
773 mac->ops.setup_fc = NULL;
774 phy->ops.identify = ixgbe_identify_phy_x550em;
775 phy->ops.set_phy_power = NULL;
776 break;
777 default:
778 phy->ops.identify = ixgbe_identify_phy_x550em;
779 }
780
781 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
782 phy->ops.set_phy_power = NULL;
783
784
785 /* EEPROM */
786 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
787 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
788 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
789 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
790 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
791 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
792 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
793 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
794
795 return ret_val;
796 }
797
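/*
 * IXGBE_DENVERTON_WA enables the workaround in ixgbe_setup_fw_link() for
 * firmware that programs the BMCR register incorrectly when a 10/100Mbps
 * speed is forced without autonegotiation.
 */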
798 #define IXGBE_DENVERTON_WA 1
799
800 /**
801 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
802 * @hw: pointer to hardware structure
803 */
804 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
805 {
806 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
807 s32 rc;
808 #ifdef IXGBE_DENVERTON_WA
809 s32 ret_val;
810 u16 phydata;
811 #endif
812 u16 i;
813
814 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
815 return 0;
816
817 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
818 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
819 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
820 return IXGBE_ERR_INVALID_LINK_SETTINGS;
821 }
822
823 switch (hw->fc.requested_mode) {
824 case ixgbe_fc_full:
825 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
826 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
827 break;
828 case ixgbe_fc_rx_pause:
829 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
830 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
831 break;
832 case ixgbe_fc_tx_pause:
833 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
834 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
835 break;
836 default:
837 break;
838 }
839
840 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
841 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
842 setup[0] |= ixgbe_fw_map[i].fw_speed;
843 }
844 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
845
846 if (hw->phy.eee_speeds_advertised)
847 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
848
849 #ifdef IXGBE_DENVERTON_WA
850 /* Don't use autonegotiation for 10/100Mbps */
851 if ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
852 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)) {
853 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
854 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
855 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
856 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
857 }
858 #endif
859
860 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
861 if (rc)
862 return rc;
863
864 #ifdef IXGBE_DENVERTON_WA
865 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
866 if (ret_val != 0)
867 goto out;
868
869 /*
870 * Broken firmware sets BMCR register incorrectly if
871 * FW_PHY_ACT_SETUP_LINK_AN isn't set.
872 * a) FDX may not be set.
873 * b) BMCR_SPEED1 (bit 6) is always cleared.
874 * +---------+-------+------------+------+----------------------------+
875 * | request | BMCR  | BMCR spd   | BMCR |                            |
876 * |         | (HEX) | (in bits)  | FDX  |                            |
877 * +---------+-------+------------+------+----------------------------+
878 * | 10M     | 0000  | 10M(00)    | 0    |                            |
879 * | 10M     | 2000  | 100M(01)   | 0    | (I've never observed this) |
880 * | 10M     | 2100  | 100M(01)   | 1    |                            |
881 * | 100M    | 0000  | 10M(00)    | 0    |                            |
882 * | 100M    | 0100  | 10M(00)    | 1    |                            |
883 * +---------+-------+------------+------+----------------------------+
884 */
885 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
886 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
887 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
888 && (((phydata & BMCR_FDX) == 0)
889 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
890 phydata = BMCR_FDX;
891 switch (hw->phy.autoneg_advertised) {
892 case IXGBE_LINK_SPEED_10_FULL:
893 phydata |= BMCR_S10;
894 break;
895 case IXGBE_LINK_SPEED_100_FULL:
896 phydata |= BMCR_S100;
897 break;
898 case IXGBE_LINK_SPEED_1GB_FULL:
899 panic("%s: 1GB_FULL is set", __func__);
900 break;
901 default:
902 break;
903 }
904 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
905 if (ret_val != 0)
906 return ret_val;
907 }
908 out:
909 #endif
910 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
911 return IXGBE_ERR_OVERTEMP;
912 return IXGBE_SUCCESS;
913 }
914
915 /**
916 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
917 * @hw: pointer to hardware structure
918 *
919 * Called at init time to set up flow control.
920 */
921 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
922 {
923 if (hw->fc.requested_mode == ixgbe_fc_default)
924 hw->fc.requested_mode = ixgbe_fc_full;
925
926 return ixgbe_setup_fw_link(hw);
927 }
928
929 /**
930 * ixgbe_setup_eee_fw - Enable/disable EEE support
931 * @hw: pointer to the HW structure
932 * @enable_eee: boolean flag to enable EEE
933 *
934 * Enable/disable EEE based on enable_eee flag.
935 * This function controls EEE for firmware-based PHY implementations.
936 */
937 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
938 {
939 if (!!hw->phy.eee_speeds_advertised == enable_eee)
940 return IXGBE_SUCCESS;
941 if (enable_eee)
942 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
943 else
944 hw->phy.eee_speeds_advertised = 0;
945 return hw->phy.ops.setup_link(hw);
946 }
947
948 /**
949 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
950 * @hw: pointer to hardware structure
951 *
952 * Initialize the function pointers and assign the MAC type for X550EM_a.
953 * Does not touch the hardware.
954 **/
955 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
956 {
957 struct ixgbe_mac_info *mac = &hw->mac;
958 s32 ret_val;
959
960 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
961
962 /* Start with generic X550EM init */
963 ret_val = ixgbe_init_ops_X550EM(hw);
964
965 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
966 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
967 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
968 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
969 } else {
970 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
971 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
972 }
973 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
974 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
975
976 switch (mac->ops.get_media_type(hw)) {
977 case ixgbe_media_type_fiber:
978 mac->ops.setup_fc = NULL;
979 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
980 break;
981 case ixgbe_media_type_backplane:
982 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
983 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
984 break;
985 default:
986 break;
987 }
988
989 switch (hw->device_id) {
990 case IXGBE_DEV_ID_X550EM_A_1G_T:
991 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
992 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
993 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
994 mac->ops.setup_eee = ixgbe_setup_eee_fw;
995 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
996 IXGBE_LINK_SPEED_1GB_FULL;
997 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
998 break;
999 default:
1000 break;
1001 }
1002
1003 return ret_val;
1004 }
1005
1006 /**
1007 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1008 * @hw: pointer to hardware structure
1009 *
1010 * Initialize the function pointers and assign the MAC type for X550EM_x.
1011 * Does not touch the hardware.
1012 **/
1013 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1014 {
1015 struct ixgbe_mac_info *mac = &hw->mac;
1016 struct ixgbe_link_info *link = &hw->link;
1017 s32 ret_val;
1018
1019 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1020
1021 /* Start with generic X550EM init */
1022 ret_val = ixgbe_init_ops_X550EM(hw);
1023
1024 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1025 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1026 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1027 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1028 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1029 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1030 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1031 link->ops.write_link_unlocked =
1032 ixgbe_write_i2c_combined_generic_unlocked;
1033 link->addr = IXGBE_CS4227;
1034
1035 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1036 mac->ops.setup_fc = NULL;
1037 mac->ops.setup_eee = NULL;
1038 mac->ops.init_led_link_act = NULL;
1039 }
1040
1041 return ret_val;
1042 }
1043
1044 /**
1045 * ixgbe_dmac_config_X550
1046 * @hw: pointer to hardware structure
1047 *
1048 * Configure DMA coalescing. If enabling dmac, dmac is activated.
1049 * When disabling dmac, the dmac enable bit is cleared.
1050 **/
1051 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1052 {
1053 u32 reg, high_pri_tc;
1054
1055 DEBUGFUNC("ixgbe_dmac_config_X550");
1056
1057 /* Disable DMA coalescing before configuring */
1058 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1059 reg &= ~IXGBE_DMACR_DMAC_EN;
1060 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1061
1062 /* Disable DMA Coalescing if the watchdog timer is 0 */
1063 if (!hw->mac.dmac_config.watchdog_timer)
1064 goto out;
1065
1066 ixgbe_dmac_config_tcs_X550(hw);
1067
1068 /* Configure DMA Coalescing Control Register */
1069 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1070
1071 /* Set the watchdog timer in units of 40.96 usec */
1072 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1073 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1074
1075 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1076 /* If fcoe is enabled, set high priority traffic class */
1077 if (hw->mac.dmac_config.fcoe_en) {
1078 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1079 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1080 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1081 }
1082 reg |= IXGBE_DMACR_EN_MNG_IND;
1083
1084 /* Enable DMA coalescing after configuration */
1085 reg |= IXGBE_DMACR_DMAC_EN;
1086 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1087
1088 out:
1089 return IXGBE_SUCCESS;
1090 }
1091
1092 /**
1093 * ixgbe_dmac_config_tcs_X550
1094 * @hw: pointer to hardware structure
1095 *
1096 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1097 * be cleared before configuring.
1098 **/
1099 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1100 {
1101 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1102
1103 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1104
1105 /* Select the Rx packet buffer headroom based on the configured link speed */
1106 switch (hw->mac.dmac_config.link_speed) {
1107 case IXGBE_LINK_SPEED_10_FULL:
1108 case IXGBE_LINK_SPEED_100_FULL:
1109 pb_headroom = IXGBE_DMACRXT_100M;
1110 break;
1111 case IXGBE_LINK_SPEED_1GB_FULL:
1112 pb_headroom = IXGBE_DMACRXT_1G;
1113 break;
1114 default:
1115 pb_headroom = IXGBE_DMACRXT_10G;
1116 break;
1117 }
1118
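/* The MFS field of MAXFRS holds the max frame size in bytes; convert it
 * to KB for use as the minimum DMCTH threshold below.
 */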
1119 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1120 IXGBE_MHADD_MFS_SHIFT) / 1024);
1121
1122 /* Set the per Rx packet buffer receive threshold */
1123 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1124 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1125 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1126
1127 if (tc < hw->mac.dmac_config.num_tcs) {
1128 /* Get Rx PB size */
1129 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1130 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1131 IXGBE_RXPBSIZE_SHIFT;
1132
1133 /* Calculate receive buffer threshold in kilobytes */
1134 if (rx_pb_size > pb_headroom)
1135 rx_pb_size = rx_pb_size - pb_headroom;
1136 else
1137 rx_pb_size = 0;
1138
1139 /* Minimum of MFS shall be set for DMCTH */
1140 reg |= (rx_pb_size > maxframe_size_kb) ?
1141 rx_pb_size : maxframe_size_kb;
1142 }
1143 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1144 }
1145 return IXGBE_SUCCESS;
1146 }
1147
1148 /**
1149 * ixgbe_dmac_update_tcs_X550
1150 * @hw: pointer to hardware structure
1151 *
1152 * Disables dmac, updates per TC settings, and then enables dmac.
1153 **/
1154 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1155 {
1156 u32 reg;
1157
1158 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1159
1160 /* Disable DMA coalescing before configuring */
1161 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1162 reg &= ~IXGBE_DMACR_DMAC_EN;
1163 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1164
1165 ixgbe_dmac_config_tcs_X550(hw);
1166
1167 /* Enable DMA coalescing after configuration */
1168 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1169 reg |= IXGBE_DMACR_DMAC_EN;
1170 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1171
1172 return IXGBE_SUCCESS;
1173 }
1174
1175 /**
1176 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1177 * @hw: pointer to hardware structure
1178 *
1179 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1180 * ixgbe_hw struct in order to set up EEPROM access.
1181 **/
1182 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1183 {
1184 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1185 u32 eec;
1186 u16 eeprom_size;
1187
1188 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1189
1190 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1191 eeprom->semaphore_delay = 10;
1192 eeprom->type = ixgbe_flash;
1193
1194 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1195 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1196 IXGBE_EEC_SIZE_SHIFT);
1197 eeprom->word_size = 1 << (eeprom_size +
1198 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1199
1200 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1201 eeprom->type, eeprom->word_size);
1202 }
1203
1204 return IXGBE_SUCCESS;
1205 }
1206
1207 /**
1208 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1209 * @hw: pointer to hardware structure
1210 * @enable: enable or disable source address pruning
1211 * @pool: Rx pool to set source address pruning for
1212 **/
1213 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1214 unsigned int pool)
1215 {
1216 u64 pfflp;
1217
1218 /* max rx pool is 63 */
1219 if (pool > 63)
1220 return;
1221
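/* PFFLPL and PFFLPH together form a 64-bit per-pool bitmap of source
 * address pruning enables.
 */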
1222 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1223 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1224
1225 if (enable)
1226 pfflp |= (1ULL << pool);
1227 else
1228 pfflp &= ~(1ULL << pool);
1229
1230 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1231 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1232 }
1233
1234 /**
1235 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1236 * @hw: pointer to hardware structure
1237 * @enable: enable or disable switch for Ethertype anti-spoofing
1238 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1239 *
1240 **/
1241 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1242 bool enable, int vf)
1243 {
1244 int vf_target_reg = vf >> 3;
1245 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1246 u32 pfvfspoof;
1247
1248 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1249
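/* Eight VFs share each PFVFSPOOF register; the Ethertype anti-spoofing
 * enable bits begin at IXGBE_SPOOF_ETHERTYPEAS_SHIFT within it.
 */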
1250 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1251 if (enable)
1252 pfvfspoof |= (1 << vf_target_shift);
1253 else
1254 pfvfspoof &= ~(1 << vf_target_shift);
1255
1256 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1257 }
1258
1259 /**
1260 * ixgbe_iosf_wait - Wait for IOSF command completion
1261 * @hw: pointer to hardware structure
1262 * @ctrl: pointer to location to receive final IOSF control value
1263 *
1264 * Returns failing status on timeout
1265 *
1266 * Note: ctrl can be NULL if the IOSF control register value is not needed
1267 **/
1268 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1269 {
1270 u32 i, command = 0;
1271
1272 /* Check every 10 usec to see if the address cycle completed.
1273 * The SB IOSF BUSY bit will clear when the operation is
1274 * complete
1275 */
1276 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1277 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1278 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1279 break;
1280 usec_delay(10);
1281 }
1282 if (ctrl)
1283 *ctrl = command;
1284 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1285 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1286 return IXGBE_ERR_PHY;
1287 }
1288
1289 return IXGBE_SUCCESS;
1290 }
1291
1292 /**
1293 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1294 * of the IOSF device
1295 * @hw: pointer to hardware structure
1296 * @reg_addr: 32 bit PHY register to write
1297 * @device_type: 3 bit device type
1298 * @data: Data to write to the register
1299 **/
1300 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1301 u32 device_type, u32 data)
1302 {
1303 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1304 u32 command, error __unused;
1305 s32 ret;
1306
1307 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1308 if (ret != IXGBE_SUCCESS)
1309 return ret;
1310
1311 ret = ixgbe_iosf_wait(hw, NULL);
1312 if (ret != IXGBE_SUCCESS)
1313 goto out;
1314
1315 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1316 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1317
1318 /* Write IOSF control register */
1319 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1320
1321 /* Write IOSF data register */
1322 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1323
1324 ret = ixgbe_iosf_wait(hw, &command);
1325
1326 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1327 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1328 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1329 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1330 "Failed to write, error %x\n", error);
1331 ret = IXGBE_ERR_PHY;
1332 }
1333
1334 out:
1335 ixgbe_release_swfw_semaphore(hw, gssr);
1336 return ret;
1337 }
1338
1339 /**
1340 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1341 * @hw: pointer to hardware structure
1342 * @reg_addr: 32 bit PHY register to write
1343 * @device_type: 3 bit device type
1344 * @data: Pointer to read data from the register
1345 **/
1346 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1347 u32 device_type, u32 *data)
1348 {
1349 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1350 u32 command, error __unused;
1351 s32 ret;
1352
1353 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1354 if (ret != IXGBE_SUCCESS)
1355 return ret;
1356
1357 ret = ixgbe_iosf_wait(hw, NULL);
1358 if (ret != IXGBE_SUCCESS)
1359 goto out;
1360
1361 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1362 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1363
1364 /* Write IOSF control register */
1365 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1366
1367 ret = ixgbe_iosf_wait(hw, &command);
1368
1369 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1370 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1371 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1372 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1373 "Failed to read, error %x\n", error);
1374 ret = IXGBE_ERR_PHY;
1375 }
1376
1377 if (ret == IXGBE_SUCCESS)
1378 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1379
1380 out:
1381 ixgbe_release_swfw_semaphore(hw, gssr);
1382 return ret;
1383 }
1384
1385 /**
1386 * ixgbe_get_phy_token - Get the token for shared phy access
1387 * @hw: Pointer to hardware structure
1388 */
1389
1390 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1391 {
1392 struct ixgbe_hic_phy_token_req token_cmd;
1393 s32 status;
1394
1395 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1396 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1397 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1398 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1399 token_cmd.port_number = hw->bus.lan_id;
1400 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1401 token_cmd.pad = 0;
1402 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1403 sizeof(token_cmd),
1404 IXGBE_HI_COMMAND_TIMEOUT,
1405 TRUE);
1406 if (status) {
1407 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1408 status);
1409 return status;
1410 }
1411 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1412 return IXGBE_SUCCESS;
1413 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1414 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1415 token_cmd.hdr.cmd_or_resp.ret_status);
1416 return IXGBE_ERR_FW_RESP_INVALID;
1417 }
1418
1419 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1420 return IXGBE_ERR_TOKEN_RETRY;
1421 }
1422
1423 /**
1424 * ixgbe_put_phy_token - Put the token for shared phy access
1425 * @hw: Pointer to hardware structure
1426 */
1427
1428 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1429 {
1430 struct ixgbe_hic_phy_token_req token_cmd;
1431 s32 status;
1432
1433 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1434 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1435 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1436 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1437 token_cmd.port_number = hw->bus.lan_id;
1438 token_cmd.command_type = FW_PHY_TOKEN_REL;
1439 token_cmd.pad = 0;
1440 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1441 sizeof(token_cmd),
1442 IXGBE_HI_COMMAND_TIMEOUT,
1443 TRUE);
1444 if (status)
1445 return status;
1446 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1447 return IXGBE_SUCCESS;
1448
1449 DEBUGOUT("Put PHY Token host interface command failed");
1450 return IXGBE_ERR_FW_RESP_INVALID;
1451 }
1452
1453 /**
1454 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1455 * of the IOSF device
1456 * @hw: pointer to hardware structure
1457 * @reg_addr: 32 bit PHY register to write
1458 * @device_type: 3 bit device type
1459 * @data: Data to write to the register
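 *
 * The write is issued through the host interface (firmware) command
 * mechanism rather than the SB IOSF indirect access registers used by
 * ixgbe_write_iosf_sb_reg_x550.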
1460 **/
1461 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1462 u32 device_type, u32 data)
1463 {
1464 struct ixgbe_hic_internal_phy_req write_cmd;
1465 s32 status;
1466 UNREFERENCED_1PARAMETER(device_type);
1467
1468 memset(&write_cmd, 0, sizeof(write_cmd));
1469 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1470 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1471 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1472 write_cmd.port_number = hw->bus.lan_id;
1473 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1474 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1475 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1476
1477 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1478 sizeof(write_cmd),
1479 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1480
1481 return status;
1482 }
1483
1484 /**
1485 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1486 * @hw: pointer to hardware structure
1487 * @reg_addr: 32 bit PHY register to write
1488 * @device_type: 3 bit device type
1489 * @data: Pointer to read data from the register
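 *
 * The read is issued through the host interface (firmware) command
 * mechanism rather than the SB IOSF indirect access registers used by
 * ixgbe_read_iosf_sb_reg_x550.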
1490 **/
1491 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1492 u32 device_type, u32 *data)
1493 {
1494 union {
1495 struct ixgbe_hic_internal_phy_req cmd;
1496 struct ixgbe_hic_internal_phy_resp rsp;
1497 } hic;
1498 s32 status;
1499 UNREFERENCED_1PARAMETER(device_type);
1500
1501 memset(&hic, 0, sizeof(hic));
1502 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1503 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1504 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1505 hic.cmd.port_number = hw->bus.lan_id;
1506 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1507 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1508
1509 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1510 sizeof(hic.cmd),
1511 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1512
1513 /* Extract the register value from the response. */
1514 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1515
1516 return status;
1517 }
1518
1519 /**
1520 * ixgbe_disable_mdd_X550
1521 * @hw: pointer to hardware structure
1522 *
1523 * Disable malicious driver detection
1524 **/
1525 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1526 {
1527 u32 reg;
1528
1529 DEBUGFUNC("ixgbe_disable_mdd_X550");
1530
1531 /* Disable MDD for TX DMA and interrupt */
1532 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1533 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1534 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1535
1536 /* Disable MDD for RX and interrupt */
1537 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1538 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1539 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1540 }
1541
1542 /**
1543 * ixgbe_enable_mdd_X550
1544 * @hw: pointer to hardware structure
1545 *
1546 * Enable malicious driver detection
1547 **/
1548 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1549 {
1550 u32 reg;
1551
1552 DEBUGFUNC("ixgbe_enable_mdd_X550");
1553
1554 /* Enable MDD for TX DMA and interrupt */
1555 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1556 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1557 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1558
1559 /* Enable MDD for RX and interrupt */
1560 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1561 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1562 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1563 }
1564
1565 /**
1566 * ixgbe_restore_mdd_vf_X550
1567 * @hw: pointer to hardware structure
1568 * @vf: vf index
1569 *
1570 * Restore VF that was disabled during malicious driver detection event
1571 **/
1572 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1573 {
1574 u32 idx, reg, num_qs, start_q, bitmask;
1575
1576 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1577
1578 /* Map VF to queues */
1579 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1580 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1581 case IXGBE_MRQC_VMDQRT8TCEN:
1582 num_qs = 8; /* 16 VFs / pools */
1583 bitmask = 0x000000FF;
1584 break;
1585 case IXGBE_MRQC_VMDQRSS32EN:
1586 case IXGBE_MRQC_VMDQRT4TCEN:
1587 num_qs = 4; /* 32 VFs / pools */
1588 bitmask = 0x0000000F;
1589 break;
1590 default: /* 64 VFs / pools */
1591 num_qs = 2;
1592 bitmask = 0x00000003;
1593 break;
1594 }
1595 start_q = vf * num_qs;
1596
1597 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1598 idx = start_q / 32;
1599 reg = 0;
1600 reg |= (bitmask << (start_q % 32));
1601 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1602 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1603 }
1604
1605 /**
1606 * ixgbe_mdd_event_X550
1607 * @hw: pointer to hardware structure
1608 * @vf_bitmap: vf bitmap of malicious vfs
1609 *
1610 * Handle malicious driver detection event.
1611 **/
1612 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1613 {
1614 u32 wqbr;
1615 u32 i, j, reg, q, shift, vf, idx;
1616
1617 DEBUGFUNC("ixgbe_mdd_event_X550");
1618
1619 /* figure out pool size for mapping to vf's */
1620 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1621 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1622 case IXGBE_MRQC_VMDQRT8TCEN:
1623 shift = 3; /* 16 VFs / pools */
1624 break;
1625 case IXGBE_MRQC_VMDQRSS32EN:
1626 case IXGBE_MRQC_VMDQRT4TCEN:
1627 shift = 2; /* 32 VFs / pools */
1628 break;
1629 default:
1630 shift = 1; /* 64 VFs / pools */
1631 break;
1632 }
1633
1634 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1635 for (i = 0; i < 4; i++) {
1636 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1637 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1638
1639 if (!wqbr)
1640 continue;
1641
1642 /* Get malicious queue */
1643 for (j = 0; j < 32 && wqbr; j++) {
1644
1645 if (!(wqbr & (1 << j)))
1646 continue;
1647
1648 /* Get queue from bitmask */
1649 q = j + (i * 32);
1650
1651 /* Map queue to vf */
1652 vf = (q >> shift);
1653
1654 /* Set vf bit in vf_bitmap */
1655 idx = vf / 32;
1656 vf_bitmap[idx] |= (1 << (vf % 32));
1657 wqbr &= ~(1 << j);
1658 }
1659 }
1660 }
1661
1662 /**
1663 * ixgbe_get_media_type_X550em - Get media type
1664 * @hw: pointer to hardware structure
1665 *
1666 * Returns the media type (fiber, copper, backplane)
1667 */
1668 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1669 {
1670 enum ixgbe_media_type media_type;
1671
1672 DEBUGFUNC("ixgbe_get_media_type_X550em");
1673
1674 /* Detect if there is a copper PHY attached. */
1675 switch (hw->device_id) {
1676 case IXGBE_DEV_ID_X550EM_X_KR:
1677 case IXGBE_DEV_ID_X550EM_X_KX4:
1678 case IXGBE_DEV_ID_X550EM_X_XFI:
1679 case IXGBE_DEV_ID_X550EM_A_KR:
1680 case IXGBE_DEV_ID_X550EM_A_KR_L:
1681 media_type = ixgbe_media_type_backplane;
1682 break;
1683 case IXGBE_DEV_ID_X550EM_X_SFP:
1684 case IXGBE_DEV_ID_X550EM_A_SFP:
1685 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1686 case IXGBE_DEV_ID_X550EM_A_QSFP:
1687 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1688 media_type = ixgbe_media_type_fiber;
1689 break;
1690 case IXGBE_DEV_ID_X550EM_X_1G_T:
1691 case IXGBE_DEV_ID_X550EM_X_10G_T:
1692 case IXGBE_DEV_ID_X550EM_A_10G_T:
1693 media_type = ixgbe_media_type_copper;
1694 break;
1695 case IXGBE_DEV_ID_X550EM_A_SGMII:
1696 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1697 media_type = ixgbe_media_type_backplane;
1698 hw->phy.type = ixgbe_phy_sgmii;
1699 break;
1700 case IXGBE_DEV_ID_X550EM_A_1G_T:
1701 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1702 media_type = ixgbe_media_type_copper;
1703 break;
1704 default:
1705 media_type = ixgbe_media_type_unknown;
1706 break;
1707 }
1708 return media_type;
1709 }
1710
1711 /**
1712 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1713 * @hw: pointer to hardware structure
1714 * @linear: TRUE if SFP module is linear
1715 */
1716 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1717 {
1718 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1719
1720 switch (hw->phy.sfp_type) {
1721 case ixgbe_sfp_type_not_present:
1722 return IXGBE_ERR_SFP_NOT_PRESENT;
1723 case ixgbe_sfp_type_da_cu_core0:
1724 case ixgbe_sfp_type_da_cu_core1:
1725 *linear = TRUE;
1726 break;
1727 case ixgbe_sfp_type_srlr_core0:
1728 case ixgbe_sfp_type_srlr_core1:
1729 case ixgbe_sfp_type_da_act_lmt_core0:
1730 case ixgbe_sfp_type_da_act_lmt_core1:
1731 case ixgbe_sfp_type_1g_sx_core0:
1732 case ixgbe_sfp_type_1g_sx_core1:
1733 case ixgbe_sfp_type_1g_lx_core0:
1734 case ixgbe_sfp_type_1g_lx_core1:
1735 *linear = FALSE;
1736 break;
1737 case ixgbe_sfp_type_unknown:
1738 case ixgbe_sfp_type_1g_cu_core0:
1739 case ixgbe_sfp_type_1g_cu_core1:
1740 default:
1741 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1742 }
1743
1744 return IXGBE_SUCCESS;
1745 }
1746
1747 /**
1748 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1749 * @hw: pointer to hardware structure
1750 *
1751 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1752 **/
1753 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1754 {
1755 s32 status;
1756 bool linear;
1757
1758 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1759
1760 status = ixgbe_identify_module_generic(hw);
1761
1762 if (status != IXGBE_SUCCESS)
1763 return status;
1764
1765 /* Check if SFP module is supported */
1766 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1767
1768 return status;
1769 }
1770
1771 /**
1772 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1773 * @hw: pointer to hardware structure
1774 */
1775 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1776 {
1777 s32 status;
1778 bool linear;
1779
1780 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1781
1782 /* Check if SFP module is supported */
1783 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1784
1785 if (status != IXGBE_SUCCESS)
1786 return status;
1787
1788 ixgbe_init_mac_link_ops_X550em(hw);
1789 hw->phy.ops.reset = NULL;
1790
1791 return IXGBE_SUCCESS;
1792 }
1793
1794 /**
1795 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1796 * internal PHY
1797 * @hw: pointer to hardware structure
1798 **/
1799 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1800 {
1801 s32 status;
1802 u32 link_ctrl;
1803
1804 /* Restart auto-negotiation. */
1805 status = hw->mac.ops.read_iosf_sb_reg(hw,
1806 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1807 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1808
1809 if (status) {
1810 DEBUGOUT("Auto-negotiation did not complete\n");
1811 return status;
1812 }
1813
1814 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1815 status = hw->mac.ops.write_iosf_sb_reg(hw,
1816 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1817 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1818
1819 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1820 u32 flx_mask_st20;
1821
1822 /* Indicate to FW that AN restart has been asserted */
1823 status = hw->mac.ops.read_iosf_sb_reg(hw,
1824 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1825 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1826
1827 if (status) {
1828 DEBUGOUT("Auto-negotiation did not complete\n");
1829 return status;
1830 }
1831
1832 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1833 status = hw->mac.ops.write_iosf_sb_reg(hw,
1834 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1835 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1836 }
1837
1838 return status;
1839 }
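
/*
 * Illustrative sketch, not part of the driver: the restart sequence above is
 * the driver's usual sideband read-modify-write pattern with an early return
 * on error.  read_reg()/write_reg() below are hypothetical stand-ins for the
 * read_iosf_sb_reg/write_iosf_sb_reg MAC callbacks.
 */
#include <stdint.h>

typedef int32_t (*example_reg_read_fn)(uint32_t addr, uint32_t *val);
typedef int32_t (*example_reg_write_fn)(uint32_t addr, uint32_t val);

static int32_t
example_set_bits(example_reg_read_fn read_reg, example_reg_write_fn write_reg,
    uint32_t addr, uint32_t bits)
{
	uint32_t val;
	int32_t rc;

	rc = read_reg(addr, &val);	/* read the current contents */
	if (rc)
		return rc;		/* propagate the error, as above */
	val |= bits;			/* set only the requested bits */
	return write_reg(addr, val);	/* write the value back */
}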
1840
1841 /**
1842 * ixgbe_setup_sgmii - Set up link for sgmii
1843 * @hw: pointer to hardware structure
1844 * @speed: new link speed
1845 * @autoneg_wait: TRUE when waiting for completion is needed
1846 */
1847 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1848 bool autoneg_wait)
1849 {
1850 struct ixgbe_mac_info *mac = &hw->mac;
1851 u32 lval, sval, flx_val;
1852 s32 rc;
1853
1854 rc = mac->ops.read_iosf_sb_reg(hw,
1855 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1856 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1857 if (rc)
1858 return rc;
1859
1860 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1861 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1862 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1863 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1864 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1865 rc = mac->ops.write_iosf_sb_reg(hw,
1866 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1867 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1868 if (rc)
1869 return rc;
1870
1871 rc = mac->ops.read_iosf_sb_reg(hw,
1872 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1873 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1874 if (rc)
1875 return rc;
1876
1877 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1878 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1879 rc = mac->ops.write_iosf_sb_reg(hw,
1880 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1881 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1882 if (rc)
1883 return rc;
1884
1885 rc = mac->ops.read_iosf_sb_reg(hw,
1886 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1887 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1888 if (rc)
1889 return rc;
1890
1891 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1892 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1893 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1894 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1895 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1896
1897 rc = mac->ops.write_iosf_sb_reg(hw,
1898 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1899 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1900 if (rc)
1901 return rc;
1902
1903 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1904 if (rc)
1905 return rc;
1906
1907 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1908 }
1909
1910 /**
1911 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1912 * @hw: pointer to hardware structure
1913 * @speed: new link speed
1914 * @autoneg_wait: TRUE when waiting for completion is needed
1915 */
1916 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1917 bool autoneg_wait)
1918 {
1919 struct ixgbe_mac_info *mac = &hw->mac;
1920 u32 lval, sval, flx_val;
1921 s32 rc;
1922
1923 rc = mac->ops.read_iosf_sb_reg(hw,
1924 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1925 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1926 if (rc)
1927 return rc;
1928
1929 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1930 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1931 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1932 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1933 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1934 rc = mac->ops.write_iosf_sb_reg(hw,
1935 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1936 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1937 if (rc)
1938 return rc;
1939
1940 rc = mac->ops.read_iosf_sb_reg(hw,
1941 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1942 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1943 if (rc)
1944 return rc;
1945
1946 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1947 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1948 rc = mac->ops.write_iosf_sb_reg(hw,
1949 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1950 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1951 if (rc)
1952 return rc;
1953
1954 rc = mac->ops.write_iosf_sb_reg(hw,
1955 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1956 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1957 if (rc)
1958 return rc;
1959
1960 rc = mac->ops.read_iosf_sb_reg(hw,
1961 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1962 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1963 if (rc)
1964 return rc;
1965
1966 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1967 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1968 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1969 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1970 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1971
1972 rc = mac->ops.write_iosf_sb_reg(hw,
1973 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1974 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1975 if (rc)
1976 return rc;
1977
1978 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1979
1980 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1981 }
1982
1983 /**
1984 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1985 * @hw: pointer to hardware structure
1986 */
1987 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1988 {
1989 struct ixgbe_mac_info *mac = &hw->mac;
1990
1991 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1992
1993 switch (hw->mac.ops.get_media_type(hw)) {
1994 case ixgbe_media_type_fiber:
1995 /* CS4227 does not support autoneg, so disable the laser control
1996 * functions for SFP+ fiber
1997 */
1998 mac->ops.disable_tx_laser = NULL;
1999 mac->ops.enable_tx_laser = NULL;
2000 mac->ops.flap_tx_laser = NULL;
2001 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2002 mac->ops.set_rate_select_speed =
2003 ixgbe_set_soft_rate_select_speed;
2004
2005 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2006 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2007 mac->ops.setup_mac_link =
2008 ixgbe_setup_mac_link_sfp_x550a;
2009 else
2010 mac->ops.setup_mac_link =
2011 ixgbe_setup_mac_link_sfp_x550em;
2012 break;
2013 case ixgbe_media_type_copper:
2014 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2015 break;
2016 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2017 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2018 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2019 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2020 mac->ops.check_link =
2021 ixgbe_check_mac_link_generic;
2022 } else {
2023 mac->ops.setup_link =
2024 ixgbe_setup_mac_link_t_X550em;
2025 }
2026 } else {
2027 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2028 mac->ops.check_link = ixgbe_check_link_t_X550em;
2029 }
2030 break;
2031 case ixgbe_media_type_backplane:
2032 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2033 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2034 mac->ops.setup_link = ixgbe_setup_sgmii;
2035 break;
2036 default:
2037 break;
2038 }
2039 }
2040
2041 /**
2042 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2043 * @hw: pointer to hardware structure
2044 * @speed: pointer to link speed
2045 * @autoneg: TRUE when autoneg or autotry is enabled
2046 */
2047 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2048 ixgbe_link_speed *speed,
2049 bool *autoneg)
2050 {
2051 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2052
2053
2054 if (hw->phy.type == ixgbe_phy_fw) {
2055 *autoneg = TRUE;
2056 *speed = hw->phy.speeds_supported;
2057 return 0;
2058 }
2059
2060 /* SFP */
2061 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2062
2063 /* CS4227 SFP must not enable auto-negotiation */
2064 *autoneg = FALSE;
2065
2066 /* Check if 1G SFP module. */
2067 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2068 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2069 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2070 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2071 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2072 return IXGBE_SUCCESS;
2073 }
2074
2075 /* Link capabilities are based on SFP */
2076 if (hw->phy.multispeed_fiber)
2077 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2078 IXGBE_LINK_SPEED_1GB_FULL;
2079 else
2080 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2081 } else {
2082 switch (hw->phy.type) {
2083 case ixgbe_phy_ext_1g_t:
2084 case ixgbe_phy_sgmii:
2085 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2086 break;
2087 case ixgbe_phy_x550em_kr:
2088 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2089 /* check different backplane modes */
2090 if (hw->phy.nw_mng_if_sel &
2091 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2092 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2093 break;
2094 } else if (hw->device_id ==
2095 IXGBE_DEV_ID_X550EM_A_KR_L) {
2096 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2097 break;
2098 }
2099 }
2100 /* fall through */
2101 default:
2102 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2103 IXGBE_LINK_SPEED_1GB_FULL;
2104 break;
2105 }
2106 *autoneg = TRUE;
2107 }
2108
2109 return IXGBE_SUCCESS;
2110 }
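
/*
 * Illustrative sketch, not part of the driver: link capabilities are returned
 * as a speed bitmask, so callers test individual speeds with a bitwise AND.
 * The flag values below are local to this example and merely mirror the
 * IXGBE_LINK_SPEED_* bitmask convention.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_SPEED_1GB_FULL	0x0020u	/* example flag value */
#define EXAMPLE_SPEED_10GB_FULL	0x0080u	/* example flag value */

static bool
example_supports_10g(uint32_t speed_mask)
{
	return (speed_mask & EXAMPLE_SPEED_10GB_FULL) != 0;
}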
2111
2112 /**
2113 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2114 * @hw: pointer to hardware structure
2115 * @lsc: pointer to boolean flag which indicates whether external Base T
2116 * PHY interrupt is lsc
2117 *
2118 * Determine if external Base T PHY interrupt cause is high temperature
2119 * failure alarm or link status change.
2120 *
2121 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2122 * failure alarm, else return PHY access status.
2123 */
2124 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2125 {
2126 u32 status;
2127 u16 reg;
2128
2129 *lsc = FALSE;
2130
2131 /* Vendor alarm triggered */
2132 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2133 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2134 &reg);
2135
2136 if (status != IXGBE_SUCCESS ||
2137 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2138 return status;
2139
2140 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2141 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2142 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2143 &reg);
2144
2145 if (status != IXGBE_SUCCESS ||
2146 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2147 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2148 return status;
2149
2150 /* Global alarm triggered */
2151 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2152 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2153 &reg);
2154
2155 if (status != IXGBE_SUCCESS)
2156 return status;
2157
2158 /* If high temperature failure, then return over temp error and exit */
2159 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2160 /* power down the PHY in case the PHY FW didn't already */
2161 ixgbe_set_copper_phy_power(hw, FALSE);
2162 return IXGBE_ERR_OVERTEMP;
2163 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2164 /* device fault alarm triggered */
2165 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2166 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2167 &reg);
2168
2169 if (status != IXGBE_SUCCESS)
2170 return status;
2171
2172 /* if device fault was due to high temp alarm handle and exit */
2173 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2174 /* power down the PHY in case the PHY FW didn't */
2175 ixgbe_set_copper_phy_power(hw, FALSE);
2176 return IXGBE_ERR_OVERTEMP;
2177 }
2178 }
2179
2180 /* Vendor alarm 2 triggered */
2181 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2182 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2183
2184 if (status != IXGBE_SUCCESS ||
2185 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2186 return status;
2187
2188 /* link connect/disconnect event occurred */
2189 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2190 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2191
2192 if (status != IXGBE_SUCCESS)
2193 return status;
2194
2195 /* Indicate LSC */
2196 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2197 *lsc = TRUE;
2198
2199 return IXGBE_SUCCESS;
2200 }
2201
2202 /**
2203 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2204 * @hw: pointer to hardware structure
2205 *
2206 * Enable link status change and temperature failure alarm for the external
2207 * Base T PHY
2208 *
2209 * Returns PHY access status
2210 */
2211 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2212 {
2213 u32 status;
2214 u16 reg;
2215 bool lsc;
2216
2217 /* Clear interrupt flags */
2218 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2219
2220 /* Enable link status change alarm */
2221
2222 /* Enable the LASI interrupts on X552 devices to receive notifications
2223 * of the link configuration of the external PHY and correspondingly
2224 * support the configuration of the internal iXFI link, since iXFI does
2225 * not support auto-negotiation. This is not required for X553 devices,
2226 * which have KR support; KR performs auto-negotiation and is used as
2227 * the internal link to the external PHY. Hence the check here avoids
2228 * enabling LASI interrupts for X553 devices.
2229 */
2230 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2231 status = hw->phy.ops.read_reg(hw,
2232 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2233 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2234
2235 if (status != IXGBE_SUCCESS)
2236 return status;
2237
2238 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2239
2240 status = hw->phy.ops.write_reg(hw,
2241 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2242 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2243
2244 if (status != IXGBE_SUCCESS)
2245 return status;
2246 }
2247
2248 /* Enable high temperature failure and global fault alarms */
2249 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2250 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2251 &reg);
2252
2253 if (status != IXGBE_SUCCESS)
2254 return status;
2255
2256 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2257 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2258
2259 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2260 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2261 reg);
2262
2263 if (status != IXGBE_SUCCESS)
2264 return status;
2265
2266 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2267 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2268 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2269 &reg);
2270
2271 if (status != IXGBE_SUCCESS)
2272 return status;
2273
2274 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2275 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2276
2277 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2278 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2279 reg);
2280
2281 if (status != IXGBE_SUCCESS)
2282 return status;
2283
2284 /* Enable chip-wide vendor alarm */
2285 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2286 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2287 &reg);
2288
2289 if (status != IXGBE_SUCCESS)
2290 return status;
2291
2292 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2293
2294 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2295 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2296 reg);
2297
2298 return status;
2299 }
2300
2301 /**
2302 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2303 * @hw: pointer to hardware structure
2304 * @speed: link speed
2305 *
2306 * Configures the integrated KR PHY.
2307 **/
2308 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2309 ixgbe_link_speed speed)
2310 {
2311 s32 status;
2312 u32 reg_val;
2313
2314 status = hw->mac.ops.read_iosf_sb_reg(hw,
2315 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2316 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2317 if (status)
2318 return status;
2319
2320 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2321 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2322 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2323
2324 /* Advertise 10G support. */
2325 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2326 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2327
2328 /* Advertise 1G support. */
2329 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2330 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2331
2332 status = hw->mac.ops.write_iosf_sb_reg(hw,
2333 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2334 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2335
2336 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2337 /* Set lane mode to KR auto negotiation */
2338 status = hw->mac.ops.read_iosf_sb_reg(hw,
2339 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2340 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2341
2342 if (status)
2343 return status;
2344
2345 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2346 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2347 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2348 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2349 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2350
2351 status = hw->mac.ops.write_iosf_sb_reg(hw,
2352 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2353 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2354 }
2355
2356 return ixgbe_restart_an_internal_phy_x550em(hw);
2357 }
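
/*
 * Illustrative sketch, not part of the driver: the advertisement step above
 * translates the requested speed mask into KR/KX capability bits before the
 * register is written back.  All names and bit positions below are local to
 * this example.
 */
#include <stdint.h>

#define EXAMPLE_SPEED_1GB	0x0020u		/* example speed flag */
#define EXAMPLE_SPEED_10GB	0x0080u		/* example speed flag */
#define EXAMPLE_AN_CAP_KX	(1u << 16)	/* hypothetical capability bit */
#define EXAMPLE_AN_CAP_KR	(1u << 26)	/* hypothetical capability bit */

static uint32_t
example_advertise_caps(uint32_t reg_val, uint32_t speed)
{
	reg_val &= ~(EXAMPLE_AN_CAP_KR | EXAMPLE_AN_CAP_KX);	/* clear old caps */
	if (speed & EXAMPLE_SPEED_10GB)
		reg_val |= EXAMPLE_AN_CAP_KR;	/* advertise 10G over KR */
	if (speed & EXAMPLE_SPEED_1GB)
		reg_val |= EXAMPLE_AN_CAP_KX;	/* advertise 1G over KX */
	return reg_val;
}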
2358
2359 /**
2360 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2361 * @hw: pointer to hardware structure
2362 */
2363 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2364 {
2365 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2366 s32 rc;
2367
2368 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2369 return IXGBE_SUCCESS;
2370
2371 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2372 if (rc)
2373 return rc;
2374 memset(store, 0, sizeof(store));
2375
2376 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2377 if (rc)
2378 return rc;
2379
2380 return ixgbe_setup_fw_link(hw);
2381 }
2382
2383 /**
2384 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2385 * @hw: pointer to hardware structure
2386 */
2387 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2388 {
2389 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2390 s32 rc;
2391
2392 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2393 if (rc)
2394 return rc;
2395
2396 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2397 ixgbe_shutdown_fw_phy(hw);
2398 return IXGBE_ERR_OVERTEMP;
2399 }
2400 return IXGBE_SUCCESS;
2401 }
2402
2403 /**
2404 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2405 * @hw: pointer to hardware structure
2406 *
2407 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2408 * values.
2409 **/
2410 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2411 {
2412 /* Save NW management interface connected on board. This is used
2413 * to determine internal PHY mode.
2414 */
2415 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2416
2417 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2418 * PHY address. This register field has only been used for X552.
2419 */
2420 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2421 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2422 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2423 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2424 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2425 }
2426
2427 return IXGBE_SUCCESS;
2428 }
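
/*
 * Illustrative sketch, not part of the driver: the PHY address above is a
 * bit-field inside NW_MNG_IF_SEL, extracted with the usual mask-then-shift
 * idiom.  The mask and shift values used here are hypothetical.
 */
#include <stdint.h>

#define EXAMPLE_FIELD_MASK	0x0000001Eu	/* hypothetical 4-bit field */
#define EXAMPLE_FIELD_SHIFT	1

static uint32_t
example_extract_field(uint32_t reg)
{
	return (reg & EXAMPLE_FIELD_MASK) >> EXAMPLE_FIELD_SHIFT;
}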
2429
2430 /**
2431 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2432 * @hw: pointer to hardware structure
2433 *
2434 * Initialize any function pointers that were not able to be
2435 * set during init_shared_code because the PHY/SFP type was
2436 * not known. Perform the SFP init if necessary.
2437 */
2438 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2439 {
2440 struct ixgbe_phy_info *phy = &hw->phy;
2441 s32 ret_val;
2442
2443 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2444
2445 hw->mac.ops.set_lan_id(hw);
2446 ixgbe_read_mng_if_sel_x550em(hw);
2447
2448 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2449 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2450 ixgbe_setup_mux_ctl(hw);
2451 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2452 }
2453
2454 switch (hw->device_id) {
2455 case IXGBE_DEV_ID_X550EM_A_1G_T:
2456 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2457 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2458 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2459 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2460 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2461 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2462 if (hw->bus.lan_id)
2463 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2464 else
2465 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2466
2467 break;
2468 case IXGBE_DEV_ID_X550EM_A_10G_T:
2469 case IXGBE_DEV_ID_X550EM_A_SFP:
2470 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2471 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2472 if (hw->bus.lan_id)
2473 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2474 else
2475 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2476 break;
2477 case IXGBE_DEV_ID_X550EM_X_SFP:
2478 /* set up for CS4227 usage */
2479 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2480 break;
2481 case IXGBE_DEV_ID_X550EM_X_1G_T:
2482 phy->ops.read_reg_mdi = NULL;
2483 phy->ops.write_reg_mdi = NULL;
2484 break;
2485 default:
2486 break;
2487 }
2488
2489 /* Identify the PHY or SFP module */
2490 ret_val = phy->ops.identify(hw);
2491 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2492 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2493 return ret_val;
2494
2495 /* Setup function pointers based on detected hardware */
2496 ixgbe_init_mac_link_ops_X550em(hw);
2497 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2498 phy->ops.reset = NULL;
2499
2500 /* Set functions pointers based on phy type */
2501 switch (hw->phy.type) {
2502 case ixgbe_phy_x550em_kx4:
2503 phy->ops.setup_link = NULL;
2504 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2505 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2506 break;
2507 case ixgbe_phy_x550em_kr:
2508 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2509 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2510 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2511 break;
2512 case ixgbe_phy_ext_1g_t:
2513 /* link is managed by FW */
2514 phy->ops.setup_link = NULL;
2515 phy->ops.reset = NULL;
2516 break;
2517 case ixgbe_phy_x550em_xfi:
2518 /* link is managed by HW */
2519 phy->ops.setup_link = NULL;
2520 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2521 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2522 break;
2523 case ixgbe_phy_x550em_ext_t:
2524 /* If internal link mode is XFI, then setup iXFI internal link,
2525 * else setup KR now.
2526 */
2527 phy->ops.setup_internal_link =
2528 ixgbe_setup_internal_phy_t_x550em;
2529
2530 /* setup SW LPLU only for first revision of X550EM_x */
2531 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2532 !(IXGBE_FUSES0_REV_MASK &
2533 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2534 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2535
2536 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2537 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2538 break;
2539 case ixgbe_phy_sgmii:
2540 phy->ops.setup_link = NULL;
2541 break;
2542 case ixgbe_phy_fw:
2543 phy->ops.setup_link = ixgbe_setup_fw_link;
2544 phy->ops.reset = ixgbe_reset_phy_fw;
2545 break;
2546 default:
2547 break;
2548 }
2549 return ret_val;
2550 }
2551
2552 /**
2553 * ixgbe_set_mdio_speed - Set MDIO clock speed
2554 * @hw: pointer to hardware structure
2555 */
2556 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2557 {
2558 u32 hlreg0;
2559
2560 switch (hw->device_id) {
2561 case IXGBE_DEV_ID_X550EM_X_10G_T:
2562 case IXGBE_DEV_ID_X550EM_A_SGMII:
2563 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2564 case IXGBE_DEV_ID_X550EM_A_10G_T:
2565 case IXGBE_DEV_ID_X550EM_A_SFP:
2566 case IXGBE_DEV_ID_X550EM_A_QSFP:
2567 /* Config MDIO clock speed before the first MDIO PHY access */
2568 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2569 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2570 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2571 break;
2572 case IXGBE_DEV_ID_X550EM_A_1G_T:
2573 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2574 /* Select fast MDIO clock speed for these devices */
2575 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2576 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2577 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2578 break;
2579 default:
2580 break;
2581 }
2582 }
2583
2584 /**
2585 * ixgbe_reset_hw_X550em - Perform hardware reset
2586 * @hw: pointer to hardware structure
2587 *
2588 * Resets the hardware by resetting the transmit and receive units, masks
2589 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2590 * reset.
2591 */
2592 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2593 {
2594 ixgbe_link_speed link_speed;
2595 s32 status;
2596 u32 ctrl = 0;
2597 u32 i;
2598 bool link_up = FALSE;
2599 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2600
2601 DEBUGFUNC("ixgbe_reset_hw_X550em");
2602
2603 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2604 status = hw->mac.ops.stop_adapter(hw);
2605 if (status != IXGBE_SUCCESS) {
2606 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2607 return status;
2608 }
2609 /* flush pending Tx transactions */
2610 ixgbe_clear_tx_pending(hw);
2611
2612 ixgbe_set_mdio_speed(hw);
2613
2614 /* PHY ops must be identified and initialized prior to reset */
2615 status = hw->phy.ops.init(hw);
2616
2617 if (status)
2618 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2619 status);
2620
2621 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2622 status == IXGBE_ERR_PHY_ADDR_INVALID) {
2623 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2624 return status;
2625 }
2626
2627 /* start the external PHY */
2628 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2629 status = ixgbe_init_ext_t_x550em(hw);
2630 if (status) {
2631 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2632 status);
2633 return status;
2634 }
2635 }
2636
2637 /* Setup SFP module if there is one present. */
2638 if (hw->phy.sfp_setup_needed) {
2639 status = hw->mac.ops.setup_sfp(hw);
2640 hw->phy.sfp_setup_needed = FALSE;
2641 }
2642
2643 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2644 return status;
2645
2646 /* Reset PHY */
2647 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2648 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2649 return IXGBE_ERR_OVERTEMP;
2650 }
2651
2652 mac_reset_top:
2653 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2654 * If link reset is used when link is up, it might reset the PHY when
2655 * mng is using it. If link is down or the flag to force full link
2656 * reset is set, then perform link reset.
2657 */
2658 ctrl = IXGBE_CTRL_LNK_RST;
2659 if (!hw->force_full_reset) {
2660 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2661 if (link_up)
2662 ctrl = IXGBE_CTRL_RST;
2663 }
2664
2665 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2666 if (status != IXGBE_SUCCESS) {
2667 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2668 "semaphore failed with %d", status);
2669 return IXGBE_ERR_SWFW_SYNC;
2670 }
2671 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2672 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2673 IXGBE_WRITE_FLUSH(hw);
2674 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2675
2676 /* Poll for reset bit to self-clear meaning reset is complete */
2677 for (i = 0; i < 10; i++) {
2678 usec_delay(1);
2679 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2680 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2681 break;
2682 }
2683
2684 if (ctrl & IXGBE_CTRL_RST_MASK) {
2685 status = IXGBE_ERR_RESET_FAILED;
2686 DEBUGOUT("Reset polling failed to complete.\n");
2687 }
2688
2689 msec_delay(50);
2690
2691 /* Double resets are required for recovery from certain error
2692 * conditions. Between resets, it is necessary to stall to
2693 * allow time for any pending HW events to complete.
2694 */
2695 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2696 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2697 goto mac_reset_top;
2698 }
2699
2700 /* Store the permanent mac address */
2701 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2702
2703 /* Store MAC address from RAR0, clear receive address registers, and
2704 * clear the multicast table. Also reset num_rar_entries to 128,
2705 * since we modify this value when programming the SAN MAC address.
2706 */
2707 hw->mac.num_rar_entries = 128;
2708 hw->mac.ops.init_rx_addrs(hw);
2709
2710 ixgbe_set_mdio_speed(hw);
2711
2712 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2713 ixgbe_setup_mux_ctl(hw);
2714
2715 if (status != IXGBE_SUCCESS)
2716 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2717
2718 return status;
2719 }
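
/*
 * Illustrative sketch, not part of the driver: the reset-completion wait
 * above is a bounded poll for a self-clearing bit.  read_reg() and
 * short_delay() are hypothetical stand-ins for the register read and the
 * usec_delay() used by the driver.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
example_wait_bit_clear(uint32_t (*read_reg)(void), void (*short_delay)(void),
    uint32_t mask, int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		short_delay();
		if ((read_reg() & mask) == 0)
			return true;	/* the bit self-cleared; reset done */
	}
	return false;	/* timed out, the IXGBE_ERR_RESET_FAILED case above */
}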
2720
2721 /**
2722 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2723 * @hw: pointer to hardware structure
2724 */
2725 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2726 {
2727 u32 status;
2728 u16 reg;
2729
2730 status = hw->phy.ops.read_reg(hw,
2731 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2732 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2733 &reg);
2734
2735 if (status != IXGBE_SUCCESS)
2736 return status;
2737
2738 /* If the PHY FW reset completed bit is set, then this is the first
2739 * SW instance after a power on, so the PHY FW must be un-stalled.
2740 */
2741 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2742 status = hw->phy.ops.read_reg(hw,
2743 IXGBE_MDIO_GLOBAL_RES_PR_10,
2744 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2745 &reg);
2746
2747 if (status != IXGBE_SUCCESS)
2748 return status;
2749
2750 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2751
2752 status = hw->phy.ops.write_reg(hw,
2753 IXGBE_MDIO_GLOBAL_RES_PR_10,
2754 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2755 reg);
2756
2757 if (status != IXGBE_SUCCESS)
2758 return status;
2759 }
2760
2761 return status;
2762 }
2763
2764 /**
2765 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2766 * @hw: pointer to hardware structure
2767 **/
2768 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2769 {
2770 /* leave link alone for 2.5G */
2771 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2772 return IXGBE_SUCCESS;
2773
2774 if (ixgbe_check_reset_blocked(hw))
2775 return 0;
2776
2777 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2778 }
2779
2780 /**
2781 * ixgbe_setup_mac_link_sfp_x550em - Setup internal/external PHY for SFP
2782 * @hw: pointer to hardware structure
2783 * @speed: new link speed
2784 * @autoneg_wait_to_complete: unused
2785 *
2786 * Configure the external PHY and the integrated KR PHY for SFP support.
2787 **/
2788 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2789 ixgbe_link_speed speed,
2790 bool autoneg_wait_to_complete)
2791 {
2792 s32 ret_val;
2793 u16 reg_slice, reg_val;
2794 bool setup_linear = FALSE;
2795 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2796
2797 /* Check if SFP module is supported and linear */
2798 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2799
2800 /* If no SFP module present, then return success. Return success since
2801 * there is no reason to configure CS4227 and SFP not present error is
2802 * not accepted in the setup MAC link flow.
2803 */
2804 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2805 return IXGBE_SUCCESS;
2806
2807 if (ret_val != IXGBE_SUCCESS)
2808 return ret_val;
2809
2810 /* Configure internal PHY for KR/KX. */
2811 ixgbe_setup_kr_speed_x550em(hw, speed);
2812
2813 /* Configure CS4227 LINE side to proper mode. */
2814 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2815 (hw->bus.lan_id << 12);
2816 if (setup_linear)
2817 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2818 else
2819 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2820 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2821 reg_val);
2822 return ret_val;
2823 }
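
/*
 * Illustrative sketch, not part of the driver: the CS4227 value written above
 * shifts the chosen EDC mode left by one and ORs in bit 0, exactly as in the
 * reg_val composition in the function.  The mode codes here are placeholders,
 * not the real IXGBE_CS4227_EDC_MODE_* values.
 */
#include <stdint.h>

#define EXAMPLE_EDC_MODE_CX1	0x0002u	/* placeholder mode code */
#define EXAMPLE_EDC_MODE_SR	0x0004u	/* placeholder mode code */

static uint16_t
example_edc_reg_value(int linear)
{
	uint16_t mode = linear ? EXAMPLE_EDC_MODE_CX1 : EXAMPLE_EDC_MODE_SR;

	return (uint16_t)((mode << 1) | 0x1);
}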
2824
2825 /**
2826 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2827 * @hw: pointer to hardware structure
2828 * @speed: the link speed to force
2829 *
2830 * Configures the integrated PHY for native SFI mode. Used to connect the
2831 * internal PHY directly to an SFP cage, without autonegotiation.
2832 **/
2833 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2834 {
2835 struct ixgbe_mac_info *mac = &hw->mac;
2836 s32 status;
2837 u32 reg_val;
2838
2839 /* Disable all AN and force speed to 10G Serial. */
2840 status = mac->ops.read_iosf_sb_reg(hw,
2841 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2842 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2843 if (status != IXGBE_SUCCESS)
2844 return status;
2845
2846 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2847 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2848 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2849 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2850
2851 /* Select forced link speed for internal PHY. */
2852 switch (*speed) {
2853 case IXGBE_LINK_SPEED_10GB_FULL:
2854 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2855 break;
2856 case IXGBE_LINK_SPEED_1GB_FULL:
2857 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2858 break;
2859 case 0:
2860 /* media none (linkdown) */
2861 break;
2862 default:
2863 /* Other link speeds are not supported by internal PHY. */
2864 return IXGBE_ERR_LINK_SETUP;
2865 }
2866
2867 status = mac->ops.write_iosf_sb_reg(hw,
2868 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2869 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2870
2871 /* Toggle port SW reset by AN reset. */
2872 status = ixgbe_restart_an_internal_phy_x550em(hw);
2873
2874 return status;
2875 }
2876
2877 /**
2878 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2879 * @hw: pointer to hardware structure
2880 * @speed: new link speed
2881 * @autoneg_wait_to_complete: unused
2882 *
2883 * Configure the integrated PHY for SFP support.
2884 **/
2885 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2886 ixgbe_link_speed speed,
2887 bool autoneg_wait_to_complete)
2888 {
2889 s32 ret_val;
2890 u16 reg_phy_ext;
2891 bool setup_linear = FALSE;
2892 u32 reg_slice, reg_phy_int, slice_offset;
2893
2894 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2895
2896 /* Check if SFP module is supported and linear */
2897 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2898
2899 /* If no SFP module present, then return success. Return success since
2900 * SFP not present error is not accepted in the setup MAC link flow.
2901 */
2902 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2903 return IXGBE_SUCCESS;
2904
2905 if (ret_val != IXGBE_SUCCESS)
2906 return ret_val;
2907
2908 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2909 /* Configure internal PHY for native SFI based on module type */
2910 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2911 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2912 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2913
2914 if (ret_val != IXGBE_SUCCESS)
2915 return ret_val;
2916
2917 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2918 if (!setup_linear)
2919 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2920
2921 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2922 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2923 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2924
2925 if (ret_val != IXGBE_SUCCESS)
2926 return ret_val;
2927
2928 /* Setup SFI internal link. */
2929 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2930 } else {
2931 /* Configure internal PHY for KR/KX. */
2932 ixgbe_setup_kr_speed_x550em(hw, speed);
2933
2934 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2935 /* Find Address */
2936 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2937 return IXGBE_ERR_PHY_ADDR_INVALID;
2938 }
2939
2940 /* Get external PHY SKU id */
2941 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2942 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2943
2944 if (ret_val != IXGBE_SUCCESS)
2945 return ret_val;
2946
2947 /* When configuring quad port CS4223, the MAC instance is part
2948 * of the slice offset.
2949 */
2950 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2951 slice_offset = (hw->bus.lan_id +
2952 (hw->bus.instance_id << 1)) << 12;
2953 else
2954 slice_offset = hw->bus.lan_id << 12;
2955
2956 /* Configure CS4227/CS4223 LINE side to proper mode. */
2957 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2958
2959 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2960 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2961
2962 if (ret_val != IXGBE_SUCCESS)
2963 return ret_val;
2964
2965 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2966 (IXGBE_CS4227_EDC_MODE_SR << 1));
2967
2968 if (setup_linear)
2969 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2970 else
2971 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2972 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2973 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2974
2975 /* Flush previous write with a read */
2976 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2977 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2978 }
2979 return ret_val;
2980 }
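
/*
 * Illustrative sketch, not part of the driver: the slice offset above folds
 * the MAC instance into the per-lane offset for the quad-port CS4223.  For
 * example, lan_id = 1 and instance_id = 1 give (1 + (1 << 1)) << 12 = 0x3000
 * in the quad-port case, while the CS4227 case uses only 1 << 12 = 0x1000.
 */
#include <stdint.h>

static uint32_t
example_slice_offset(uint32_t lan_id, uint32_t instance_id, int quad_port)
{
	if (quad_port)
		return (lan_id + (instance_id << 1)) << 12;
	return lan_id << 12;
}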
2981
2982 /**
2983 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2984 * @hw: pointer to hardware structure
2985 *
2986 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2987 **/
2988 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
2989 {
2990 struct ixgbe_mac_info *mac = &hw->mac;
2991 s32 status;
2992 u32 reg_val;
2993
2994 /* Disable training protocol FSM. */
2995 status = mac->ops.read_iosf_sb_reg(hw,
2996 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2997 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2998 if (status != IXGBE_SUCCESS)
2999 return status;
3000 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3001 status = mac->ops.write_iosf_sb_reg(hw,
3002 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3003 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3004 if (status != IXGBE_SUCCESS)
3005 return status;
3006
3007 /* Disable Flex from training TXFFE. */
3008 status = mac->ops.read_iosf_sb_reg(hw,
3009 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3010 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3011 if (status != IXGBE_SUCCESS)
3012 return status;
3013 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3014 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3015 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3016 status = mac->ops.write_iosf_sb_reg(hw,
3017 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3018 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3019 if (status != IXGBE_SUCCESS)
3020 return status;
3021 status = mac->ops.read_iosf_sb_reg(hw,
3022 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3023 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3024 if (status != IXGBE_SUCCESS)
3025 return status;
3026 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3027 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3028 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3029 status = mac->ops.write_iosf_sb_reg(hw,
3030 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3031 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3032 if (status != IXGBE_SUCCESS)
3033 return status;
3034
3035 /* Enable override for coefficients. */
3036 status = mac->ops.read_iosf_sb_reg(hw,
3037 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3038 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3039 if (status != IXGBE_SUCCESS)
3040 return status;
3041 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3042 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3043 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3044 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3045 status = mac->ops.write_iosf_sb_reg(hw,
3046 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3047 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3048 return status;
3049 }
3050
3051 /**
3052 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3053 * @hw: pointer to hardware structure
3054 * @speed: the link speed to force
3055 *
3056 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3057 * internal and external PHY at a specific speed, without autonegotiation.
3058 **/
3059 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3060 {
3061 struct ixgbe_mac_info *mac = &hw->mac;
3062 s32 status;
3063 u32 reg_val;
3064
3065 /* iXFI is only supported with X552 */
3066 if (mac->type != ixgbe_mac_X550EM_x)
3067 return IXGBE_ERR_LINK_SETUP;
3068
3069 /* Disable AN and force speed to 10G Serial. */
3070 status = mac->ops.read_iosf_sb_reg(hw,
3071 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3072 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3073 if (status != IXGBE_SUCCESS)
3074 return status;
3075
3076 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3077 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3078
3079 /* Select forced link speed for internal PHY. */
3080 switch (*speed) {
3081 case IXGBE_LINK_SPEED_10GB_FULL:
3082 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3083 break;
3084 case IXGBE_LINK_SPEED_1GB_FULL:
3085 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3086 break;
3087 default:
3088 /* Other link speeds are not supported by internal KR PHY. */
3089 return IXGBE_ERR_LINK_SETUP;
3090 }
3091
3092 status = mac->ops.write_iosf_sb_reg(hw,
3093 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3094 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3095 if (status != IXGBE_SUCCESS)
3096 return status;
3097
3098 /* Additional configuration needed for x550em_x */
3099 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3100 status = ixgbe_setup_ixfi_x550em_x(hw);
3101 if (status != IXGBE_SUCCESS)
3102 return status;
3103 }
3104
3105 /* Toggle port SW reset by AN reset. */
3106 status = ixgbe_restart_an_internal_phy_x550em(hw);
3107
3108 return status;
3109 }
3110
3111 /**
3112 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3113 * @hw: address of hardware structure
3114 * @link_up: address of boolean to indicate link status
3115 *
3116 * Returns error code if unable to get link status.
3117 */
3118 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3119 {
3120 u32 ret;
3121 u16 autoneg_status;
3122
3123 *link_up = FALSE;
3124
3125 /* read this twice back to back to indicate current status */
3126 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3127 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3128 &autoneg_status);
3129 if (ret != IXGBE_SUCCESS)
3130 return ret;
3131
3132 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3133 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3134 &autoneg_status);
3135 if (ret != IXGBE_SUCCESS)
3136 return ret;
3137
3138 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3139
3140 return IXGBE_SUCCESS;
3141 }
3142
3143 /**
3144 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3145 * @hw: pointer to hardware structure
3146 *
3147 * Configures the link between the integrated KR PHY and the external X557 PHY.
3148 * The driver will call this function when it gets a link status change
3149 * interrupt from the X557 PHY. This function configures the link speed
3150 * between the PHYs to match the link speed of the BASE-T link.
3151 *
3152 * A return of a non-zero value indicates an error, and the base driver should
3153 * not report link up.
3154 */
3155 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3156 {
3157 ixgbe_link_speed force_speed;
3158 bool link_up;
3159 u32 status;
3160 u16 speed;
3161
3162 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3163 return IXGBE_ERR_CONFIG;
3164
3165 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3166 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3167 /* If link is down, there is no setup necessary so return */
3168 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3169 if (status != IXGBE_SUCCESS)
3170 return status;
3171
3172 if (!link_up)
3173 return IXGBE_SUCCESS;
3174
3175 status = hw->phy.ops.read_reg(hw,
3176 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3177 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3178 &speed);
3179 if (status != IXGBE_SUCCESS)
3180 return status;
3181
3182 /* If link is still down - no setup is required so return */
3183 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3184 if (status != IXGBE_SUCCESS)
3185 return status;
3186 if (!link_up)
3187 return IXGBE_SUCCESS;
3188
3189 /* clear everything but the speed and duplex bits */
3190 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3191
3192 switch (speed) {
3193 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3194 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3195 break;
3196 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3197 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3198 break;
3199 default:
3200 /* Internal PHY does not support anything else */
3201 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3202 }
3203
3204 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3205 } else {
3206 speed = IXGBE_LINK_SPEED_10GB_FULL |
3207 IXGBE_LINK_SPEED_1GB_FULL;
3208 return ixgbe_setup_kr_speed_x550em(hw, speed);
3209 }
3210 }
3211
3212 /**
3213 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3214 * @hw: pointer to hardware structure
3215 *
3216 * Configures the integrated KR PHY to use internal loopback mode.
3217 **/
3218 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3219 {
3220 s32 status;
3221 u32 reg_val;
3222
3223 /* Disable AN and force speed to 10G Serial. */
3224 status = hw->mac.ops.read_iosf_sb_reg(hw,
3225 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3226 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3227 if (status != IXGBE_SUCCESS)
3228 return status;
3229 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3230 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3231 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3232 status = hw->mac.ops.write_iosf_sb_reg(hw,
3233 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3234 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3235 if (status != IXGBE_SUCCESS)
3236 return status;
3237
3238 /* Set near-end loopback clocks. */
3239 status = hw->mac.ops.read_iosf_sb_reg(hw,
3240 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3241 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3242 if (status != IXGBE_SUCCESS)
3243 return status;
3244 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3245 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3246 status = hw->mac.ops.write_iosf_sb_reg(hw,
3247 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3248 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3249 if (status != IXGBE_SUCCESS)
3250 return status;
3251
3252 /* Set loopback enable. */
3253 status = hw->mac.ops.read_iosf_sb_reg(hw,
3254 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3255 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3256 if (status != IXGBE_SUCCESS)
3257 return status;
3258 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3259 status = hw->mac.ops.write_iosf_sb_reg(hw,
3260 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3261 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3262 if (status != IXGBE_SUCCESS)
3263 return status;
3264
3265 /* Training bypass. */
3266 status = hw->mac.ops.read_iosf_sb_reg(hw,
3267 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3268 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3269 if (status != IXGBE_SUCCESS)
3270 return status;
3271 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3272 status = hw->mac.ops.write_iosf_sb_reg(hw,
3273 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3274 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3275
3276 return status;
3277 }
3278
3279 /**
3280 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface
3281 * command; the EEPROM semaphore is acquired and released internally.
3282 * @hw: pointer to hardware structure
3283 * @offset: offset of word in the EEPROM to read
3284 * @data: word read from the EEPROM
3285 *
3286 * Reads a 16 bit word from the EEPROM using the hostif.
3287 **/
3288 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3289 {
3290 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3291 struct ixgbe_hic_read_shadow_ram buffer;
3292 s32 status;
3293
3294 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3295 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3296 buffer.hdr.req.buf_lenh = 0;
3297 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3298 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3299
3300 /* convert offset from words to bytes */
3301 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3302 /* one word */
3303 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3304 buffer.pad2 = 0;
3305 buffer.pad3 = 0;
3306
3307 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3308 if (status)
3309 return status;
3310
3311 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3312 IXGBE_HI_COMMAND_TIMEOUT);
3313 if (!status) {
3314 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3315 FW_NVM_DATA_OFFSET);
3316 }
3317
3318 hw->mac.ops.release_swfw_sync(hw, mask);
3319 return status;
3320 }
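
/*
 * Illustrative sketch, not part of the driver: the shadow RAM command above
 * addresses bytes, so the 16-bit word offset is doubled and then converted to
 * big-endian for the firmware.  The swap below is what IXGBE_CPU_TO_BE32
 * amounts to on a little-endian host; on a big-endian host it is a no-op.
 */
#include <stdint.h>

static uint32_t
example_word_offset_to_be_addr(uint16_t word_offset)
{
	uint32_t byte_addr = (uint32_t)word_offset * 2;	/* words to bytes */

	return ((byte_addr & 0x000000ffu) << 24) |
	       ((byte_addr & 0x0000ff00u) << 8) |
	       ((byte_addr & 0x00ff0000u) >> 8) |
	       ((byte_addr & 0xff000000u) >> 24);
}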
3321
3322 /**
3323 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3324 * @hw: pointer to hardware structure
3325 * @offset: offset of word in the EEPROM to read
3326 * @words: number of words
3327 * @data: word(s) read from the EEPROM
3328 *
3329 * Reads 16 bit word(s) from the EEPROM using the hostif.
3330 **/
3331 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3332 u16 offset, u16 words, u16 *data)
3333 {
3334 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3335 struct ixgbe_hic_read_shadow_ram buffer;
3336 u32 current_word = 0;
3337 u16 words_to_read;
3338 s32 status;
3339 u32 i;
3340
3341 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3342
3343 /* Take semaphore for the entire operation. */
3344 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3345 if (status) {
3346 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3347 return status;
3348 }
3349
3350 while (words) {
3351 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3352 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3353 else
3354 words_to_read = words;
3355
3356 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3357 buffer.hdr.req.buf_lenh = 0;
3358 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3359 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3360
3361 /* convert offset from words to bytes */
3362 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3363 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3364 buffer.pad2 = 0;
3365 buffer.pad3 = 0;
3366
3367 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3368 IXGBE_HI_COMMAND_TIMEOUT);
3369
3370 if (status) {
3371 DEBUGOUT("Host interface command failed\n");
3372 goto out;
3373 }
3374
3375 for (i = 0; i < words_to_read; i++) {
3376 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3377 2 * i;
3378 u32 value = IXGBE_READ_REG(hw, reg);
3379
3380 data[current_word] = (u16)(value & 0xffff);
3381 current_word++;
3382 i++;
3383 if (i < words_to_read) {
3384 value >>= 16;
3385 data[current_word] = (u16)(value & 0xffff);
3386 current_word++;
3387 }
3388 }
3389 words -= words_to_read;
3390 }
3391
3392 out:
3393 hw->mac.ops.release_swfw_sync(hw, mask);
3394 return status;
3395 }
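
/*
 * Illustrative sketch, not part of the driver: the copy loop above unpacks
 * each 32-bit FLEX_MNG read into two 16-bit EEPROM words, low half first.
 * The helper below performs the same unpacking on an in-memory array.
 */
#include <stdint.h>

static void
example_unpack_words(const uint32_t *regs, uint16_t *words, uint32_t nwords)
{
	uint32_t i;

	for (i = 0; i < nwords; i++) {
		uint32_t value = regs[i / 2];

		if (i & 1)
			value >>= 16;	/* odd index: take the high half */
		words[i] = (uint16_t)(value & 0xffff);
	}
}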
3396
3397 /**
3398 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3399 * @hw: pointer to hardware structure
3400 * @offset: offset of word in the EEPROM to write
3401 * @data: word to write to the EEPROM
3402 *
3403 * Write a 16 bit word to the EEPROM using the hostif (caller holds the semaphore).
3404 **/
3405 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3406 u16 data)
3407 {
3408 s32 status;
3409 struct ixgbe_hic_write_shadow_ram buffer;
3410
3411 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3412
3413 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3414 buffer.hdr.req.buf_lenh = 0;
3415 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3416 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3417
3418 /* one word */
3419 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3420 buffer.data = data;
3421 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3422
3423 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3424 sizeof(buffer),
3425 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3426
3427 return status;
3428 }
3429
3430 /**
3431 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3432 * @hw: pointer to hardware structure
3433 * @offset: offset of word in the EEPROM to write
3434 * @data: word to write to the EEPROM
3435 *
3436 * Write a 16 bit word to the EEPROM using the hostif.
3437 **/
3438 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3439 u16 data)
3440 {
3441 s32 status = IXGBE_SUCCESS;
3442
3443 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3444
3445 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3446 IXGBE_SUCCESS) {
3447 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3448 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3449 } else {
3450 DEBUGOUT("write ee hostif failed to get semaphore");
3451 status = IXGBE_ERR_SWFW_SYNC;
3452 }
3453
3454 return status;
3455 }
3456
3457 /**
3458 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3459 * @hw: pointer to hardware structure
3460 * @offset: offset of word in the EEPROM to write
3461 * @words: number of words
3462 * @data: word(s) to write to the EEPROM
3463 *
3464 * Write 16 bit word(s) to the EEPROM using the hostif.
3465 **/
3466 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3467 u16 offset, u16 words, u16 *data)
3468 {
3469 s32 status = IXGBE_SUCCESS;
3470 u32 i = 0;
3471
3472 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3473
3474 /* Take semaphore for the entire operation. */
3475 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3476 if (status != IXGBE_SUCCESS) {
3477 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3478 goto out;
3479 }
3480
3481 for (i = 0; i < words; i++) {
3482 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3483 data[i]);
3484
3485 if (status != IXGBE_SUCCESS) {
3486 DEBUGOUT("Eeprom buffered write failed\n");
3487 break;
3488 }
3489 }
3490
3491 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3492 out:
3493
3494 return status;
3495 }
3496
3497 /**
3498 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3499 * @hw: pointer to hardware structure
3500 * @ptr: pointer offset in eeprom
3501 * @size: size of section pointed to by ptr; if 0, the first word is used as size
3502 * @csum: address of checksum to update
3503 * @buffer: pointer to buffer containing calculated checksum
3504 * @buffer_size: size of buffer
3505 *
3506 * Returns error status for any failure
3507 */
3508 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3509 u16 size, u16 *csum, u16 *buffer,
3510 u32 buffer_size)
3511 {
3512 u16 buf[256];
3513 s32 status;
3514 u16 length, bufsz, i, start;
3515 u16 *local_buffer;
3516
3517 bufsz = sizeof(buf) / sizeof(buf[0]);
3518
3519 /* Read a chunk at the pointer location */
3520 if (!buffer) {
3521 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3522 if (status) {
3523 DEBUGOUT("Failed to read EEPROM image\n");
3524 return status;
3525 }
3526 local_buffer = buf;
3527 } else {
3528 if (buffer_size < ptr)
3529 return IXGBE_ERR_PARAM;
3530 local_buffer = &buffer[ptr];
3531 }
3532
3533 if (size) {
3534 start = 0;
3535 length = size;
3536 } else {
3537 start = 1;
3538 length = local_buffer[0];
3539
3540 /* Skip pointer section if length is invalid. */
3541 if (length == 0xFFFF || length == 0 ||
3542 (ptr + length) >= hw->eeprom.word_size)
3543 return IXGBE_SUCCESS;
3544 }
3545
3546 if (buffer && ((u32)start + (u32)length > buffer_size))
3547 return IXGBE_ERR_PARAM;
3548
3549 for (i = start; length; i++, length--) {
3550 if (i == bufsz && !buffer) {
3551 ptr += bufsz;
3552 i = 0;
3553 if (length < bufsz)
3554 bufsz = length;
3555
3556 /* Read a chunk at the pointer location */
3557 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3558 bufsz, buf);
3559 if (status) {
3560 DEBUGOUT("Failed to read EEPROM image\n");
3561 return status;
3562 }
3563 }
3564 *csum += local_buffer[i];
3565 }
3566 return IXGBE_SUCCESS;
3567 }
3568
3569 /**
3570 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3571 * @hw: pointer to hardware structure
3572 * @buffer: pointer to buffer containing calculated checksum
3573 * @buffer_size: size of buffer
3574 *
3575 * Returns a negative error code on error, or the 16-bit checksum
3576 **/
3577 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3578 {
3579 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3580 u16 *local_buffer;
3581 s32 status;
3582 u16 checksum = 0;
3583 u16 pointer, i, size;
3584
3585 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3586
3587 hw->eeprom.ops.init_params(hw);
3588
3589 if (!buffer) {
3590 /* Read pointer area */
3591 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3592 IXGBE_EEPROM_LAST_WORD + 1,
3593 eeprom_ptrs);
3594 if (status) {
3595 DEBUGOUT("Failed to read EEPROM image\n");
3596 return status;
3597 }
3598 local_buffer = eeprom_ptrs;
3599 } else {
3600 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3601 return IXGBE_ERR_PARAM;
3602 local_buffer = buffer;
3603 }
3604
3605 /*
3606 * For X550 hardware include 0x0-0x41 in the checksum, skipping the
3607 * checksum word itself
3608 */
3609 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3610 if (i != IXGBE_EEPROM_CHECKSUM)
3611 checksum += local_buffer[i];
3612
3613 /*
3614 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3615 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3616 */
3617 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3618 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3619 continue;
3620
3621 pointer = local_buffer[i];
3622
3623 /* Skip pointer section if the pointer is invalid. */
3624 if (pointer == 0xFFFF || pointer == 0 ||
3625 pointer >= hw->eeprom.word_size)
3626 continue;
3627
3628 switch (i) {
3629 case IXGBE_PCIE_GENERAL_PTR:
3630 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3631 break;
3632 case IXGBE_PCIE_CONFIG0_PTR:
3633 case IXGBE_PCIE_CONFIG1_PTR:
3634 size = IXGBE_PCIE_CONFIG_SIZE;
3635 break;
3636 default:
3637 size = 0;
3638 break;
3639 }
3640
3641 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3642 buffer, buffer_size);
3643 if (status)
3644 return status;
3645 }
3646
3647 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3648
3649 return (s32)checksum;
3650 }
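
/*
 * Illustrative sketch (not built): using the optional buffer argument of
 * ixgbe_calc_checksum_X550() to verify a candidate EEPROM image held in
 * host memory before committing it to shadow RAM.  The helper name, the
 * image buffer, and its word count are hypothetical; the word count is in
 * 16-bit words, matching the bounds checks above.
 */
#if 0
static s32 example_verify_eeprom_image(struct ixgbe_hw *hw, u16 *image,
				       u32 image_words)
{
	s32 calc;

	/* Returns the 16-bit checksum on success or a negative error code */
	calc = ixgbe_calc_checksum_X550(hw, image, image_words);
	if (calc < 0)
		return calc;

	/* A consistent image stores exactly this value in its checksum word */
	if (image[IXGBE_EEPROM_CHECKSUM] != (u16)calc)
		return IXGBE_ERR_EEPROM_CHECKSUM;

	return IXGBE_SUCCESS;
}
#endif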
3651
3652 /**
3653 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3654 * @hw: pointer to hardware structure
3655 *
3656 * Returns a negative error code on error, or the 16-bit checksum
3657 **/
3658 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3659 {
3660 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3661 }
3662
3663 /**
3664 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3665 * @hw: pointer to hardware structure
3666 * @checksum_val: calculated checksum
3667 *
3668 * Performs checksum calculation and validates the EEPROM checksum. If the
3669 * caller does not need checksum_val, the value can be NULL.
3670 **/
3671 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3672 {
3673 s32 status;
3674 u16 checksum;
3675 u16 read_checksum = 0;
3676
3677 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3678
3679 /* Read the first word from the EEPROM. If this times out or fails, do
3680 * not continue or we could be in for a very long wait while every
3681 * EEPROM read fails
3682 */
3683 status = hw->eeprom.ops.read(hw, 0, &checksum);
3684 if (status) {
3685 DEBUGOUT("EEPROM read failed\n");
3686 return status;
3687 }
3688
3689 status = hw->eeprom.ops.calc_checksum(hw);
3690 if (status < 0)
3691 return status;
3692
3693 checksum = (u16)(status & 0xffff);
3694
3695 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3696 &read_checksum);
3697 if (status)
3698 return status;
3699
3700 /* Verify read checksum from EEPROM is the same as
3701 * calculated checksum
3702 */
3703 if (read_checksum != checksum) {
3704 status = IXGBE_ERR_EEPROM_CHECKSUM;
3705 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3706 "Invalid EEPROM checksum");
3707 }
3708
3709 /* If the user cares, return the calculated checksum */
3710 if (checksum_val)
3711 *checksum_val = checksum;
3712
3713 return status;
3714 }
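
/*
 * Illustrative sketch (not built): a caller-side consistency check built on
 * ixgbe_validate_eeprom_checksum_X550() above.  The helper name is
 * hypothetical; passing NULL instead of &checksum is also valid when the
 * calculated value is not needed.
 */
#if 0
static bool example_eeprom_is_consistent(struct ixgbe_hw *hw)
{
	u16 checksum;

	return ixgbe_validate_eeprom_checksum_X550(hw, &checksum) ==
	    IXGBE_SUCCESS;
}
#endif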
3715
3716 /**
3717 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3718 * @hw: pointer to hardware structure
3719 *
3720 * After writing EEPROM words to shadow RAM using the EEWR register, software
3721 * calculates the checksum, updates the EEPROM checksum word, and instructs
3722 * the hardware to update the flash.
3723 **/
3724 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3725 {
3726 s32 status;
3727 u16 checksum = 0;
3728
3729 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3730
3731 /* Read the first word from the EEPROM. If this times out or fails, do
3732 * not continue or we could be in for a very long wait while every
3733 * EEPROM read fails
3734 */
3735 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3736 if (status) {
3737 DEBUGOUT("EEPROM read failed\n");
3738 return status;
3739 }
3740
3741 status = ixgbe_calc_eeprom_checksum_X550(hw);
3742 if (status < 0)
3743 return status;
3744
3745 checksum = (u16)(status & 0xffff);
3746
3747 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3748 checksum);
3749 if (status)
3750 return status;
3751
3752 status = ixgbe_update_flash_X550(hw);
3753
3754 return status;
3755 }
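
/*
 * Illustrative sketch (not built): the typical shadow-RAM update flow.  One
 * or more words are written with ixgbe_write_ee_hostif_X550(), then the
 * checksum is recomputed, stored, and the flash copy is triggered, which is
 * exactly what ixgbe_update_eeprom_checksum_X550() above does.  The helper
 * name and the offset/value pair are hypothetical.
 */
#if 0
static s32 example_update_eeprom_word(struct ixgbe_hw *hw, u16 offset,
				      u16 value)
{
	s32 status;

	status = ixgbe_write_ee_hostif_X550(hw, offset, value);
	if (status != IXGBE_SUCCESS)
		return status;

	/* Recalculate the checksum, write it, and start the flash update */
	return ixgbe_update_eeprom_checksum_X550(hw);
}
#endif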
3756
3757 /**
3758 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3759 * @hw: pointer to hardware structure
3760 *
3761 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3762 **/
3763 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3764 {
3765 s32 status = IXGBE_SUCCESS;
3766 union ixgbe_hic_hdr2 buffer;
3767
3768 DEBUGFUNC("ixgbe_update_flash_X550");
3769
3770 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3771 buffer.req.buf_lenh = 0;
3772 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3773 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3774
3775 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3776 sizeof(buffer),
3777 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3778
3779 return status;
3780 }
3781
3782 /**
3783 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3784 * @hw: pointer to hardware structure
3785 *
3786 * Determines physical layer capabilities of the current configuration.
3787 **/
3788 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3789 {
3790 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3791 u16 ext_ability = 0;
3792
3793 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3794
3795 hw->phy.ops.identify(hw);
3796
3797 switch (hw->phy.type) {
3798 case ixgbe_phy_x550em_kr:
3799 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3800 if (hw->phy.nw_mng_if_sel &
3801 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3802 physical_layer =
3803 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3804 break;
3805 } else if (hw->device_id ==
3806 IXGBE_DEV_ID_X550EM_A_KR_L) {
3807 physical_layer =
3808 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3809 break;
3810 }
3811 }
3812 /* fall through */
3813 case ixgbe_phy_x550em_xfi:
3814 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3815 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3816 break;
3817 case ixgbe_phy_x550em_kx4:
3818 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3819 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3820 break;
3821 case ixgbe_phy_x550em_ext_t:
3822 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3823 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3824 &ext_ability);
3825 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3826 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3827 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3828 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3829 break;
3830 case ixgbe_phy_fw:
3831 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3832 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3833 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3834 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3835 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3836 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3837 break;
3838 case ixgbe_phy_sgmii:
3839 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3840 break;
3841 case ixgbe_phy_ext_1g_t:
3842 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3843 break;
3844 default:
3845 break;
3846 }
3847
3848 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3849 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3850
3851 return physical_layer;
3852 }
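
/*
 * Illustrative sketch (not built): decoding the physical-layer bitmask
 * returned by ixgbe_get_supported_physical_layer_X550em() above.  The helper
 * name and the debug strings are hypothetical.
 */
#if 0
static void example_report_physical_layer(struct ixgbe_hw *hw)
{
	u64 layer = ixgbe_get_supported_physical_layer_X550em(hw);

	if (layer == IXGBE_PHYSICAL_LAYER_UNKNOWN)
		DEBUGOUT("physical layer unknown\n");
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		DEBUGOUT("10GBASE-T supported\n");
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		DEBUGOUT("1000BASE-T supported\n");
}
#endif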
3853
3854 /**
3855 * ixgbe_get_bus_info_X550em - Set PCI bus info
3856 * @hw: pointer to hardware structure
3857 *
3858 * Sets bus link width and speed to unknown because X550em is
3859 * not a PCI device.
3860 **/
3861 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3862 {
3863
3864 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3865
3866 hw->bus.width = ixgbe_bus_width_unknown;
3867 hw->bus.speed = ixgbe_bus_speed_unknown;
3868
3869 hw->mac.ops.set_lan_id(hw);
3870
3871 return IXGBE_SUCCESS;
3872 }
3873
3874 /**
3875 * ixgbe_disable_rx_x550 - Disable RX unit
3876 * @hw: pointer to hardware structure
3877 *
3878 * Disables the Rx unit for x550
3879 **/
3880 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3881 {
3882 u32 rxctrl, pfdtxgswc;
3883 s32 status;
3884 struct ixgbe_hic_disable_rxen fw_cmd;
3885
3886 DEBUGFUNC("ixgbe_disable_rx_dma_x550");
3887
3888 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3889 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3890 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3891 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3892 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3893 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3894 hw->mac.set_lben = TRUE;
3895 } else {
3896 hw->mac.set_lben = FALSE;
3897 }
3898
3899 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3900 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3901 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3902 fw_cmd.port_number = (u8)hw->bus.lan_id;
3903
3904 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3905 sizeof(struct ixgbe_hic_disable_rxen),
3906 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3907
3908 /* If we fail - disable RX using register write */
3909 if (status) {
3910 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3911 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3912 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3913 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3914 }
3915 }
3916 }
3917 }
3918
3919 /**
3920 * ixgbe_enter_lplu_t_x550em - Transition to low power states
3921 * @hw: pointer to hardware structure
3922 *
3923 * Configures Low Power Link Up on transition to low power states
3924 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3925 * X557 PHY immediately prior to entering LPLU.
3926 **/
3927 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3928 {
3929 u16 an_10g_cntl_reg, autoneg_reg, speed;
3930 s32 status;
3931 ixgbe_link_speed lcd_speed;
3932 u32 save_autoneg;
3933 bool link_up;
3934
3935 /* SW LPLU not required on later HW revisions. */
3936 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3937 (IXGBE_FUSES0_REV_MASK &
3938 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3939 return IXGBE_SUCCESS;
3940
3941 /* If blocked by MNG FW, then don't restart AN */
3942 if (ixgbe_check_reset_blocked(hw))
3943 return IXGBE_SUCCESS;
3944
3945 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3946 if (status != IXGBE_SUCCESS)
3947 return status;
3948
3949 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3950
3951 if (status != IXGBE_SUCCESS)
3952 return status;
3953
3954 /* If link is down, LPLU is disabled in the NVM, or both WoL and
3955 * manageability are disabled, then force link down by entering low power mode.
3956 */
3957 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3958 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3959 return ixgbe_set_copper_phy_power(hw, FALSE);
3960
3961 /* Determine LCD */
3962 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3963
3964 if (status != IXGBE_SUCCESS)
3965 return status;
3966
3967 /* If no valid LCD link speed, then force link down and exit. */
3968 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3969 return ixgbe_set_copper_phy_power(hw, FALSE);
3970
3971 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3972 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3973 &speed);
3974
3975 if (status != IXGBE_SUCCESS)
3976 return status;
3977
3978 /* If no link now, speed is invalid so take link down */
3979 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3980 if (status != IXGBE_SUCCESS)
3981 return ixgbe_set_copper_phy_power(hw, FALSE);
3982
3983 /* clear everything but the speed bits */
3984 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3985
3986 /* If current speed is already LCD, then exit. */
3987 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3988 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3989 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3990 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3991 return status;
3992
3993 /* Clear AN completed indication */
3994 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
3995 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3996 &autoneg_reg);
3997
3998 if (status != IXGBE_SUCCESS)
3999 return status;
4000
4001 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4002 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4003 &an_10g_cntl_reg);
4004
4005 if (status != IXGBE_SUCCESS)
4006 return status;
4007
4008 status = hw->phy.ops.read_reg(hw,
4009 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4010 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4011 &autoneg_reg);
4012
4013 if (status != IXGBE_SUCCESS)
4014 return status;
4015
4016 save_autoneg = hw->phy.autoneg_advertised;
4017
4018 /* Set up link at the lowest common (LCD) link speed */
4019 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4020
4021 /* restore autoneg from before setting lplu speed */
4022 hw->phy.autoneg_advertised = save_autoneg;
4023
4024 return status;
4025 }
4026
4027 /**
4028 * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4029 * @hw: pointer to hardware structure
4030 * @lcd_speed: pointer to lowest common link speed
4031 *
4032 * Determine lowest common link speed with link partner.
4033 **/
4034 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4035 {
4036 u16 an_lp_status;
4037 s32 status;
4038 u16 word = hw->eeprom.ctrl_word_3;
4039
4040 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4041
4042 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4043 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4044 &an_lp_status);
4045
4046 if (status != IXGBE_SUCCESS)
4047 return status;
4048
4049 /* If link partner advertised 1G, return 1G */
4050 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4051 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4052 return status;
4053 }
4054
4055 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4056 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4057 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4058 return status;
4059
4060 /* Link partner not capable of lower speeds, return 10G */
4061 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4062 return status;
4063 }
4064
4065 /**
4066 * ixgbe_setup_fc_X550em - Set up flow control
4067 * @hw: pointer to hardware structure
4068 *
4069 * Called at init time to set up flow control.
4070 **/
4071 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4072 {
4073 s32 ret_val = IXGBE_SUCCESS;
4074 u32 pause, asm_dir, reg_val;
4075
4076 DEBUGFUNC("ixgbe_setup_fc_X550em");
4077
4078 /* Validate the requested mode */
4079 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4080 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4081 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4082 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4083 goto out;
4084 }
4085
4086 /* 10gig parts do not have a word in the EEPROM to determine the
4087 * default flow control setting, so we explicitly set it to full.
4088 */
4089 if (hw->fc.requested_mode == ixgbe_fc_default)
4090 hw->fc.requested_mode = ixgbe_fc_full;
4091
4092 /* Determine PAUSE and ASM_DIR bits. */
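	/* The advertisement bits used below map as follows (ixgbe_fc_rx_pause
	 * is advertised the same way as full; Tx PAUSE is disabled later):
	 *
	 *	requested_mode		PAUSE	ASM_DIR
	 *	ixgbe_fc_none		  0	  0
	 *	ixgbe_fc_tx_pause	  0	  1
	 *	ixgbe_fc_rx_pause	  1	  1
	 *	ixgbe_fc_full		  1	  1
	 */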
4093 switch (hw->fc.requested_mode) {
4094 case ixgbe_fc_none:
4095 pause = 0;
4096 asm_dir = 0;
4097 break;
4098 case ixgbe_fc_tx_pause:
4099 pause = 0;
4100 asm_dir = 1;
4101 break;
4102 case ixgbe_fc_rx_pause:
4103 /* Rx Flow control is enabled and Tx Flow control is
4104 * disabled by software override. Since there really
4105 * isn't a way to advertise that we are capable of RX
4106 * Pause ONLY, we will advertise that we support both
4107 * symmetric and asymmetric Rx PAUSE, as such we fall
4108 * through to the fc_full statement. Later, we will
4109 * disable the adapter's ability to send PAUSE frames.
4110 */
4111 case ixgbe_fc_full:
4112 pause = 1;
4113 asm_dir = 1;
4114 break;
4115 default:
4116 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4117 "Flow control param set incorrectly\n");
4118 ret_val = IXGBE_ERR_CONFIG;
4119 goto out;
4120 }
4121
4122 switch (hw->device_id) {
4123 case IXGBE_DEV_ID_X550EM_X_KR:
4124 case IXGBE_DEV_ID_X550EM_A_KR:
4125 case IXGBE_DEV_ID_X550EM_A_KR_L:
4126 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4127 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4128 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4129 if (ret_val != IXGBE_SUCCESS)
4130 goto out;
4131 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4132 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4133 if (pause)
4134 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4135 if (asm_dir)
4136 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4137 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4138 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4139 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4140
4141 /* This device does not fully support AN. */
4142 hw->fc.disable_fc_autoneg = TRUE;
4143 break;
4144 case IXGBE_DEV_ID_X550EM_X_XFI:
4145 hw->fc.disable_fc_autoneg = TRUE;
4146 break;
4147 default:
4148 break;
4149 }
4150
4151 out:
4152 return ret_val;
4153 }
4154
4155 /**
4156 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4157 * @hw: pointer to hardware structure
4158 *
4159 * Enable flow control according to IEEE clause 37.
4160 **/
4161 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4162 {
4163 u32 link_s1, lp_an_page_low, an_cntl_1;
4164 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4165 ixgbe_link_speed speed;
4166 bool link_up;
4167
4168 /* AN should have completed when the cable was plugged in.
4169 * Look for reasons to bail out. Bail out if:
4170 * - FC autoneg is disabled, or if
4171 * - link is not up.
4172 */
4173 if (hw->fc.disable_fc_autoneg) {
4174 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4175 "Flow control autoneg is disabled");
4176 goto out;
4177 }
4178
4179 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4180 if (!link_up) {
4181 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4182 goto out;
4183 }
4184
4185 /* Check if auto-negotiation has completed */
4186 status = hw->mac.ops.read_iosf_sb_reg(hw,
4187 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4188 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4189
4190 if (status != IXGBE_SUCCESS ||
4191 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4192 DEBUGOUT("Auto-Negotiation did not complete\n");
4193 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4194 goto out;
4195 }
4196
4197 /* Read the 10g AN autoc and LP ability registers and resolve
4198 * local flow control settings accordingly
4199 */
4200 status = hw->mac.ops.read_iosf_sb_reg(hw,
4201 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4202 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4203
4204 if (status != IXGBE_SUCCESS) {
4205 DEBUGOUT("Auto-Negotiation did not complete\n");
4206 goto out;
4207 }
4208
4209 status = hw->mac.ops.read_iosf_sb_reg(hw,
4210 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4211 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4212
4213 if (status != IXGBE_SUCCESS) {
4214 DEBUGOUT("Auto-Negotiation did not complete\n");
4215 goto out;
4216 }
4217
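	/* ixgbe_negotiate_fc() resolves the local and link-partner SYM/ASM
	 * PAUSE advertisements into hw->fc.current_mode: both sides symmetric
	 * gives full (or rx_pause if that was requested), local ASM-only
	 * against LP SYM+ASM gives tx_pause, local SYM+ASM against LP
	 * ASM-only gives rx_pause, and anything else gives none.
	 */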
4218 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4219 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4220 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4221 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4222 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4223
4224 out:
4225 if (status == IXGBE_SUCCESS) {
4226 hw->fc.fc_was_autonegged = TRUE;
4227 } else {
4228 hw->fc.fc_was_autonegged = FALSE;
4229 hw->fc.current_mode = hw->fc.requested_mode;
4230 }
4231 }
4232
4233 /**
4234 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4235 * @hw: pointer to hardware structure
4236 * Passes the requested flow control settings through without negotiation.
4237 **/
4238 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4239 {
4240 hw->fc.fc_was_autonegged = FALSE;
4241 hw->fc.current_mode = hw->fc.requested_mode;
4242 }
4243
4244 /**
4245 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4246 * @hw: pointer to hardware structure
4247 *
4248 * Enable flow control according to IEEE clause 37.
4249 **/
4250 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4251 {
4252 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4253 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4254 ixgbe_link_speed speed;
4255 bool link_up;
4256
4257 /* AN should have completed when the cable was plugged in.
4258 * Look for reasons to bail out. Bail out if:
4259 * - FC autoneg is disabled, or if
4260 * - link is not up.
4261 */
4262 if (hw->fc.disable_fc_autoneg) {
4263 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4264 "Flow control autoneg is disabled");
4265 goto out;
4266 }
4267
4268 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4269 if (!link_up) {
4270 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4271 goto out;
4272 }
4273
4274 /* Check if auto-negotiation has completed */
4275 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4276 if (status != IXGBE_SUCCESS ||
4277 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4278 DEBUGOUT("Auto-Negotiation did not complete\n");
4279 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4280 goto out;
4281 }
4282
4283 /* Negotiate the flow control */
4284 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4285 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4286 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4287 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4288 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4289
4290 out:
4291 if (status == IXGBE_SUCCESS) {
4292 hw->fc.fc_was_autonegged = TRUE;
4293 } else {
4294 hw->fc.fc_was_autonegged = FALSE;
4295 hw->fc.current_mode = hw->fc.requested_mode;
4296 }
4297 }
4298
4299 /**
4300 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4301 * @hw: pointer to hardware structure
4302 *
4303 * Called at init time to set up flow control.
4304 **/
4305 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4306 {
4307 s32 status = IXGBE_SUCCESS;
4308 u32 an_cntl = 0;
4309
4310 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4311
4312 /* Validate the requested mode */
4313 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4314 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4315 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4316 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4317 }
4318
4319 if (hw->fc.requested_mode == ixgbe_fc_default)
4320 hw->fc.requested_mode = ixgbe_fc_full;
4321
4322 /* Set up the 1G and 10G flow control advertisement registers so the
4323 * HW will be able to do FC autoneg once the cable is plugged in. If
4324 * we link at 10G, the 1G advertisement is harmless and vice versa.
4325 */
4326 status = hw->mac.ops.read_iosf_sb_reg(hw,
4327 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4328 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4329
4330 if (status != IXGBE_SUCCESS) {
4331 DEBUGOUT("Auto-Negotiation did not complete\n");
4332 return status;
4333 }
4334
4335 /* The possible values of fc.requested_mode are:
4336 * 0: Flow control is completely disabled
4337 * 1: Rx flow control is enabled (we can receive pause frames,
4338 * but not send pause frames).
4339 * 2: Tx flow control is enabled (we can send pause frames but
4340 * we do not support receiving pause frames).
4341 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4342 * other: Invalid.
4343 */
4344 switch (hw->fc.requested_mode) {
4345 case ixgbe_fc_none:
4346 /* Flow control completely disabled by software override. */
4347 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4348 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4349 break;
4350 case ixgbe_fc_tx_pause:
4351 /* Tx Flow control is enabled, and Rx Flow control is
4352 * disabled by software override.
4353 */
4354 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4355 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4356 break;
4357 case ixgbe_fc_rx_pause:
4358 /* Rx Flow control is enabled and Tx Flow control is
4359 * disabled by software override. Since there really
4360 * isn't a way to advertise that we are capable of RX
4361 * Pause ONLY, we will advertise that we support both
4362 * symmetric and asymmetric Rx PAUSE, as such we fall
4363 * through to the fc_full statement. Later, we will
4364 * disable the adapter's ability to send PAUSE frames.
4365 */
4366 case ixgbe_fc_full:
4367 /* Flow control (both Rx and Tx) is enabled by SW override. */
4368 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4369 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4370 break;
4371 default:
4372 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4373 "Flow control param set incorrectly\n");
4374 return IXGBE_ERR_CONFIG;
4375 }
4376
4377 status = hw->mac.ops.write_iosf_sb_reg(hw,
4378 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4379 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4380
4381 /* Restart auto-negotiation. */
4382 status = ixgbe_restart_an_internal_phy_x550em(hw);
4383
4384 return status;
4385 }
4386
4387 /**
4388 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4389 * @hw: pointer to hardware structure
4390 * @state: set mux if 1, clear if 0
4391 */
4392 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4393 {
4394 u32 esdp;
4395
4396 if (!hw->bus.lan_id)
4397 return;
4398 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4399 if (state)
4400 esdp |= IXGBE_ESDP_SDP1;
4401 else
4402 esdp &= ~IXGBE_ESDP_SDP1;
4403 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4404 IXGBE_WRITE_FLUSH(hw);
4405 }
4406
4407 /**
4408 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4409 * @hw: pointer to hardware structure
4410 * @mask: Mask to specify which semaphore to acquire
4411 *
4412 * Acquires the SWFW semaphore and sets the I2C MUX
4413 **/
4414 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4415 {
4416 s32 status;
4417
4418 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4419
4420 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4421 if (status)
4422 return status;
4423
4424 if (mask & IXGBE_GSSR_I2C_MASK)
4425 ixgbe_set_mux(hw, 1);
4426
4427 return IXGBE_SUCCESS;
4428 }
4429
4430 /**
4431 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4432 * @hw: pointer to hardware structure
4433 * @mask: Mask to specify which semaphore to release
4434 *
4435 * Releases the SWFW semaphore and clears the I2C MUX
4436 **/
4437 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4438 {
4439 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4440
4441 if (mask & IXGBE_GSSR_I2C_MASK)
4442 ixgbe_set_mux(hw, 0);
4443
4444 ixgbe_release_swfw_sync_X540(hw, mask);
4445 }
4446
4447 /**
4448 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4449 * @hw: pointer to hardware structure
4450 * @mask: Mask to specify which semaphore to acquire
4451 *
4452 * Acquires the SWFW semaphore and gets the shared PHY token as needed
4453 */
4454 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4455 {
4456 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4457 int retries = FW_PHY_TOKEN_RETRIES;
4458 s32 status = IXGBE_SUCCESS;
4459
4460 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4461
4462 while (--retries) {
4463 status = IXGBE_SUCCESS;
4464 if (hmask)
4465 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4466 if (status) {
4467 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4468 status);
4469 return status;
4470 }
4471 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4472 return IXGBE_SUCCESS;
4473
4474 status = ixgbe_get_phy_token(hw);
4475 if (status == IXGBE_ERR_TOKEN_RETRY)
4476 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4477 status);
4478
4479 if (status == IXGBE_SUCCESS)
4480 return IXGBE_SUCCESS;
4481
4482 if (hmask)
4483 ixgbe_release_swfw_sync_X540(hw, hmask);
4484
4485 if (status != IXGBE_ERR_TOKEN_RETRY) {
4486 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4487 status);
4488 return status;
4489 }
4490 }
4491
4492 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4493 hw->phy.id);
4494 return status;
4495 }
4496
4497 /**
4498 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4499 * @hw: pointer to hardware structure
4500 * @mask: Mask to specify which semaphore to release
4501 *
4502 * Releases the SWFW semaphore and puts back the shared PHY token as needed
4503 */
4504 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4505 {
4506 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4507
4508 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4509
4510 if (mask & IXGBE_GSSR_TOKEN_SM)
4511 ixgbe_put_phy_token(hw);
4512
4513 if (hmask)
4514 ixgbe_release_swfw_sync_X540(hw, hmask);
4515 }
4516
4517 /**
4518 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4519 * @hw: pointer to hardware structure
4520 * @reg_addr: 32 bit address of PHY register to read
4521 * @device_type: 5 bit device type
4522 * @phy_data: Pointer to read data from PHY register
4523 *
4524 * Reads a value from a specified PHY register using the SWFW lock and PHY
4525 * Token. The PHY Token is needed since the MDIO is shared between the two MAC
4526 * instances.
4527 **/
4528 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4529 u32 device_type, u16 *phy_data)
4530 {
4531 s32 status;
4532 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4533
4534 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4535
4536 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4537 return IXGBE_ERR_SWFW_SYNC;
4538
4539 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4540
4541 hw->mac.ops.release_swfw_sync(hw, mask);
4542
4543 return status;
4544 }
4545
4546 /**
4547 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4548 * @hw: pointer to hardware structure
4549 * @reg_addr: 32 bit PHY register to write
4550 * @device_type: 5 bit device type
4551 * @phy_data: Data to write to the PHY register
4552 *
4553 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4554 * The PHY Token is needed since the MDIO is shared between the two MAC instances.
4555 **/
4556 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4557 u32 device_type, u16 phy_data)
4558 {
4559 s32 status;
4560 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4561
4562 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4563
4564 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4565 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4566 phy_data);
4567 hw->mac.ops.release_swfw_sync(hw, mask);
4568 } else {
4569 status = IXGBE_ERR_SWFW_SYNC;
4570 }
4571
4572 return status;
4573 }
4574
4575 /**
4576 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4577 * @hw: pointer to hardware structure
4578 *
4579 * Handle external Base T PHY interrupt. If a high temperature failure
4580 * alarm is raised, return an error; otherwise, if the link status changed,
4581 * set up the internal/external PHY link.
4582 *
4583 * Returns IXGBE_ERR_OVERTEMP if the interrupt is a high temperature
4584 * failure alarm, else returns the PHY access status.
4585 */
4586 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4587 {
4588 bool lsc;
4589 u32 status;
4590
4591 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4592
4593 if (status != IXGBE_SUCCESS)
4594 return status;
4595
4596 if (lsc)
4597 return ixgbe_setup_internal_phy(hw);
4598
4599 return IXGBE_SUCCESS;
4600 }
4601
4602 /**
4603 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4604 * @hw: pointer to hardware structure
4605 * @speed: new link speed
4606 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4607 *
4608 * Setup internal/external PHY link speed based on link speed, then set
4609 * external PHY auto advertised link speed.
4610 *
4611 * Returns error status for any failure
4612 **/
4613 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4614 ixgbe_link_speed speed,
4615 bool autoneg_wait_to_complete)
4616 {
4617 s32 status;
4618 ixgbe_link_speed force_speed;
4619
4620 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4621
4622 /* Set up the internal/external PHY link speed to iXFI (10G), unless
4623 * only 1G is auto advertised, in which case set up the KX link.
4624 */
4625 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4626 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4627 else
4628 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4629
4630 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4631 */
4632 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4633 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4634 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4635
4636 if (status != IXGBE_SUCCESS)
4637 return status;
4638 }
4639
4640 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4641 }
4642
4643 /**
4644 * ixgbe_check_link_t_X550em - Determine link and speed status
4645 * @hw: pointer to hardware structure
4646 * @speed: pointer to link speed
4647 * @link_up: TRUE when link is up
4648 * @link_up_wait_to_complete: bool used to wait for link up or not
4649 *
4650 * Check that both the MAC and X557 external PHY have link.
4651 **/
4652 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4653 bool *link_up, bool link_up_wait_to_complete)
4654 {
4655 u32 status;
4656 u16 i, autoneg_status = 0;
4657
4658 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4659 return IXGBE_ERR_CONFIG;
4660
4661 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4662 link_up_wait_to_complete);
4663
4664 /* If check link fails or MAC link is not up, then return */
4665 if (status != IXGBE_SUCCESS || !(*link_up))
4666 return status;
4667
4668 /* MAC link is up, so check the external PHY link.
4669 * The X557 PHY link status bit latches low, so it can only be used to
4670 * detect a link drop, not the current state of the link, without
4671 * performing back-to-back reads.
4672 */
4673 for (i = 0; i < 2; i++) {
4674 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4675 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4676 &autoneg_status);
4677
4678 if (status != IXGBE_SUCCESS)
4679 return status;
4680 }
4681
4682 /* If external PHY link is not up, then indicate link not up */
4683 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4684 *link_up = FALSE;
4685
4686 return IXGBE_SUCCESS;
4687 }
4688
4689 /**
4690 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4691 * @hw: pointer to hardware structure
4692 **/
4693 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4694 {
4695 s32 status;
4696
4697 status = ixgbe_reset_phy_generic(hw);
4698
4699 if (status != IXGBE_SUCCESS)
4700 return status;
4701
4702 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4703 return ixgbe_enable_lasi_ext_t_x550em(hw);
4704 }
4705
4706 /**
4707 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4708 * @hw: pointer to hardware structure
4709 * @led_idx: led number to turn on
4710 **/
4711 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4712 {
4713 u16 phy_data;
4714
4715 DEBUGFUNC("ixgbe_led_on_t_X550em");
4716
4717 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4718 return IXGBE_ERR_PARAM;
4719
4720 /* To turn on the LED, set mode to ON. */
4721 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4722 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4723 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4724 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4725 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4726
4727 /* Some designs have the LEDs wired to the MAC */
4728 return ixgbe_led_on_generic(hw, led_idx);
4729 }
4730
4731 /**
4732 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4733 * @hw: pointer to hardware structure
4734 * @led_idx: led number to turn off
4735 **/
4736 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4737 {
4738 u16 phy_data;
4739
4740 DEBUGFUNC("ixgbe_led_off_t_X550em");
4741
4742 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4743 return IXGBE_ERR_PARAM;
4744
4745 /* To turn off the LED, set mode to OFF. */
4746 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4747 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4748 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4749 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4750 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4751
4752 /* Some designs have the LEDs wired to the MAC */
4753 return ixgbe_led_off_generic(hw, led_idx);
4754 }
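
/*
 * Illustrative sketch (not built): blinking a PHY-driven LED by toggling it
 * with the two helpers above.  The helper name, LED index, iteration count,
 * and 500 ms delay are hypothetical; msec_delay() is assumed to come from
 * the ixgbe OS-dependence layer.
 */
#if 0
static void example_blink_led_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
{
	int i;

	for (i = 0; i < 5; i++) {
		ixgbe_led_on_t_X550em(hw, led_idx);
		msec_delay(500);
		ixgbe_led_off_t_X550em(hw, led_idx);
		msec_delay(500);
	}
}
#endif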
4755
4756 /**
4757 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4758 * @hw: pointer to the HW structure
4759 * @maj: driver version major number
4760 * @min: driver version minor number
4761 * @build: driver version build number
4762 * @sub: driver version sub build number
4763 * @len: length of driver_ver string
4764 * @driver_ver: driver string
4765 *
4766 * Sends driver version number to firmware through the manageability
4767 * block. On success returns IXGBE_SUCCESS,
4768 * else returns IXGBE_ERR_SWFW_SYNC when an error is encountered acquiring
4769 * the semaphore, or IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails.
4770 **/
4771 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4772 u8 build, u8 sub, u16 len, const char *driver_ver)
4773 {
4774 struct ixgbe_hic_drv_info2 fw_cmd;
4775 s32 ret_val = IXGBE_SUCCESS;
4776 int i;
4777
4778 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4779
4780 if ((len == 0) || (driver_ver == NULL) ||
4781 (len > sizeof(fw_cmd.driver_string)))
4782 return IXGBE_ERR_INVALID_ARGUMENT;
4783
4784 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4785 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4786 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4787 fw_cmd.port_num = (u8)hw->bus.func;
4788 fw_cmd.ver_maj = maj;
4789 fw_cmd.ver_min = min;
4790 fw_cmd.ver_build = build;
4791 fw_cmd.ver_sub = sub;
4792 fw_cmd.hdr.checksum = 0;
4793 memcpy(fw_cmd.driver_string, driver_ver, len);
4794 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4795 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4796
4797 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4798 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4799 sizeof(fw_cmd),
4800 IXGBE_HI_COMMAND_TIMEOUT,
4801 TRUE);
4802 if (ret_val != IXGBE_SUCCESS)
4803 continue;
4804
4805 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4806 FW_CEM_RESP_STATUS_SUCCESS)
4807 ret_val = IXGBE_SUCCESS;
4808 else
4809 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4810
4811 break;
4812 }
4813
4814 return ret_val;
4815 }
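
/*
 * Illustrative sketch (not built): reporting a driver version string to the
 * management firmware with ixgbe_set_fw_drv_ver_x550() above.  The helper
 * name, version numbers, and version string are hypothetical placeholders;
 * the length passed includes the terminating NUL and must fit within the
 * command's driver_string field.
 */
#if 0
static s32 example_report_driver_version(struct ixgbe_hw *hw)
{
	static const char drv_ver[] = "ix-0.0.0";

	return ixgbe_set_fw_drv_ver_x550(hw, 0, 0, 0, 0,
					 (u16)sizeof(drv_ver), drv_ver);
}
#endif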
4816