1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
34
35 #include "ixgbe_x550.h"
36 #include "ixgbe_x540.h"
37 #include "ixgbe_type.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41 #include <dev/mii/mii.h>
42
43 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
44 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed,
46 bool autoneg_wait_to_complete);
47 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
48 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
49 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
50
51 /**
52 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
53 * @hw: pointer to hardware structure
54 *
55 * Initialize the function pointers and assign the MAC type for X550.
56 * Does not touch the hardware.
57 **/
58 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
59 {
60 struct ixgbe_mac_info *mac = &hw->mac;
61 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
62 s32 ret_val;
63
64 DEBUGFUNC("ixgbe_init_ops_X550");
65
66 ret_val = ixgbe_init_ops_X540(hw);
67 mac->ops.dmac_config = ixgbe_dmac_config_X550;
68 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
69 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
70 mac->ops.setup_eee = NULL;
71 mac->ops.set_source_address_pruning =
72 ixgbe_set_source_address_pruning_X550;
73 mac->ops.set_ethertype_anti_spoofing =
74 ixgbe_set_ethertype_anti_spoofing_X550;
75
76 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
78 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
79 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
80 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
81 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
82 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
83 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
84 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
85
86 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
87 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
88 mac->ops.mdd_event = ixgbe_mdd_event_X550;
89 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
90 mac->ops.disable_rx = ixgbe_disable_rx_x550;
91 /* Manageability interface */
92 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
93 switch (hw->device_id) {
94 case IXGBE_DEV_ID_X550EM_X_1G_T:
95 hw->mac.ops.led_on = NULL;
96 hw->mac.ops.led_off = NULL;
97 break;
98 case IXGBE_DEV_ID_X550EM_X_10G_T:
99 case IXGBE_DEV_ID_X550EM_A_10G_T:
100 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
101 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
102 break;
103 default:
104 break;
105 }
106 return ret_val;
107 }
108
109 /**
110 * ixgbe_read_cs4227 - Read CS4227 register
111 * @hw: pointer to hardware structure
112 * @reg: register number to read
113 * @value: pointer to receive value read
114 *
115 * Returns status code
116 **/
117 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
118 {
119 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
120 }
121
122 /**
123 * ixgbe_write_cs4227 - Write CS4227 register
124 * @hw: pointer to hardware structure
125 * @reg: register number to write
126 * @value: value to write to register
127 *
128 * Returns status code
129 **/
130 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
131 {
132 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
133 }
134
135 /**
136 * ixgbe_read_pe - Read register from port expander
137 * @hw: pointer to hardware structure
138 * @reg: register number to read
139 * @value: pointer to receive read value
140 *
141 * Returns status code
142 **/
143 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
144 {
145 s32 status;
146
147 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
148 if (status != IXGBE_SUCCESS)
149 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
150 "port expander access failed with %d\n", status);
151 return status;
152 }
153
154 /**
155 * ixgbe_write_pe - Write register to port expander
156 * @hw: pointer to hardware structure
157 * @reg: register number to write
158 * @value: value to write
159 *
160 * Returns status code
161 **/
162 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
163 {
164 s32 status;
165
166 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
167 if (status != IXGBE_SUCCESS)
168 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
169 "port expander access failed with %d\n", status);
170 return status;
171 }
172
173 /**
174 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
175 * @hw: pointer to hardware structure
176 *
177 * This function assumes that the caller has acquired the proper semaphore.
178 * Returns error code
179 **/
180 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
181 {
182 s32 status;
183 u32 retry;
184 u16 value;
185 u8 reg;
186
187 /* Trigger hard reset. */
188 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
189 if (status != IXGBE_SUCCESS)
190 return status;
191 reg |= IXGBE_PE_BIT1;
192 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
193 if (status != IXGBE_SUCCESS)
194 return status;
195
196 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
197 if (status != IXGBE_SUCCESS)
198 return status;
199 reg &= ~IXGBE_PE_BIT1;
200 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
201 if (status != IXGBE_SUCCESS)
202 return status;
203
204 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
205 if (status != IXGBE_SUCCESS)
206 return status;
207 reg &= ~IXGBE_PE_BIT1;
208 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
209 if (status != IXGBE_SUCCESS)
210 return status;
211
212 usec_delay(IXGBE_CS4227_RESET_HOLD);
213
214 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
215 if (status != IXGBE_SUCCESS)
216 return status;
217 reg |= IXGBE_PE_BIT1;
218 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
219 if (status != IXGBE_SUCCESS)
220 return status;
221
222 /* Wait for the reset to complete. */
223 msec_delay(IXGBE_CS4227_RESET_DELAY);
224 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
225 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
226 &value);
227 if (status == IXGBE_SUCCESS &&
228 value == IXGBE_CS4227_EEPROM_LOAD_OK)
229 break;
230 msec_delay(IXGBE_CS4227_CHECK_DELAY);
231 }
232 if (retry == IXGBE_CS4227_RETRIES) {
233 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
234 "CS4227 reset did not complete.");
235 return IXGBE_ERR_PHY;
236 }
237
238 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
239 if (status != IXGBE_SUCCESS ||
240 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
241 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
242 "CS4227 EEPROM did not load successfully.");
243 return IXGBE_ERR_PHY;
244 }
245
246 return IXGBE_SUCCESS;
247 }
248
249 /**
250 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
251 * @hw: pointer to hardware structure
252 **/
253 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
254 {
255 s32 status = IXGBE_SUCCESS;
256 u32 swfw_mask = hw->phy.phy_semaphore_mask;
257 u16 value = 0;
258 u8 retry;
259
260 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
261 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
262 if (status != IXGBE_SUCCESS) {
263 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
264 "semaphore failed with %d", status);
265 msec_delay(IXGBE_CS4227_CHECK_DELAY);
266 continue;
267 }
268
269 /* Get status of reset flow. */
270 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
271
272 if (status == IXGBE_SUCCESS &&
273 value == IXGBE_CS4227_RESET_COMPLETE)
274 goto out;
275
276 if (status != IXGBE_SUCCESS ||
277 value != IXGBE_CS4227_RESET_PENDING)
278 break;
279
280 /* Reset is pending. Wait and check again. */
281 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
282 msec_delay(IXGBE_CS4227_CHECK_DELAY);
283 }
284
285 /* If still pending, assume other instance failed. */
286 if (retry == IXGBE_CS4227_RETRIES) {
287 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
288 if (status != IXGBE_SUCCESS) {
289 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
290 "semaphore failed with %d", status);
291 return;
292 }
293 }
294
295 /* Reset the CS4227. */
296 status = ixgbe_reset_cs4227(hw);
297 if (status != IXGBE_SUCCESS) {
298 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
299 "CS4227 reset failed: %d", status);
300 goto out;
301 }
302
303 /* Reset takes so long, temporarily release semaphore in case the
304 * other driver instance is waiting for the reset indication.
305 */
306 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
307 IXGBE_CS4227_RESET_PENDING);
308 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
309 msec_delay(10);
310 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
311 if (status != IXGBE_SUCCESS) {
312 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
313 "semaphore failed with %d", status);
314 return;
315 }
316
317 /* Record completion for next time. */
318 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
319 IXGBE_CS4227_RESET_COMPLETE);
320
321 out:
322 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
323 msec_delay(hw->eeprom.semaphore_delay);
324 }
325
326 /**
327 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
328 * @hw: pointer to hardware structure
329 **/
330 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
331 {
332 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
333
334 if (hw->bus.lan_id) {
335 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
336 esdp |= IXGBE_ESDP_SDP1_DIR;
337 }
338 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
339 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
340 IXGBE_WRITE_FLUSH(hw);
341 }
342
343 /**
344 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
345 * @hw: pointer to hardware structure
346 * @reg_addr: 32 bit address of PHY register to read
347 * @dev_type: always unused
348 * @phy_data: Pointer to read data from PHY register
349 */
350 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
351 u32 dev_type, u16 *phy_data)
352 {
353 u32 i, data, command;
354 UNREFERENCED_1PARAMETER(dev_type);
355
356 /* Setup and write the read command */
357 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
358 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
359 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
360 IXGBE_MSCA_MDI_COMMAND;
361
362 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
363
364 /* Check every 10 usec to see if the access completed.
365 * The MDI Command bit will clear when the operation is
366 * complete
367 */
368 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
369 usec_delay(10);
370
371 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
372 if (!(command & IXGBE_MSCA_MDI_COMMAND))
373 break;
374 }
375
376 if (command & IXGBE_MSCA_MDI_COMMAND) {
377 ERROR_REPORT1(IXGBE_ERROR_POLLING,
378 "PHY read command did not complete.\n");
379 return IXGBE_ERR_PHY;
380 }
381
382 /* Read operation is complete. Get the data from MSRWD */
383 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
384 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
385 *phy_data = (u16)data;
386
387 return IXGBE_SUCCESS;
388 }
389
390 /**
391 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
392 * @hw: pointer to hardware structure
393 * @reg_addr: 32 bit PHY register to write
394 * @dev_type: always unused
395 * @phy_data: Data to write to the PHY register
396 */
397 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
398 u32 dev_type, u16 phy_data)
399 {
400 u32 i, command;
401 UNREFERENCED_1PARAMETER(dev_type);
402
403 /* Put the data in the MDI single read and write data register */
404 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
405
406 /* Setup and write the write command */
407 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
408 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
409 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
410 IXGBE_MSCA_MDI_COMMAND;
411
412 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
413
414 /* Check every 10 usec to see if the access completed.
415 * The MDI Command bit will clear when the operation is
416 * complete
417 */
418 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
419 usec_delay(10);
420
421 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
422 if (!(command & IXGBE_MSCA_MDI_COMMAND))
423 break;
424 }
425
426 if (command & IXGBE_MSCA_MDI_COMMAND) {
427 ERROR_REPORT1(IXGBE_ERROR_POLLING,
428 "PHY write cmd didn't complete\n");
429 return IXGBE_ERR_PHY;
430 }
431
432 return IXGBE_SUCCESS;
433 }
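/*
 * Note on the two helpers above (a summary of the code, not additional
 * hardware documentation): for clause-22 access the register address is
 * placed in the MSCA field otherwise used for the clause-45 device type,
 * IXGBE_MSCA_OLD_PROTOCOL selects the clause-22 frame format, and
 * completion is detected by polling for the MDI_COMMAND bit to clear.
 * Read data is then fetched from the upper half of MSRWD.
 */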
434
435 /**
436 * ixgbe_identify_phy_x550em - Get PHY type based on device id
437 * @hw: pointer to hardware structure
438 *
439 * Returns error code
440 */
441 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
442 {
443 hw->mac.ops.set_lan_id(hw);
444
445 ixgbe_read_mng_if_sel_x550em(hw);
446
447 switch (hw->device_id) {
448 case IXGBE_DEV_ID_X550EM_A_SFP:
449 return ixgbe_identify_module_generic(hw);
450 case IXGBE_DEV_ID_X550EM_X_SFP:
451 /* set up for CS4227 usage */
452 ixgbe_setup_mux_ctl(hw);
453 ixgbe_check_cs4227(hw);
454 /* Fallthrough */
455
456 case IXGBE_DEV_ID_X550EM_A_SFP_N:
457 return ixgbe_identify_module_generic(hw);
458 break;
459 case IXGBE_DEV_ID_X550EM_X_KX4:
460 hw->phy.type = ixgbe_phy_x550em_kx4;
461 break;
462 case IXGBE_DEV_ID_X550EM_X_XFI:
463 hw->phy.type = ixgbe_phy_x550em_xfi;
464 break;
465 case IXGBE_DEV_ID_X550EM_X_KR:
466 case IXGBE_DEV_ID_X550EM_A_KR:
467 case IXGBE_DEV_ID_X550EM_A_KR_L:
468 hw->phy.type = ixgbe_phy_x550em_kr;
469 break;
470 case IXGBE_DEV_ID_X550EM_A_10G_T:
471 case IXGBE_DEV_ID_X550EM_X_10G_T:
472 return ixgbe_identify_phy_generic(hw);
473 case IXGBE_DEV_ID_X550EM_X_1G_T:
474 hw->phy.type = ixgbe_phy_ext_1g_t;
475 break;
476 case IXGBE_DEV_ID_X550EM_A_1G_T:
477 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
478 hw->phy.type = ixgbe_phy_fw;
479 if (hw->bus.lan_id)
480 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
481 else
482 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
483 break;
484 default:
485 break;
486 }
487 return IXGBE_SUCCESS;
488 }
489
490 /**
491 * ixgbe_fw_phy_activity - Perform an activity on a PHY
492 * @hw: pointer to hardware structure
493 * @activity: activity to perform
494 * @data: Pointer to 4 32-bit words of data
495 */
496 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
497 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
498 {
499 union {
500 struct ixgbe_hic_phy_activity_req cmd;
501 struct ixgbe_hic_phy_activity_resp rsp;
502 } hic;
503 u16 retries = FW_PHY_ACT_RETRIES;
504 s32 rc;
505 u16 i;
506
507 do {
508 memset(&hic, 0, sizeof(hic));
509 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
510 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
511 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
512 hic.cmd.port_number = hw->bus.lan_id;
513 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
514 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
515 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
516
517 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
518 sizeof(hic.cmd),
519 IXGBE_HI_COMMAND_TIMEOUT,
520 TRUE);
521 if (rc != IXGBE_SUCCESS)
522 return rc;
523 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
524 FW_CEM_RESP_STATUS_SUCCESS) {
525 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
526 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
527 return IXGBE_SUCCESS;
528 }
529 usec_delay(20);
530 --retries;
531 } while (retries > 0);
532
533 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
534 }
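/*
 * Illustrative caller pattern (a sketch mirroring ixgbe_get_phy_id_fw
 * below, not additional driver code): the caller supplies a 4-word buffer
 * that carries the request data in and the response data out.
 *
 *	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
 *	s32 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
 *	if (rc == IXGBE_SUCCESS)
 *		use(info[0]);	/* hypothetical consumer of the response */
 */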
535
536 static const struct {
537 u16 fw_speed;
538 ixgbe_link_speed phy_speed;
539 } ixgbe_fw_map[] = {
540 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
541 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
542 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
543 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
544 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
545 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
546 };
547
548 /**
549 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
550 * @hw: pointer to hardware structure
551 *
552 * Returns error code
553 */
554 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
555 {
556 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
557 u16 phy_speeds;
558 u16 phy_id_lo;
559 s32 rc;
560 u16 i;
561
562 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
563 if (rc)
564 return rc;
565
566 hw->phy.speeds_supported = 0;
567 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
568 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
569 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
570 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
571 }
572
573 #if 0
574 /*
575 * Don't set autoneg_advertised here, to avoid being inconsistent with
576 * the if_media value.
577 */
578 if (!hw->phy.autoneg_advertised)
579 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
580 #endif
581
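/*
 * The PHY identifier is assembled from both response words (summary of
 * the code below): the high bits come from info[0], the bits of info[1]
 * covered by IXGBE_PHY_REVISION_MASK are folded into the ID, and the
 * remaining low bits of info[1] become the revision.
 */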
582 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
583 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
584 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
585 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
586 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
587 return IXGBE_ERR_PHY_ADDR_INVALID;
588 return IXGBE_SUCCESS;
589 }
590
591 /**
592 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
593 * @hw: pointer to hardware structure
594 *
595 * Returns error code
596 */
597 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
598 {
599 if (hw->bus.lan_id)
600 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
601 else
602 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
603
604 hw->phy.type = ixgbe_phy_fw;
605 hw->phy.ops.read_reg = NULL;
606 hw->phy.ops.write_reg = NULL;
607 return ixgbe_get_phy_id_fw(hw);
608 }
609
610 /**
611 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
612 * @hw: pointer to hardware structure
613 *
614 * Returns error code
615 */
616 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
617 {
618 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
619
620 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
621 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
622 }
623
624 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
625 u32 device_type, u16 *phy_data)
626 {
627 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
628 return IXGBE_NOT_IMPLEMENTED;
629 }
630
631 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
632 u32 device_type, u16 phy_data)
633 {
634 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
635 return IXGBE_NOT_IMPLEMENTED;
636 }
637
638 /**
639 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
640 * @hw: pointer to the hardware structure
641 * @addr: I2C bus address to read from
642 * @reg: I2C device register to read from
643 * @val: pointer to location to receive read value
644 *
645 * Returns an error code on error.
646 **/
647 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
648 u16 reg, u16 *val)
649 {
650 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
651 }
652
653 /**
654 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
655 * @hw: pointer to the hardware structure
656 * @addr: I2C bus address to read from
657 * @reg: I2C device register to read from
658 * @val: pointer to location to receive read value
659 *
660 * Returns an error code on error.
661 **/
662 static s32
663 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
664 u16 reg, u16 *val)
665 {
666 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
667 }
668
669 /**
670 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
671 * @hw: pointer to the hardware structure
672 * @addr: I2C bus address to write to
673 * @reg: I2C device register to write to
674 * @val: value to write
675 *
676 * Returns an error code on error.
677 **/
678 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
679 u8 addr, u16 reg, u16 val)
680 {
681 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
682 }
683
684 /**
685 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
686 * @hw: pointer to the hardware structure
687 * @addr: I2C bus address to write to
688 * @reg: I2C device register to write to
689 * @val: value to write
690 *
691 * Returns an error code on error.
692 **/
693 static s32
694 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
695 u8 addr, u16 reg, u16 val)
696 {
697 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
698 }
699
700 /**
701 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
702 * @hw: pointer to hardware structure
703 *
704 * Initialize the function pointers and assign the MAC type for X550EM.
705 * Does not touch the hardware.
706 **/
707 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
708 {
709 struct ixgbe_mac_info *mac = &hw->mac;
710 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
711 struct ixgbe_phy_info *phy = &hw->phy;
712 s32 ret_val;
713
714 DEBUGFUNC("ixgbe_init_ops_X550EM");
715
716 /* Similar to X550 so start there. */
717 ret_val = ixgbe_init_ops_X550(hw);
718
719 /* Since this function eventually calls
720 * ixgbe_init_ops_X540 by design, we set these
721 * pointers to NULL explicitly here to overwrite
722 * the values set in the X540 function.
723 */
724
725 /* Bypass not supported in x550EM */
726 mac->ops.bypass_rw = NULL;
727 mac->ops.bypass_valid_rd = NULL;
728 mac->ops.bypass_set = NULL;
729 mac->ops.bypass_rd_eep = NULL;
730
731 /* FCOE not supported in x550EM */
732 mac->ops.get_san_mac_addr = NULL;
733 mac->ops.set_san_mac_addr = NULL;
734 mac->ops.get_wwn_prefix = NULL;
735 mac->ops.get_fcoe_boot_status = NULL;
736
737 /* IPsec not supported in x550EM */
738 mac->ops.disable_sec_rx_path = NULL;
739 mac->ops.enable_sec_rx_path = NULL;
740
741 /* AUTOC register is not present in x550EM. */
742 mac->ops.prot_autoc_read = NULL;
743 mac->ops.prot_autoc_write = NULL;
744
745 /* X550EM bus type is internal */
746 hw->bus.type = ixgbe_bus_type_internal;
747 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
748
749
750 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
751 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
752 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
753 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
754 mac->ops.get_supported_physical_layer =
755 ixgbe_get_supported_physical_layer_X550em;
756
757 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
758 mac->ops.setup_fc = ixgbe_setup_fc_generic;
759 else
760 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
761
762 /* PHY */
763 phy->ops.init = ixgbe_init_phy_ops_X550em;
764 switch (hw->device_id) {
765 case IXGBE_DEV_ID_X550EM_A_1G_T:
766 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
767 mac->ops.setup_fc = NULL;
768 phy->ops.identify = ixgbe_identify_phy_fw;
769 phy->ops.set_phy_power = NULL;
770 phy->ops.get_firmware_version = NULL;
771 break;
772 case IXGBE_DEV_ID_X550EM_X_1G_T:
773 mac->ops.setup_fc = NULL;
774 phy->ops.identify = ixgbe_identify_phy_x550em;
775 phy->ops.set_phy_power = NULL;
776 break;
777 default:
778 phy->ops.identify = ixgbe_identify_phy_x550em;
779 }
780
781 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
782 phy->ops.set_phy_power = NULL;
783
784
785 /* EEPROM */
786 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
787 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
788 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
789 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
790 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
791 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
792 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
793 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
794
795 return ret_val;
796 }
797
798 #define IXGBE_DENVERTON_WA 1
799
800 /**
801 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
802 * @hw: pointer to hardware structure
803 */
804 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
805 {
806 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
807 s32 rc;
808 #ifdef IXGBE_DENVERTON_WA
809 s32 ret_val;
810 u16 phydata;
811 #endif
812 u16 i;
813
814 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
815 return 0;
816
817 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
818 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
819 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
820 return IXGBE_ERR_INVALID_LINK_SETTINGS;
821 }
822
823 switch (hw->fc.requested_mode) {
824 case ixgbe_fc_full:
825 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
826 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
827 break;
828 case ixgbe_fc_rx_pause:
829 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
830 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
831 break;
832 case ixgbe_fc_tx_pause:
833 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
834 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
835 break;
836 default:
837 break;
838 }
839
840 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
841 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
842 setup[0] |= ixgbe_fw_map[i].fw_speed;
843 }
844 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
845
846 if (hw->phy.eee_speeds_advertised)
847 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
848
849 #ifdef IXGBE_DENVERTON_WA
850 if ((hw->phy.force_10_100_autonego == false)
851 && ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
852 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL))) {
853 /* Don't use auto-nego for 10/100Mbps */
854 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
855 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
856 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
857 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
858 }
859 #endif
860
861 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
862 if (rc)
863 return rc;
864
865 #ifdef IXGBE_DENVERTON_WA
866 if (hw->phy.force_10_100_autonego == true)
867 goto out;
868
869 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
870 if (ret_val != 0)
871 goto out;
872
873 /*
874 * Broken firmware sets BMCR register incorrectly if
875 * FW_PHY_ACT_SETUP_LINK_AN isn't set.
876 * a) FDX may not be set.
877 * b) BMCR_SPEED1 (bit 6) is always cleared.
878 * +---------+-------+-----------+------+---------------------------+
879 * | request | BMCR  | BMCR spd  | BMCR |                           |
880 * |         | (hex) | (in bits) | FDX  |                           |
881 * +---------+-------+-----------+------+---------------------------+
882 * | 10M     | 0000  | 10M(00)   | 0    |                           |
883 * | 10M     | 2000  | 100M(01)  | 0    | (I've never observed this)|
884 * | 10M     | 2100  | 100M(01)  | 1    |                           |
885 * | 100M    | 0000  | 10M(00)   | 0    |                           |
886 * | 100M    | 0100  | 10M(00)   | 1    |                           |
887 * +---------+-------+-----------+------+---------------------------+
888 */
889 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
890 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
891 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
892 && (((phydata & BMCR_FDX) == 0)
893 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
894 phydata = BMCR_FDX;
895 switch (hw->phy.autoneg_advertised) {
896 case IXGBE_LINK_SPEED_10_FULL:
897 phydata |= BMCR_S10;
898 break;
899 case IXGBE_LINK_SPEED_100_FULL:
900 phydata |= BMCR_S100;
901 break;
902 case IXGBE_LINK_SPEED_1GB_FULL:
903 panic("%s: 1GB_FULL is set", __func__);
904 break;
905 default:
906 break;
907 }
908 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
909 if (ret_val != 0)
910 return ret_val;
911 }
912 out:
913 #endif
914 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
915 return IXGBE_ERR_OVERTEMP;
916 return IXGBE_SUCCESS;
917 }
918
919 /**
920 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
921 * @hw: pointer to hardware structure
922 *
923 * Called at init time to set up flow control.
924 */
925 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
926 {
927 if (hw->fc.requested_mode == ixgbe_fc_default)
928 hw->fc.requested_mode = ixgbe_fc_full;
929
930 return ixgbe_setup_fw_link(hw);
931 }
932
933 /**
934 * ixgbe_setup_eee_fw - Enable/disable EEE support
935 * @hw: pointer to the HW structure
936 * @enable_eee: boolean flag to enable EEE
937 *
938 * Enable/disable EEE based on enable_eee flag.
939 * This function controls EEE for firmware-based PHY implementations.
940 */
941 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
942 {
943 if (!!hw->phy.eee_speeds_advertised == enable_eee)
944 return IXGBE_SUCCESS;
945 if (enable_eee)
946 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
947 else
948 hw->phy.eee_speeds_advertised = 0;
949 return hw->phy.ops.setup_link(hw);
950 }
951
952 /**
953 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
954 * @hw: pointer to hardware structure
955 *
956 * Initialize the function pointers and assign the MAC type for X550EM_a.
957 * Does not touch the hardware.
958 **/
959 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
960 {
961 struct ixgbe_mac_info *mac = &hw->mac;
962 s32 ret_val;
963
964 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
965
966 /* Start with generic X550EM init */
967 ret_val = ixgbe_init_ops_X550EM(hw);
968
969 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
970 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
971 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
972 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
973 } else {
974 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
975 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
976 }
977 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
978 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
979
980 switch (mac->ops.get_media_type(hw)) {
981 case ixgbe_media_type_fiber:
982 mac->ops.setup_fc = NULL;
983 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
984 break;
985 case ixgbe_media_type_backplane:
986 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
987 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
988 break;
989 default:
990 break;
991 }
992
993 switch (hw->device_id) {
994 case IXGBE_DEV_ID_X550EM_A_1G_T:
995 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
996 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
997 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
998 mac->ops.setup_eee = ixgbe_setup_eee_fw;
999 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
1000 IXGBE_LINK_SPEED_1GB_FULL;
1001 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1002 break;
1003 default:
1004 break;
1005 }
1006
1007 return ret_val;
1008 }
1009
1010 /**
1011 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1012 * @hw: pointer to hardware structure
1013 *
1014 * Initialize the function pointers and assign the MAC type for X550EM_x.
1015 * Does not touch the hardware.
1016 **/
1017 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1018 {
1019 struct ixgbe_mac_info *mac = &hw->mac;
1020 struct ixgbe_link_info *link = &hw->link;
1021 s32 ret_val;
1022
1023 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1024
1025 /* Start with generic X550EM init */
1026 ret_val = ixgbe_init_ops_X550EM(hw);
1027
1028 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1029 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1030 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1031 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1032 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1033 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1034 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1035 link->ops.write_link_unlocked =
1036 ixgbe_write_i2c_combined_generic_unlocked;
1037 link->addr = IXGBE_CS4227;
1038
1039 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1040 mac->ops.setup_fc = NULL;
1041 mac->ops.setup_eee = NULL;
1042 mac->ops.init_led_link_act = NULL;
1043 }
1044
1045 return ret_val;
1046 }
1047
1048 /**
1049 * ixgbe_dmac_config_X550
1050 * @hw: pointer to hardware structure
1051 *
1052 * Configure DMA coalescing. When enabling, dmac is activated;
1053 * when disabling, the dmac enable bit is cleared.
1054 **/
1055 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1056 {
1057 u32 reg, high_pri_tc;
1058
1059 DEBUGFUNC("ixgbe_dmac_config_X550");
1060
1061 /* Disable DMA coalescing before configuring */
1062 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1063 reg &= ~IXGBE_DMACR_DMAC_EN;
1064 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1065
1066 /* Disable DMA Coalescing if the watchdog timer is 0 */
1067 if (!hw->mac.dmac_config.watchdog_timer)
1068 goto out;
1069
1070 ixgbe_dmac_config_tcs_X550(hw);
1071
1072 /* Configure DMA Coalescing Control Register */
1073 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1074
1075 /* Set the watchdog timer in units of 40.96 usec */
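/* Illustrative arithmetic (assuming watchdog_timer is expressed in usec):
 * a value of 1000 gives (1000 * 100) / 4096 = 24 hardware units, i.e.
 * roughly 24 * 40.96 = 983 usec of coalescing.
 */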
1076 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1077 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1078
1079 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1080 /* If fcoe is enabled, set high priority traffic class */
1081 if (hw->mac.dmac_config.fcoe_en) {
1082 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1083 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1084 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1085 }
1086 reg |= IXGBE_DMACR_EN_MNG_IND;
1087
1088 /* Enable DMA coalescing after configuration */
1089 reg |= IXGBE_DMACR_DMAC_EN;
1090 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1091
1092 out:
1093 return IXGBE_SUCCESS;
1094 }
1095
1096 /**
1097 * ixgbe_dmac_config_tcs_X550
1098 * @hw: pointer to hardware structure
1099 *
1100 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1101 * be cleared before configuring.
1102 **/
1103 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1104 {
1105 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1106
1107 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1108
1109 /* Select the Rx packet buffer headroom based on link speed */
1110 switch (hw->mac.dmac_config.link_speed) {
1111 case IXGBE_LINK_SPEED_10_FULL:
1112 case IXGBE_LINK_SPEED_100_FULL:
1113 pb_headroom = IXGBE_DMACRXT_100M;
1114 break;
1115 case IXGBE_LINK_SPEED_1GB_FULL:
1116 pb_headroom = IXGBE_DMACRXT_1G;
1117 break;
1118 default:
1119 pb_headroom = IXGBE_DMACRXT_10G;
1120 break;
1121 }
1122
1123 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1124 IXGBE_MHADD_MFS_SHIFT) / 1024);
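/* The computation above extracts the maximum frame size in bytes from
 * MAXFRS and converts it to kilobytes; it is used below as the floor for
 * each per-TC DMCTH threshold.
 */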
1125
1126 /* Set the per Rx packet buffer receive threshold */
1127 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1128 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1129 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1130
1131 if (tc < hw->mac.dmac_config.num_tcs) {
1132 /* Get Rx PB size */
1133 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1134 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1135 IXGBE_RXPBSIZE_SHIFT;
1136
1137 /* Calculate receive buffer threshold in kilobytes */
1138 if (rx_pb_size > pb_headroom)
1139 rx_pb_size = rx_pb_size - pb_headroom;
1140 else
1141 rx_pb_size = 0;
1142
1143 /* DMCTH must be at least the maximum frame size (MFS) */
1144 reg |= (rx_pb_size > maxframe_size_kb) ?
1145 rx_pb_size : maxframe_size_kb;
1146 }
1147 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1148 }
1149 return IXGBE_SUCCESS;
1150 }
1151
1152 /**
1153 * ixgbe_dmac_update_tcs_X550
1154 * @hw: pointer to hardware structure
1155 *
1156 * Disables dmac, updates per TC settings, and then enables dmac.
1157 **/
1158 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1159 {
1160 u32 reg;
1161
1162 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1163
1164 /* Disable DMA coalescing before configuring */
1165 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1166 reg &= ~IXGBE_DMACR_DMAC_EN;
1167 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1168
1169 ixgbe_dmac_config_tcs_X550(hw);
1170
1171 /* Enable DMA coalescing after configuration */
1172 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1173 reg |= IXGBE_DMACR_DMAC_EN;
1174 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1175
1176 return IXGBE_SUCCESS;
1177 }
1178
1179 /**
1180 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1181 * @hw: pointer to hardware structure
1182 *
1183 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1184 * ixgbe_hw struct in order to set up EEPROM access.
1185 **/
1186 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1187 {
1188 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1189 u32 eec;
1190 u16 eeprom_size;
1191
1192 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1193
1194 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1195 eeprom->semaphore_delay = 10;
1196 eeprom->type = ixgbe_flash;
1197
1198 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1199 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1200 IXGBE_EEC_SIZE_SHIFT);
1201 eeprom->word_size = 1 << (eeprom_size +
1202 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1203
1204 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1205 eeprom->type, eeprom->word_size);
1206 }
1207
1208 return IXGBE_SUCCESS;
1209 }
1210
1211 /**
1212 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1213 * @hw: pointer to hardware structure
1214 * @enable: enable or disable source address pruning
1215 * @pool: Rx pool to set source address pruning for
1216 **/
1217 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1218 unsigned int pool)
1219 {
1220 u64 pfflp;
1221
1222 /* max rx pool is 63 */
1223 if (pool > 63)
1224 return;
1225
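/* The per-pool pruning enables form a 64-bit bitmap split across
 * PFFLPL (pools 0-31) and PFFLPH (pools 32-63): read both halves,
 * update the pool's bit, and write both halves back.
 */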
1226 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1227 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1228
1229 if (enable)
1230 pfflp |= (1ULL << pool);
1231 else
1232 pfflp &= ~(1ULL << pool);
1233
1234 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1235 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1236 }
1237
1238 /**
1239 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1240 * @hw: pointer to hardware structure
1241 * @enable: enable or disable switch for Ethertype anti-spoofing
1242 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1243 *
1244 **/
1245 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1246 bool enable, int vf)
1247 {
1248 int vf_target_reg = vf >> 3;
1249 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1250 u32 pfvfspoof;
1251
1252 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1253
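/* Each PFVFSPOOF register covers eight VFs: vf >> 3 selects the register
 * and (vf % 8) + IXGBE_SPOOF_ETHERTYPEAS_SHIFT selects the Ethertype
 * anti-spoofing bit within it. For example (illustrative), VF 11 maps to
 * PFVFSPOOF(1), bit 3 of the Ethertype field.
 */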
1254 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1255 if (enable)
1256 pfvfspoof |= (1 << vf_target_shift);
1257 else
1258 pfvfspoof &= ~(1 << vf_target_shift);
1259
1260 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1261 }
1262
1263 /**
1264 * ixgbe_iosf_wait - Wait for IOSF command completion
1265 * @hw: pointer to hardware structure
1266 * @ctrl: pointer to location to receive final IOSF control value
1267 *
1268 * Returns failing status on timeout
1269 *
1270 * Note: ctrl can be NULL if the IOSF control register value is not needed
1271 **/
1272 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1273 {
1274 u32 i, command = 0;
1275
1276 /* Check every 10 usec to see if the address cycle completed.
1277 * The SB IOSF BUSY bit will clear when the operation is
1278 * complete
1279 */
1280 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1281 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1282 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1283 break;
1284 usec_delay(10);
1285 }
1286 if (ctrl)
1287 *ctrl = command;
1288 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1289 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1290 return IXGBE_ERR_PHY;
1291 }
1292
1293 return IXGBE_SUCCESS;
1294 }
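/*
 * The two accessors below use the same indirect sequence (a summary of the
 * code that follows, not additional hardware documentation): take both PHY
 * semaphores, wait for the BUSY bit to clear, program the target address
 * and device type into IXGBE_SB_IOSF_INDIRECT_CTRL (plus the data register
 * for a write), wait for completion, and then check the response status
 * field for a completion error.
 */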
1295
1296 /**
1297 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1298 * of the IOSF device
1299 * @hw: pointer to hardware structure
1300 * @reg_addr: 32 bit PHY register to write
1301 * @device_type: 3 bit device type
1302 * @data: Data to write to the register
1303 **/
1304 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1305 u32 device_type, u32 data)
1306 {
1307 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1308 u32 command, error __unused;
1309 s32 ret;
1310
1311 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1312 if (ret != IXGBE_SUCCESS)
1313 return ret;
1314
1315 ret = ixgbe_iosf_wait(hw, NULL);
1316 if (ret != IXGBE_SUCCESS)
1317 goto out;
1318
1319 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1320 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1321
1322 /* Write IOSF control register */
1323 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1324
1325 /* Write IOSF data register */
1326 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1327
1328 ret = ixgbe_iosf_wait(hw, &command);
1329
1330 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1331 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1332 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1333 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1334 "Failed to write, error %x\n", error);
1335 ret = IXGBE_ERR_PHY;
1336 }
1337
1338 out:
1339 ixgbe_release_swfw_semaphore(hw, gssr);
1340 return ret;
1341 }
1342
1343 /**
1344 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1345 * @hw: pointer to hardware structure
1346 * @reg_addr: 32 bit PHY register to read
1347 * @device_type: 3 bit device type
1348 * @data: Pointer to read data from the register
1349 **/
1350 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1351 u32 device_type, u32 *data)
1352 {
1353 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1354 u32 command, error __unused;
1355 s32 ret;
1356
1357 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1358 if (ret != IXGBE_SUCCESS)
1359 return ret;
1360
1361 ret = ixgbe_iosf_wait(hw, NULL);
1362 if (ret != IXGBE_SUCCESS)
1363 goto out;
1364
1365 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1366 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1367
1368 /* Write IOSF control register */
1369 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1370
1371 ret = ixgbe_iosf_wait(hw, &command);
1372
1373 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1374 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1375 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1376 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1377 "Failed to read, error %x\n", error);
1378 ret = IXGBE_ERR_PHY;
1379 }
1380
1381 if (ret == IXGBE_SUCCESS)
1382 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1383
1384 out:
1385 ixgbe_release_swfw_semaphore(hw, gssr);
1386 return ret;
1387 }
1388
1389 /**
1390 * ixgbe_get_phy_token - Get the token for shared phy access
1391 * @hw: Pointer to hardware structure
1392 */
1393
1394 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1395 {
1396 struct ixgbe_hic_phy_token_req token_cmd;
1397 s32 status;
1398
1399 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1400 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1401 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1402 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1403 token_cmd.port_number = hw->bus.lan_id;
1404 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1405 token_cmd.pad = 0;
1406 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1407 sizeof(token_cmd),
1408 IXGBE_HI_COMMAND_TIMEOUT,
1409 TRUE);
1410 if (status) {
1411 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1412 status);
1413 return status;
1414 }
1415 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1416 return IXGBE_SUCCESS;
1417 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1418 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1419 token_cmd.hdr.cmd_or_resp.ret_status);
1420 return IXGBE_ERR_FW_RESP_INVALID;
1421 }
1422
1423 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1424 return IXGBE_ERR_TOKEN_RETRY;
1425 }
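/*
 * Illustrative caller pattern (a sketch, not code from this file): callers
 * are expected to keep retrying while the firmware asks for a retry.
 *
 *	do {
 *		status = ixgbe_get_phy_token(hw);
 *	} while (status == IXGBE_ERR_TOKEN_RETRY);
 */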
1426
1427 /**
1428 * ixgbe_put_phy_token - Put the token for shared phy access
1429 * @hw: Pointer to hardware structure
1430 */
1431
1432 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1433 {
1434 struct ixgbe_hic_phy_token_req token_cmd;
1435 s32 status;
1436
1437 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1438 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1439 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1440 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1441 token_cmd.port_number = hw->bus.lan_id;
1442 token_cmd.command_type = FW_PHY_TOKEN_REL;
1443 token_cmd.pad = 0;
1444 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1445 sizeof(token_cmd),
1446 IXGBE_HI_COMMAND_TIMEOUT,
1447 TRUE);
1448 if (status)
1449 return status;
1450 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1451 return IXGBE_SUCCESS;
1452
1453 DEBUGOUT("Put PHY Token host interface command failed");
1454 return IXGBE_ERR_FW_RESP_INVALID;
1455 }
1456
1457 /**
1458 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1459 * of the IOSF device
1460 * @hw: pointer to hardware structure
1461 * @reg_addr: 32 bit PHY register to write
1462 * @device_type: 3 bit device type
1463 * @data: Data to write to the register
1464 **/
1465 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1466 u32 device_type, u32 data)
1467 {
1468 struct ixgbe_hic_internal_phy_req write_cmd;
1469 s32 status;
1470 UNREFERENCED_1PARAMETER(device_type);
1471
1472 memset(&write_cmd, 0, sizeof(write_cmd));
1473 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1474 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1475 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1476 write_cmd.port_number = hw->bus.lan_id;
1477 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1478 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1479 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1480
1481 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1482 sizeof(write_cmd),
1483 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1484
1485 return status;
1486 }
1487
1488 /**
1489 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1490 * @hw: pointer to hardware structure
1491 * @reg_addr: 32 bit PHY register to read
1492 * @device_type: 3 bit device type
1493 * @data: Pointer to read data from the register
1494 **/
1495 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1496 u32 device_type, u32 *data)
1497 {
1498 union {
1499 struct ixgbe_hic_internal_phy_req cmd;
1500 struct ixgbe_hic_internal_phy_resp rsp;
1501 } hic;
1502 s32 status;
1503 UNREFERENCED_1PARAMETER(device_type);
1504
1505 memset(&hic, 0, sizeof(hic));
1506 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1507 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1508 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1509 hic.cmd.port_number = hw->bus.lan_id;
1510 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1511 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1512
1513 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1514 sizeof(hic.cmd),
1515 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1516
1517 /* Extract the register value from the response. */
1518 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1519
1520 return status;
1521 }
1522
1523 /**
1524 * ixgbe_disable_mdd_X550
1525 * @hw: pointer to hardware structure
1526 *
1527 * Disable malicious driver detection
1528 **/
1529 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1530 {
1531 u32 reg;
1532
1533 DEBUGFUNC("ixgbe_disable_mdd_X550");
1534
1535 /* Disable MDD for TX DMA and interrupt */
1536 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1537 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1538 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1539
1540 /* Disable MDD for RX and interrupt */
1541 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1542 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1543 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1544 }
1545
1546 /**
1547 * ixgbe_enable_mdd_X550
1548 * @hw: pointer to hardware structure
1549 *
1550 * Enable malicious driver detection
1551 **/
1552 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1553 {
1554 u32 reg;
1555
1556 DEBUGFUNC("ixgbe_enable_mdd_X550");
1557
1558 /* Enable MDD for TX DMA and interrupt */
1559 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1560 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1561 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1562
1563 /* Enable MDD for RX and interrupt */
1564 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1565 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1566 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1567 }
1568
1569 /**
1570 * ixgbe_restore_mdd_vf_X550
1571 * @hw: pointer to hardware structure
1572 * @vf: vf index
1573 *
1574 * Restore VF that was disabled during malicious driver detection event
1575 **/
1576 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1577 {
1578 u32 idx, reg, num_qs, start_q, bitmask;
1579
1580 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1581
1582 /* Map VF to queues */
1583 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1584 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1585 case IXGBE_MRQC_VMDQRT8TCEN:
1586 num_qs = 8; /* 16 VFs / pools */
1587 bitmask = 0x000000FF;
1588 break;
1589 case IXGBE_MRQC_VMDQRSS32EN:
1590 case IXGBE_MRQC_VMDQRT4TCEN:
1591 num_qs = 4; /* 32 VFs / pools */
1592 bitmask = 0x0000000F;
1593 break;
1594 default: /* 64 VFs / pools */
1595 num_qs = 2;
1596 bitmask = 0x00000003;
1597 break;
1598 }
1599 start_q = vf * num_qs;
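/* Worked example (illustrative): with VMDQRSS32EN, num_qs = 4 and
 * bitmask = 0xF, so VF 5 owns queues 20-23; idx below becomes 0 and the
 * writes set bits 20-23 of WQBR_TX(0) and WQBR_RX(0).
 */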
1600
1601 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1602 idx = start_q / 32;
1603 reg = 0;
1604 reg |= (bitmask << (start_q % 32));
1605 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1606 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1607 }
1608
1609 /**
1610 * ixgbe_mdd_event_X550
1611 * @hw: pointer to hardware structure
1612 * @vf_bitmap: vf bitmap of malicious vfs
1613 *
1614 * Handle malicious driver detection event.
1615 **/
1616 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1617 {
1618 u32 wqbr;
1619 u32 i, j, reg, q, shift, vf, idx;
1620
1621 DEBUGFUNC("ixgbe_mdd_event_X550");
1622
1623 /* figure out pool size for mapping to vf's */
1624 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1625 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1626 case IXGBE_MRQC_VMDQRT8TCEN:
1627 shift = 3; /* 16 VFs / pools */
1628 break;
1629 case IXGBE_MRQC_VMDQRSS32EN:
1630 case IXGBE_MRQC_VMDQRT4TCEN:
1631 shift = 2; /* 32 VFs / pools */
1632 break;
1633 default:
1634 shift = 1; /* 64 VFs / pools */
1635 break;
1636 }
1637
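/* For example (illustrative), with shift = 2 (32 pools) a malicious
 * queue 45 maps to VF 45 >> 2 = 11, which sets bit 11 of vf_bitmap[0].
 */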
1638 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1639 for (i = 0; i < 4; i++) {
1640 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1641 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1642
1643 if (!wqbr)
1644 continue;
1645
1646 /* Get malicious queue */
1647 for (j = 0; j < 32 && wqbr; j++) {
1648
1649 if (!(wqbr & (1 << j)))
1650 continue;
1651
1652 /* Get queue from bitmask */
1653 q = j + (i * 32);
1654
1655 /* Map queue to vf */
1656 vf = (q >> shift);
1657
1658 /* Set vf bit in vf_bitmap */
1659 idx = vf / 32;
1660 vf_bitmap[idx] |= (1 << (vf % 32));
1661 wqbr &= ~(1 << j);
1662 }
1663 }
1664 }
1665
1666 /**
1667 * ixgbe_get_media_type_X550em - Get media type
1668 * @hw: pointer to hardware structure
1669 *
1670 * Returns the media type (fiber, copper, backplane)
1671 */
1672 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1673 {
1674 enum ixgbe_media_type media_type;
1675
1676 DEBUGFUNC("ixgbe_get_media_type_X550em");
1677
1678 /* Detect if there is a copper PHY attached. */
1679 switch (hw->device_id) {
1680 case IXGBE_DEV_ID_X550EM_X_KR:
1681 case IXGBE_DEV_ID_X550EM_X_KX4:
1682 case IXGBE_DEV_ID_X550EM_X_XFI:
1683 case IXGBE_DEV_ID_X550EM_A_KR:
1684 case IXGBE_DEV_ID_X550EM_A_KR_L:
1685 media_type = ixgbe_media_type_backplane;
1686 break;
1687 case IXGBE_DEV_ID_X550EM_X_SFP:
1688 case IXGBE_DEV_ID_X550EM_A_SFP:
1689 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1690 case IXGBE_DEV_ID_X550EM_A_QSFP:
1691 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1692 media_type = ixgbe_media_type_fiber;
1693 break;
1694 case IXGBE_DEV_ID_X550EM_X_1G_T:
1695 case IXGBE_DEV_ID_X550EM_X_10G_T:
1696 case IXGBE_DEV_ID_X550EM_A_10G_T:
1697 media_type = ixgbe_media_type_copper;
1698 break;
1699 case IXGBE_DEV_ID_X550EM_A_SGMII:
1700 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1701 media_type = ixgbe_media_type_backplane;
1702 hw->phy.type = ixgbe_phy_sgmii;
1703 break;
1704 case IXGBE_DEV_ID_X550EM_A_1G_T:
1705 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1706 media_type = ixgbe_media_type_copper;
1707 break;
1708 default:
1709 media_type = ixgbe_media_type_unknown;
1710 break;
1711 }
1712 return media_type;
1713 }
1714
1715 /**
1716 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1717 * @hw: pointer to hardware structure
1718 * @linear: TRUE if SFP module is linear
1719 */
1720 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1721 {
1722 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1723
1724 switch (hw->phy.sfp_type) {
1725 case ixgbe_sfp_type_not_present:
1726 return IXGBE_ERR_SFP_NOT_PRESENT;
1727 case ixgbe_sfp_type_da_cu_core0:
1728 case ixgbe_sfp_type_da_cu_core1:
1729 *linear = TRUE;
1730 break;
1731 case ixgbe_sfp_type_srlr_core0:
1732 case ixgbe_sfp_type_srlr_core1:
1733 case ixgbe_sfp_type_da_act_lmt_core0:
1734 case ixgbe_sfp_type_da_act_lmt_core1:
1735 case ixgbe_sfp_type_1g_sx_core0:
1736 case ixgbe_sfp_type_1g_sx_core1:
1737 case ixgbe_sfp_type_1g_lx_core0:
1738 case ixgbe_sfp_type_1g_lx_core1:
1739 *linear = FALSE;
1740 break;
1741 case ixgbe_sfp_type_unknown:
1742 case ixgbe_sfp_type_1g_cu_core0:
1743 case ixgbe_sfp_type_1g_cu_core1:
1744 default:
1745 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1746 }
1747
1748 return IXGBE_SUCCESS;
1749 }
1750
1751 /**
1752 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1753 * @hw: pointer to hardware structure
1754 *
1755 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1756 **/
1757 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1758 {
1759 s32 status;
1760 bool linear;
1761
1762 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1763
1764 status = ixgbe_identify_module_generic(hw);
1765
1766 if (status != IXGBE_SUCCESS)
1767 return status;
1768
1769 /* Check if SFP module is supported */
1770 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1771
1772 return status;
1773 }
1774
1775 /**
1776 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1777 * @hw: pointer to hardware structure
1778 */
1779 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1780 {
1781 s32 status;
1782 bool linear;
1783
1784 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1785
1786 /* Check if SFP module is supported */
1787 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1788
1789 if (status != IXGBE_SUCCESS)
1790 return status;
1791
1792 ixgbe_init_mac_link_ops_X550em(hw);
1793 hw->phy.ops.reset = NULL;
1794
1795 return IXGBE_SUCCESS;
1796 }
1797
1798 /**
1799 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1800 * internal PHY
1801 * @hw: pointer to hardware structure
1802 **/
1803 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1804 {
1805 s32 status;
1806 u32 link_ctrl;
1807
1808 /* Restart auto-negotiation. */
1809 status = hw->mac.ops.read_iosf_sb_reg(hw,
1810 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1811 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1812
1813 if (status) {
1814 DEBUGOUT("Auto-negotiation did not complete\n");
1815 return status;
1816 }
1817
1818 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1819 status = hw->mac.ops.write_iosf_sb_reg(hw,
1820 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1821 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1822
1823 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1824 u32 flx_mask_st20;
1825
1826 /* Indicate to FW that AN restart has been asserted */
1827 status = hw->mac.ops.read_iosf_sb_reg(hw,
1828 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1829 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1830
1831 if (status) {
1832 DEBUGOUT("Auto-negotiation did not complete\n");
1833 return status;
1834 }
1835
1836 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1837 status = hw->mac.ops.write_iosf_sb_reg(hw,
1838 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1839 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1840 }
1841
1842 return status;
1843 }
1844
1845 /**
1846 * ixgbe_setup_sgmii - Set up link for sgmii
1847 * @hw: pointer to hardware structure
1848 * @speed: new link speed
1849 * @autoneg_wait: TRUE when waiting for completion is needed
1850 */
1851 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1852 bool autoneg_wait)
1853 {
1854 struct ixgbe_mac_info *mac = &hw->mac;
1855 u32 lval, sval, flx_val;
1856 s32 rc;
1857
1858 rc = mac->ops.read_iosf_sb_reg(hw,
1859 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1860 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1861 if (rc)
1862 return rc;
1863
1864 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1865 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1866 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1867 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1868 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1869 rc = mac->ops.write_iosf_sb_reg(hw,
1870 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1871 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1872 if (rc)
1873 return rc;
1874
1875 rc = mac->ops.read_iosf_sb_reg(hw,
1876 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1877 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1878 if (rc)
1879 return rc;
1880
1881 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1882 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1883 rc = mac->ops.write_iosf_sb_reg(hw,
1884 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1885 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1886 if (rc)
1887 return rc;
1888
1889 rc = mac->ops.read_iosf_sb_reg(hw,
1890 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1891 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1892 if (rc)
1893 return rc;
1894
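	/* Program the flexible PMD for 1G SGMII: force the 1G data rate,
	 * clear the generic AN enable, and enable SGMII with clause-37 AN.
	 */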
1895 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1896 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1897 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1898 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1899 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1900
1901 rc = mac->ops.write_iosf_sb_reg(hw,
1902 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1903 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1904 if (rc)
1905 return rc;
1906
1907 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1908 if (rc)
1909 return rc;
1910
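	/* MAC-side SGMII is now configured; have the PHY set up the
	 * requested link speed.
	 */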
1911 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1912 }
1913
1914 /**
1915 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1916 * @hw: pointer to hardware structure
1917 * @speed: new link speed
1918 * @autoneg_wait: TRUE when waiting for completion is needed
1919 */
1920 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1921 bool autoneg_wait)
1922 {
1923 struct ixgbe_mac_info *mac = &hw->mac;
1924 u32 lval, sval, flx_val;
1925 s32 rc;
1926
1927 rc = mac->ops.read_iosf_sb_reg(hw,
1928 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1929 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1930 if (rc)
1931 return rc;
1932
1933 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1934 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1935 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1936 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1937 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1938 rc = mac->ops.write_iosf_sb_reg(hw,
1939 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1940 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1941 if (rc)
1942 return rc;
1943
1944 rc = mac->ops.read_iosf_sb_reg(hw,
1945 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1946 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1947 if (rc)
1948 return rc;
1949
1950 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1951 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1952 rc = mac->ops.write_iosf_sb_reg(hw,
1953 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1954 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1955 if (rc)
1956 return rc;
1957
1958 rc = mac->ops.write_iosf_sb_reg(hw,
1959 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1960 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1961 if (rc)
1962 return rc;
1963
1964 rc = mac->ops.read_iosf_sb_reg(hw,
1965 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1966 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1967 if (rc)
1968 return rc;
1969
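	/* Program the flexible PMD for firmware-driven SGMII: take the speed
	 * from auto-negotiation and enable SGMII with clause-37 AN.
	 */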
1970 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1971 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1972 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1973 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1974 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1975
1976 rc = mac->ops.write_iosf_sb_reg(hw,
1977 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1978 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1979 if (rc)
1980 return rc;
1981
1982 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1983
1984 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1985 }
1986
1987 /**
1988 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1989 * @hw: pointer to hardware structure
1990 */
1991 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1992 {
1993 struct ixgbe_mac_info *mac = &hw->mac;
1994
1995 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1996
1997 switch (hw->mac.ops.get_media_type(hw)) {
1998 case ixgbe_media_type_fiber:
1999 /* CS4227 does not support autoneg, so disable the laser control
2000 * functions for SFP+ fiber
2001 */
2002 mac->ops.disable_tx_laser = NULL;
2003 mac->ops.enable_tx_laser = NULL;
2004 mac->ops.flap_tx_laser = NULL;
2005 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2006 mac->ops.set_rate_select_speed =
2007 ixgbe_set_soft_rate_select_speed;
2008
2009 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2010 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2011 mac->ops.setup_mac_link =
2012 ixgbe_setup_mac_link_sfp_x550a;
2013 else
2014 mac->ops.setup_mac_link =
2015 ixgbe_setup_mac_link_sfp_x550em;
2016 break;
2017 case ixgbe_media_type_copper:
2018 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2019 break;
2020 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2021 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2022 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2023 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2024 mac->ops.check_link =
2025 ixgbe_check_mac_link_generic;
2026 } else {
2027 mac->ops.setup_link =
2028 ixgbe_setup_mac_link_t_X550em;
2029 }
2030 } else {
2031 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2032 mac->ops.check_link = ixgbe_check_link_t_X550em;
2033 }
2034 break;
2035 case ixgbe_media_type_backplane:
2036 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2037 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2038 mac->ops.setup_link = ixgbe_setup_sgmii;
2039 break;
2040 default:
2041 break;
2042 }
2043 }
2044
2045 /**
2046  * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2047 * @hw: pointer to hardware structure
2048 * @speed: pointer to link speed
2049 * @autoneg: TRUE when autoneg or autotry is enabled
2050 */
2051 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2052 ixgbe_link_speed *speed,
2053 bool *autoneg)
2054 {
2055 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2056
2057
2058 if (hw->phy.type == ixgbe_phy_fw) {
2059 *autoneg = TRUE;
2060 *speed = hw->phy.speeds_supported;
2061 return 0;
2062 }
2063
2064 /* SFP */
2065 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2066
2067 /* CS4227 SFP must not enable auto-negotiation */
2068 *autoneg = FALSE;
2069
2070 /* Check if 1G SFP module. */
2071 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2072 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2073 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2074 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2075 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2076 return IXGBE_SUCCESS;
2077 }
2078
2079 /* Link capabilities are based on SFP */
2080 if (hw->phy.multispeed_fiber)
2081 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2082 IXGBE_LINK_SPEED_1GB_FULL;
2083 else
2084 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2085 } else {
2086 switch (hw->phy.type) {
2087 case ixgbe_phy_ext_1g_t:
2088 case ixgbe_phy_sgmii:
2089 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2090 break;
2091 case ixgbe_phy_x550em_kr:
2092 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2093 /* check different backplane modes */
2094 if (hw->phy.nw_mng_if_sel &
2095 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2096 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2097 break;
2098 } else if (hw->device_id ==
2099 IXGBE_DEV_ID_X550EM_A_KR_L) {
2100 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2101 break;
2102 }
2103 }
2104 /* fall through */
2105 default:
2106 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2107 IXGBE_LINK_SPEED_1GB_FULL;
2108 break;
2109 }
2110 *autoneg = TRUE;
2111 }
2112
2113 return IXGBE_SUCCESS;
2114 }
2115
2116 /**
2117  * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2118 * @hw: pointer to hardware structure
2119 * @lsc: pointer to boolean flag which indicates whether external Base T
2120 * PHY interrupt is lsc
2121 *
2122  * Determine if the external Base T PHY interrupt cause is high temperature
2123 * failure alarm or link status change.
2124 *
2125 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2126 * failure alarm, else return PHY access status.
2127 */
2128 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2129 {
2130 u32 status;
2131 u16 reg;
2132
2133 *lsc = FALSE;
2134
2135 /* Vendor alarm triggered */
2136 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2137 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2138 				      &reg);
2139
2140 if (status != IXGBE_SUCCESS ||
2141 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2142 return status;
2143
2144 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2145 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2146 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2147 				      &reg);
2148
2149 if (status != IXGBE_SUCCESS ||
2150 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2151 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2152 return status;
2153
2154 /* Global alarm triggered */
2155 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2156 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2157 				      &reg);
2158
2159 if (status != IXGBE_SUCCESS)
2160 return status;
2161
2162 /* If high temperature failure, then return over temp error and exit */
2163 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2164 /* power down the PHY in case the PHY FW didn't already */
2165 ixgbe_set_copper_phy_power(hw, FALSE);
2166 return IXGBE_ERR_OVERTEMP;
2167 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2168 /* device fault alarm triggered */
2169 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2170 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2171 				      &reg);
2172
2173 if (status != IXGBE_SUCCESS)
2174 return status;
2175
2176 /* if device fault was due to high temp alarm handle and exit */
2177 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2178 /* power down the PHY in case the PHY FW didn't */
2179 ixgbe_set_copper_phy_power(hw, FALSE);
2180 return IXGBE_ERR_OVERTEMP;
2181 }
2182 }
2183
2184 /* Vendor alarm 2 triggered */
2185 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2186 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2187
2188 if (status != IXGBE_SUCCESS ||
2189 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2190 return status;
2191
2192 /* link connect/disconnect event occurred */
2193 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2194 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2195
2196 if (status != IXGBE_SUCCESS)
2197 return status;
2198
2199 /* Indicate LSC */
2200 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2201 *lsc = TRUE;
2202
2203 return IXGBE_SUCCESS;
2204 }
2205
2206 /**
2207 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2208 * @hw: pointer to hardware structure
2209 *
2210 * Enable link status change and temperature failure alarm for the external
2211 * Base T PHY
2212 *
2213 * Returns PHY access status
2214 */
2215 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2216 {
2217 u32 status;
2218 u16 reg;
2219 bool lsc;
2220
2221 /* Clear interrupt flags */
2222 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2223
2224 /* Enable link status change alarm */
2225
2226 /* Enable the LASI interrupts on X552 devices to receive notifications
2227 * of the link configurations of the external PHY and correspondingly
2228 * support the configuration of the internal iXFI link, since iXFI does
2229 * not support auto-negotiation. This is not required for X553 devices
2230 	 * having KR support, which performs auto-negotiation and is used
2231 	 * as the internal link to the external PHY. Hence a check is added here
2232 * to avoid enabling LASI interrupts for X553 devices.
2233 */
2234 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2235 status = hw->phy.ops.read_reg(hw,
2236 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2237 			IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2238
2239 if (status != IXGBE_SUCCESS)
2240 return status;
2241
2242 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2243
2244 status = hw->phy.ops.write_reg(hw,
2245 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2246 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2247
2248 if (status != IXGBE_SUCCESS)
2249 return status;
2250 }
2251
2252 /* Enable high temperature failure and global fault alarms */
2253 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2254 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2255 				      &reg);
2256
2257 if (status != IXGBE_SUCCESS)
2258 return status;
2259
2260 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2261 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2262
2263 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2264 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2265 reg);
2266
2267 if (status != IXGBE_SUCCESS)
2268 return status;
2269
2270 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2271 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2272 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2273 				      &reg);
2274
2275 if (status != IXGBE_SUCCESS)
2276 return status;
2277
2278 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2279 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2280
2281 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2282 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2283 reg);
2284
2285 if (status != IXGBE_SUCCESS)
2286 return status;
2287
2288 /* Enable chip-wide vendor alarm */
2289 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2290 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2291 				      &reg);
2292
2293 if (status != IXGBE_SUCCESS)
2294 return status;
2295
2296 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2297
2298 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2299 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2300 reg);
2301
2302 return status;
2303 }
2304
2305 /**
2306 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2307 * @hw: pointer to hardware structure
2308 * @speed: link speed
2309 *
2310 * Configures the integrated KR PHY.
2311 **/
2312 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2313 ixgbe_link_speed speed)
2314 {
2315 s32 status;
2316 u32 reg_val;
2317
2318 status = hw->mac.ops.read_iosf_sb_reg(hw,
2319 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2320 					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2321 if (status)
2322 return status;
2323
2324 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2325 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2326 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2327
2328 /* Advertise 10G support. */
2329 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2330 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2331
2332 /* Advertise 1G support. */
2333 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2334 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2335
2336 status = hw->mac.ops.write_iosf_sb_reg(hw,
2337 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2338 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2339
2340 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2341 /* Set lane mode to KR auto negotiation */
2342 status = hw->mac.ops.read_iosf_sb_reg(hw,
2343 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2344 				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2345
2346 if (status)
2347 return status;
2348
2349 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2350 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2351 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2352 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2353 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2354
2355 status = hw->mac.ops.write_iosf_sb_reg(hw,
2356 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2357 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2358 }
2359
2360 return ixgbe_restart_an_internal_phy_x550em(hw);
2361 }
2362
2363 /**
2364 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2365 * @hw: pointer to hardware structure
2366 */
2367 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2368 {
2369 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2370 s32 rc;
2371
2372 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2373 return IXGBE_SUCCESS;
2374
2375 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2376 if (rc)
2377 return rc;
2378 memset(store, 0, sizeof(store));
2379
2380 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2381 if (rc)
2382 return rc;
2383
2384 return ixgbe_setup_fw_link(hw);
2385 }
2386
2387 /**
2388 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2389 * @hw: pointer to hardware structure
2390 */
2391 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2392 {
2393 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2394 s32 rc;
2395
2396 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2397 if (rc)
2398 return rc;
2399
2400 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2401 ixgbe_shutdown_fw_phy(hw);
2402 return IXGBE_ERR_OVERTEMP;
2403 }
2404 return IXGBE_SUCCESS;
2405 }
2406
2407 /**
2408 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2409 * @hw: pointer to hardware structure
2410 *
2411 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2412 * values.
2413 **/
2414 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2415 {
2416 /* Save NW management interface connected on board. This is used
2417 * to determine internal PHY mode.
2418 */
2419 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2420
2421 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2422 	 * PHY address. This register field has only been used for X552.
2423 */
2424 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2425 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2426 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2427 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2428 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2429 }
2430
2431 return IXGBE_SUCCESS;
2432 }
2433
2434 /**
2435 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2436 * @hw: pointer to hardware structure
2437 *
2438 * Initialize any function pointers that were not able to be
2439 * set during init_shared_code because the PHY/SFP type was
2440 * not known. Perform the SFP init if necessary.
2441 */
2442 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2443 {
2444 struct ixgbe_phy_info *phy = &hw->phy;
2445 s32 ret_val;
2446
2447 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2448
2449 hw->mac.ops.set_lan_id(hw);
2450 ixgbe_read_mng_if_sel_x550em(hw);
2451
2452 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2453 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2454 ixgbe_setup_mux_ctl(hw);
2455 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2456 }
2457
2458 switch (hw->device_id) {
2459 case IXGBE_DEV_ID_X550EM_A_1G_T:
2460 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2461 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2462 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2463 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2464 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2465 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2466 if (hw->bus.lan_id)
2467 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2468 else
2469 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2470
2471 break;
2472 case IXGBE_DEV_ID_X550EM_A_10G_T:
2473 case IXGBE_DEV_ID_X550EM_A_SFP:
2474 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2475 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2476 if (hw->bus.lan_id)
2477 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2478 else
2479 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2480 break;
2481 case IXGBE_DEV_ID_X550EM_X_SFP:
2482 /* set up for CS4227 usage */
2483 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2484 break;
2485 case IXGBE_DEV_ID_X550EM_X_1G_T:
2486 phy->ops.read_reg_mdi = NULL;
2487 phy->ops.write_reg_mdi = NULL;
2488 break;
2489 default:
2490 break;
2491 }
2492
2493 /* Identify the PHY or SFP module */
2494 ret_val = phy->ops.identify(hw);
2495 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2496 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2497 return ret_val;
2498
2499 /* Setup function pointers based on detected hardware */
2500 ixgbe_init_mac_link_ops_X550em(hw);
2501 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2502 phy->ops.reset = NULL;
2503
2504 /* Set functions pointers based on phy type */
2505 switch (hw->phy.type) {
2506 case ixgbe_phy_x550em_kx4:
2507 phy->ops.setup_link = NULL;
2508 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2509 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2510 break;
2511 case ixgbe_phy_x550em_kr:
2512 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2513 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2514 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2515 break;
2516 case ixgbe_phy_ext_1g_t:
2517 /* link is managed by FW */
2518 phy->ops.setup_link = NULL;
2519 phy->ops.reset = NULL;
2520 break;
2521 case ixgbe_phy_x550em_xfi:
2522 /* link is managed by HW */
2523 phy->ops.setup_link = NULL;
2524 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2525 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2526 break;
2527 case ixgbe_phy_x550em_ext_t:
2528 /* If internal link mode is XFI, then setup iXFI internal link,
2529 * else setup KR now.
2530 */
2531 phy->ops.setup_internal_link =
2532 ixgbe_setup_internal_phy_t_x550em;
2533
2534 /* setup SW LPLU only for first revision of X550EM_x */
2535 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2536 !(IXGBE_FUSES0_REV_MASK &
2537 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2538 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2539
2540 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2541 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2542 break;
2543 case ixgbe_phy_sgmii:
2544 phy->ops.setup_link = NULL;
2545 break;
2546 case ixgbe_phy_fw:
2547 phy->ops.setup_link = ixgbe_setup_fw_link;
2548 phy->ops.reset = ixgbe_reset_phy_fw;
2549 break;
2550 default:
2551 break;
2552 }
2553 return ret_val;
2554 }
2555
2556 /**
2557 * ixgbe_set_mdio_speed - Set MDIO clock speed
2558 * @hw: pointer to hardware structure
2559 */
2560 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2561 {
2562 u32 hlreg0;
2563
2564 switch (hw->device_id) {
2565 case IXGBE_DEV_ID_X550EM_X_10G_T:
2566 case IXGBE_DEV_ID_X550EM_A_SGMII:
2567 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2568 case IXGBE_DEV_ID_X550EM_A_10G_T:
2569 case IXGBE_DEV_ID_X550EM_A_SFP:
2570 case IXGBE_DEV_ID_X550EM_A_QSFP:
2571 /* Config MDIO clock speed before the first MDIO PHY access */
2572 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2573 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2574 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2575 break;
2576 case IXGBE_DEV_ID_X550EM_A_1G_T:
2577 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2578 /* Select fast MDIO clock speed for these devices */
2579 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2580 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2581 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2582 break;
2583 default:
2584 break;
2585 }
2586 }
2587
2588 /**
2589 * ixgbe_reset_hw_X550em - Perform hardware reset
2590 * @hw: pointer to hardware structure
2591 *
2592 * Resets the hardware by resetting the transmit and receive units, masks
2593  * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2594 * reset.
2595 */
2596 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2597 {
2598 ixgbe_link_speed link_speed;
2599 s32 status;
2600 u32 ctrl = 0;
2601 u32 i;
2602 bool link_up = FALSE;
2603 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2604
2605 DEBUGFUNC("ixgbe_reset_hw_X550em");
2606
2607 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2608 status = hw->mac.ops.stop_adapter(hw);
2609 if (status != IXGBE_SUCCESS) {
2610 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2611 return status;
2612 }
2613 /* flush pending Tx transactions */
2614 ixgbe_clear_tx_pending(hw);
2615
2616 ixgbe_set_mdio_speed(hw);
2617
2618 /* PHY ops must be identified and initialized prior to reset */
2619 status = hw->phy.ops.init(hw);
2620
2621 if (status)
2622 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2623 status);
2624
2625 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2626 status == IXGBE_ERR_PHY_ADDR_INVALID) {
2627 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2628 return status;
2629 }
2630
2631 /* start the external PHY */
2632 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2633 status = ixgbe_init_ext_t_x550em(hw);
2634 if (status) {
2635 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2636 status);
2637 return status;
2638 }
2639 }
2640
2641 /* Setup SFP module if there is one present. */
2642 if (hw->phy.sfp_setup_needed) {
2643 status = hw->mac.ops.setup_sfp(hw);
2644 hw->phy.sfp_setup_needed = FALSE;
2645 }
2646
2647 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2648 return status;
2649
2650 /* Reset PHY */
2651 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2652 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2653 return IXGBE_ERR_OVERTEMP;
2654 }
2655
2656 mac_reset_top:
2657 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2658 * If link reset is used when link is up, it might reset the PHY when
2659 * mng is using it. If link is down or the flag to force full link
2660 * reset is set, then perform link reset.
2661 */
2662 ctrl = IXGBE_CTRL_LNK_RST;
2663 if (!hw->force_full_reset) {
2664 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2665 if (link_up)
2666 ctrl = IXGBE_CTRL_RST;
2667 }
2668
2669 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2670 if (status != IXGBE_SUCCESS) {
2671 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2672 "semaphore failed with %d", status);
2673 return IXGBE_ERR_SWFW_SYNC;
2674 }
2675 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2676 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2677 IXGBE_WRITE_FLUSH(hw);
2678 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2679
2680 /* Poll for reset bit to self-clear meaning reset is complete */
2681 for (i = 0; i < 10; i++) {
2682 usec_delay(1);
2683 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2684 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2685 break;
2686 }
2687
2688 if (ctrl & IXGBE_CTRL_RST_MASK) {
2689 status = IXGBE_ERR_RESET_FAILED;
2690 DEBUGOUT("Reset polling failed to complete.\n");
2691 }
2692
2693 msec_delay(50);
2694
2695 /* Double resets are required for recovery from certain error
2696 * conditions. Between resets, it is necessary to stall to
2697 * allow time for any pending HW events to complete.
2698 */
2699 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2700 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2701 goto mac_reset_top;
2702 }
2703
2704 /* Store the permanent mac address */
2705 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2706
2707 /* Store MAC address from RAR0, clear receive address registers, and
2708 * clear the multicast table. Also reset num_rar_entries to 128,
2709 * since we modify this value when programming the SAN MAC address.
2710 */
2711 hw->mac.num_rar_entries = 128;
2712 hw->mac.ops.init_rx_addrs(hw);
2713
2714 ixgbe_set_mdio_speed(hw);
2715
2716 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2717 ixgbe_setup_mux_ctl(hw);
2718
2719 if (status != IXGBE_SUCCESS)
2720 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2721
2722 return status;
2723 }
2724
2725 /**
2726 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2727 * @hw: pointer to hardware structure
2728 */
2729 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2730 {
2731 u32 status;
2732 u16 reg;
2733
2734 status = hw->phy.ops.read_reg(hw,
2735 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2736 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2737 				      &reg);
2738
2739 if (status != IXGBE_SUCCESS)
2740 return status;
2741
2742 /* If PHY FW reset completed bit is set then this is the first
2743 * SW instance after a power on so the PHY FW must be un-stalled.
2744 */
2745 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2746 status = hw->phy.ops.read_reg(hw,
2747 IXGBE_MDIO_GLOBAL_RES_PR_10,
2748 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2749 				      &reg);
2750
2751 if (status != IXGBE_SUCCESS)
2752 return status;
2753
2754 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2755
2756 status = hw->phy.ops.write_reg(hw,
2757 IXGBE_MDIO_GLOBAL_RES_PR_10,
2758 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2759 reg);
2760
2761 if (status != IXGBE_SUCCESS)
2762 return status;
2763 }
2764
2765 return status;
2766 }
2767
2768 /**
2769 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2770 * @hw: pointer to hardware structure
2771 **/
2772 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2773 {
2774 /* leave link alone for 2.5G */
2775 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2776 return IXGBE_SUCCESS;
2777
2778 if (ixgbe_check_reset_blocked(hw))
2779 return 0;
2780
2781 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2782 }
2783
2784 /**
2785  * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2786 * @hw: pointer to hardware structure
2787 * @speed: new link speed
2788 * @autoneg_wait_to_complete: unused
2789 *
2790 * Configure the external PHY and the integrated KR PHY for SFP support.
2791 **/
2792 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2793 ixgbe_link_speed speed,
2794 bool autoneg_wait_to_complete)
2795 {
2796 s32 ret_val;
2797 u16 reg_slice, reg_val;
2798 bool setup_linear = FALSE;
2799 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2800
2801 /* Check if SFP module is supported and linear */
2802 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2803
2804 	/* If no SFP module is present, return success, since there is no
2805 	 * reason to configure the CS4227 and an SFP-not-present error is not
2806 	 * accepted in the setup MAC link flow.
2807 */
2808 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2809 return IXGBE_SUCCESS;
2810
2811 if (ret_val != IXGBE_SUCCESS)
2812 return ret_val;
2813
2814 /* Configure internal PHY for KR/KX. */
2815 ixgbe_setup_kr_speed_x550em(hw, speed);
2816
2817 /* Configure CS4227 LINE side to proper mode. */
2818 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2819 (hw->bus.lan_id << 12);
2820 if (setup_linear)
2821 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2822 else
2823 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2824 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2825 reg_val);
2826 return ret_val;
2827 }
2828
2829 /**
2830 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2831 * @hw: pointer to hardware structure
2832 * @speed: the link speed to force
2833 *
2834 * Configures the integrated PHY for native SFI mode. Used to connect the
2835 * internal PHY directly to an SFP cage, without autonegotiation.
2836 **/
2837 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2838 {
2839 struct ixgbe_mac_info *mac = &hw->mac;
2840 s32 status;
2841 u32 reg_val;
2842
2843 /* Disable all AN and force speed to 10G Serial. */
2844 status = mac->ops.read_iosf_sb_reg(hw,
2845 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2846 		    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2847 if (status != IXGBE_SUCCESS)
2848 return status;
2849
2850 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2851 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2852 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2853 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2854
2855 /* Select forced link speed for internal PHY. */
2856 switch (*speed) {
2857 case IXGBE_LINK_SPEED_10GB_FULL:
2858 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2859 break;
2860 case IXGBE_LINK_SPEED_1GB_FULL:
2861 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2862 break;
2863 case 0:
2864 /* media none (linkdown) */
2865 break;
2866 default:
2867 /* Other link speeds are not supported by internal PHY. */
2868 return IXGBE_ERR_LINK_SETUP;
2869 }
2870
2871 status = mac->ops.write_iosf_sb_reg(hw,
2872 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2873 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2874
2875 /* Toggle port SW reset by AN reset. */
2876 status = ixgbe_restart_an_internal_phy_x550em(hw);
2877
2878 return status;
2879 }
2880
2881 /**
2882 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2883 * @hw: pointer to hardware structure
2884 * @speed: new link speed
2885 * @autoneg_wait_to_complete: unused
2886 *
2887  * Configure the integrated PHY for SFP support.
2888 **/
2889 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2890 ixgbe_link_speed speed,
2891 bool autoneg_wait_to_complete)
2892 {
2893 s32 ret_val;
2894 u16 reg_phy_ext;
2895 bool setup_linear = FALSE;
2896 u32 reg_slice, reg_phy_int, slice_offset;
2897
2898 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2899
2900 /* Check if SFP module is supported and linear */
2901 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2902
2903 	/* If no SFP module is present, return success, since an
2904 	 * SFP-not-present error is not accepted in the setup MAC link flow.
2905 */
2906 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2907 return IXGBE_SUCCESS;
2908
2909 if (ret_val != IXGBE_SUCCESS)
2910 return ret_val;
2911
2912 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2913 /* Configure internal PHY for native SFI based on module type */
2914 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2915 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2916 			    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2917
2918 if (ret_val != IXGBE_SUCCESS)
2919 return ret_val;
2920
2921 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2922 if (!setup_linear)
2923 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2924
2925 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2926 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2927 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2928
2929 if (ret_val != IXGBE_SUCCESS)
2930 return ret_val;
2931
2932 /* Setup SFI internal link. */
2933 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2934 } else {
2935 /* Configure internal PHY for KR/KX. */
2936 ixgbe_setup_kr_speed_x550em(hw, speed);
2937
2938 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2939 /* Find Address */
2940 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2941 return IXGBE_ERR_PHY_ADDR_INVALID;
2942 }
2943
2944 /* Get external PHY SKU id */
2945 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2946 					IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2947
2948 if (ret_val != IXGBE_SUCCESS)
2949 return ret_val;
2950
2951 /* When configuring quad port CS4223, the MAC instance is part
2952 * of the slice offset.
2953 */
2954 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2955 slice_offset = (hw->bus.lan_id +
2956 (hw->bus.instance_id << 1)) << 12;
2957 else
2958 slice_offset = hw->bus.lan_id << 12;
2959
2960 /* Configure CS4227/CS4223 LINE side to proper mode. */
2961 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2962
2963 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2964 					IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2965
2966 if (ret_val != IXGBE_SUCCESS)
2967 return ret_val;
2968
2969 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2970 (IXGBE_CS4227_EDC_MODE_SR << 1));
2971
2972 if (setup_linear)
2973 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2974 else
2975 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2976 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2977 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2978
2979 /* Flush previous write with a read */
2980 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2981 					IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2982 }
2983 return ret_val;
2984 }
2985
2986 /**
2987 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2988 * @hw: pointer to hardware structure
2989 *
2990  * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2991 **/
2992 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
2993 {
2994 struct ixgbe_mac_info *mac = &hw->mac;
2995 s32 status;
2996 u32 reg_val;
2997
2998 /* Disable training protocol FSM. */
2999 status = mac->ops.read_iosf_sb_reg(hw,
3000 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3001 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3002 if (status != IXGBE_SUCCESS)
3003 return status;
3004 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3005 status = mac->ops.write_iosf_sb_reg(hw,
3006 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3007 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3008 if (status != IXGBE_SUCCESS)
3009 return status;
3010
3011 /* Disable Flex from training TXFFE. */
3012 status = mac->ops.read_iosf_sb_reg(hw,
3013 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3014 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3015 if (status != IXGBE_SUCCESS)
3016 return status;
3017 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3018 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3019 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3020 status = mac->ops.write_iosf_sb_reg(hw,
3021 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3022 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3023 if (status != IXGBE_SUCCESS)
3024 return status;
3025 status = mac->ops.read_iosf_sb_reg(hw,
3026 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3027 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3028 if (status != IXGBE_SUCCESS)
3029 return status;
3030 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3031 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3032 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3033 status = mac->ops.write_iosf_sb_reg(hw,
3034 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3035 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3036 if (status != IXGBE_SUCCESS)
3037 return status;
3038
3039 /* Enable override for coefficients. */
3040 status = mac->ops.read_iosf_sb_reg(hw,
3041 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3042 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3043 if (status != IXGBE_SUCCESS)
3044 return status;
3045 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3046 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3047 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3048 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3049 status = mac->ops.write_iosf_sb_reg(hw,
3050 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3051 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3052 return status;
3053 }
3054
3055 /**
3056 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3057 * @hw: pointer to hardware structure
3058 * @speed: the link speed to force
3059 *
3060 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3061 * internal and external PHY at a specific speed, without autonegotiation.
3062 **/
3063 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3064 {
3065 struct ixgbe_mac_info *mac = &hw->mac;
3066 s32 status;
3067 u32 reg_val;
3068
3069 /* iXFI is only supported with X552 */
3070 if (mac->type != ixgbe_mac_X550EM_x)
3071 return IXGBE_ERR_LINK_SETUP;
3072
3073 /* Disable AN and force speed to 10G Serial. */
3074 status = mac->ops.read_iosf_sb_reg(hw,
3075 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3076 				      IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3077 if (status != IXGBE_SUCCESS)
3078 return status;
3079
3080 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3081 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3082
3083 /* Select forced link speed for internal PHY. */
3084 switch (*speed) {
3085 case IXGBE_LINK_SPEED_10GB_FULL:
3086 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3087 break;
3088 case IXGBE_LINK_SPEED_1GB_FULL:
3089 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3090 break;
3091 default:
3092 /* Other link speeds are not supported by internal KR PHY. */
3093 return IXGBE_ERR_LINK_SETUP;
3094 }
3095
3096 status = mac->ops.write_iosf_sb_reg(hw,
3097 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3098 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3099 if (status != IXGBE_SUCCESS)
3100 return status;
3101
3102 /* Additional configuration needed for x550em_x */
3103 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3104 status = ixgbe_setup_ixfi_x550em_x(hw);
3105 if (status != IXGBE_SUCCESS)
3106 return status;
3107 }
3108
3109 /* Toggle port SW reset by AN reset. */
3110 status = ixgbe_restart_an_internal_phy_x550em(hw);
3111
3112 return status;
3113 }
3114
3115 /**
3116 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3117 * @hw: address of hardware structure
3118 * @link_up: address of boolean to indicate link status
3119 *
3120 * Returns error code if unable to get link status.
3121 */
3122 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3123 {
3124 u32 ret;
3125 u16 autoneg_status;
3126
3127 *link_up = FALSE;
3128
3129 /* read this twice back to back to indicate current status */
3130 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3131 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3132 &autoneg_status);
3133 if (ret != IXGBE_SUCCESS)
3134 return ret;
3135
3136 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3137 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3138 &autoneg_status);
3139 if (ret != IXGBE_SUCCESS)
3140 return ret;
3141
3142 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3143
3144 return IXGBE_SUCCESS;
3145 }
3146
3147 /**
3148 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3149  * @hw: pointer to hardware structure
3150 *
3151 * Configures the link between the integrated KR PHY and the external X557 PHY
3152 * The driver will call this function when it gets a link status change
3153 * interrupt from the X557 PHY. This function configures the link speed
3154 * between the PHYs to match the link speed of the BASE-T link.
3155 *
3156 * A return of a non-zero value indicates an error, and the base driver should
3157 * not report link up.
3158 */
3159 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3160 {
3161 ixgbe_link_speed force_speed;
3162 bool link_up;
3163 u32 status;
3164 u16 speed;
3165
3166 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3167 return IXGBE_ERR_CONFIG;
3168
3169 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3170 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3171 /* If link is down, there is no setup necessary so return */
3172 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3173 if (status != IXGBE_SUCCESS)
3174 return status;
3175
3176 if (!link_up)
3177 return IXGBE_SUCCESS;
3178
3179 status = hw->phy.ops.read_reg(hw,
3180 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3181 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3182 &speed);
3183 if (status != IXGBE_SUCCESS)
3184 return status;
3185
3186 /* If link is still down - no setup is required so return */
3187 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3188 if (status != IXGBE_SUCCESS)
3189 return status;
3190 if (!link_up)
3191 return IXGBE_SUCCESS;
3192
3193 /* clear everything but the speed and duplex bits */
3194 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3195
3196 switch (speed) {
3197 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3198 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3199 break;
3200 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3201 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3202 break;
3203 default:
3204 /* Internal PHY does not support anything else */
3205 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3206 }
3207
3208 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3209 } else {
3210 speed = IXGBE_LINK_SPEED_10GB_FULL |
3211 IXGBE_LINK_SPEED_1GB_FULL;
3212 return ixgbe_setup_kr_speed_x550em(hw, speed);
3213 }
3214 }
3215
3216 /**
3217 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3218 * @hw: pointer to hardware structure
3219 *
3220 * Configures the integrated KR PHY to use internal loopback mode.
3221 **/
3222 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3223 {
3224 s32 status;
3225 u32 reg_val;
3226
3227 /* Disable AN and force speed to 10G Serial. */
3228 status = hw->mac.ops.read_iosf_sb_reg(hw,
3229 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3230 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3231 if (status != IXGBE_SUCCESS)
3232 return status;
3233 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3234 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3235 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3236 status = hw->mac.ops.write_iosf_sb_reg(hw,
3237 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3238 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3239 if (status != IXGBE_SUCCESS)
3240 return status;
3241
3242 /* Set near-end loopback clocks. */
3243 status = hw->mac.ops.read_iosf_sb_reg(hw,
3244 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3245 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3246 if (status != IXGBE_SUCCESS)
3247 return status;
3248 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3249 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3250 status = hw->mac.ops.write_iosf_sb_reg(hw,
3251 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3252 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3253 if (status != IXGBE_SUCCESS)
3254 return status;
3255
3256 /* Set loopback enable. */
3257 status = hw->mac.ops.read_iosf_sb_reg(hw,
3258 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3259 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3260 if (status != IXGBE_SUCCESS)
3261 return status;
3262 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3263 status = hw->mac.ops.write_iosf_sb_reg(hw,
3264 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3265 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3266 if (status != IXGBE_SUCCESS)
3267 return status;
3268
3269 /* Training bypass. */
3270 status = hw->mac.ops.read_iosf_sb_reg(hw,
3271 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3272 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3273 if (status != IXGBE_SUCCESS)
3274 return status;
3275 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3276 status = hw->mac.ops.write_iosf_sb_reg(hw,
3277 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3278 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3279
3280 return status;
3281 }
3282
3283 /**
3284 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
3285  * The required semaphore is acquired and released internally.
3286 * @hw: pointer to hardware structure
3287 * @offset: offset of word in the EEPROM to read
3288 * @data: word read from the EEPROM
3289 *
3290 * Reads a 16 bit word from the EEPROM using the hostif.
3291 **/
3292 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3293 {
3294 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3295 struct ixgbe_hic_read_shadow_ram buffer;
3296 s32 status;
3297
3298 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3299 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3300 buffer.hdr.req.buf_lenh = 0;
3301 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3302 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3303
3304 /* convert offset from words to bytes */
3305 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3306 /* one word */
3307 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3308 buffer.pad2 = 0;
3309 buffer.pad3 = 0;
3310
3311 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3312 if (status)
3313 return status;
3314
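	/* Issue the shadow RAM read while holding the semaphore, then pull
	 * the returned word out of the FLEX_MNG response area.
	 */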
3315 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3316 IXGBE_HI_COMMAND_TIMEOUT);
3317 if (!status) {
3318 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3319 FW_NVM_DATA_OFFSET);
3320 }
3321
3322 hw->mac.ops.release_swfw_sync(hw, mask);
3323 return status;
3324 }
3325
3326 /**
3327  * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3328 * @hw: pointer to hardware structure
3329 * @offset: offset of word in the EEPROM to read
3330 * @words: number of words
3331 * @data: word(s) read from the EEPROM
3332 *
3333  * Reads one or more 16 bit words from the EEPROM using the hostif.
3334 **/
3335 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3336 u16 offset, u16 words, u16 *data)
3337 {
3338 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3339 struct ixgbe_hic_read_shadow_ram buffer;
3340 u32 current_word = 0;
3341 u16 words_to_read;
3342 s32 status;
3343 u32 i;
3344
3345 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3346
3347 /* Take semaphore for the entire operation. */
3348 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3349 if (status) {
3350 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3351 return status;
3352 }
3353
3354 while (words) {
3355 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3356 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3357 else
3358 words_to_read = words;
3359
3360 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3361 buffer.hdr.req.buf_lenh = 0;
3362 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3363 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3364
3365 /* convert offset from words to bytes */
3366 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3367 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3368 buffer.pad2 = 0;
3369 buffer.pad3 = 0;
3370
3371 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3372 IXGBE_HI_COMMAND_TIMEOUT);
3373
3374 if (status) {
3375 DEBUGOUT("Host interface command failed\n");
3376 goto out;
3377 }
3378
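		/* Each 32-bit FLEX_MNG register holds two 16-bit EEPROM
		 * words; unpack the low half first, then the high half.
		 */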
3379 for (i = 0; i < words_to_read; i++) {
3380 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3381 2 * i;
3382 u32 value = IXGBE_READ_REG(hw, reg);
3383
3384 data[current_word] = (u16)(value & 0xffff);
3385 current_word++;
3386 i++;
3387 if (i < words_to_read) {
3388 value >>= 16;
3389 data[current_word] = (u16)(value & 0xffff);
3390 current_word++;
3391 }
3392 }
3393 words -= words_to_read;
3394 }
3395
3396 out:
3397 hw->mac.ops.release_swfw_sync(hw, mask);
3398 return status;
3399 }
3400
3401 /**
3402  * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3403 * @hw: pointer to hardware structure
3404 * @offset: offset of word in the EEPROM to write
3405  * @data: word to write to the EEPROM
3406 *
3407 * Write a 16 bit word to the EEPROM using the hostif.
3408 **/
3409 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3410 u16 data)
3411 {
3412 s32 status;
3413 struct ixgbe_hic_write_shadow_ram buffer;
3414
3415 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3416
3417 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3418 buffer.hdr.req.buf_lenh = 0;
3419 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3420 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3421
3422 /* one word */
3423 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3424 buffer.data = data;
3425 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3426
3427 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3428 sizeof(buffer),
3429 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3430
3431 return status;
3432 }
3433
3434 /**
3435 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3436 * @hw: pointer to hardware structure
3437 * @offset: offset of word in the EEPROM to write
3438  * @data: word to write to the EEPROM
3439 *
3440 * Write a 16 bit word to the EEPROM using the hostif.
3441 **/
3442 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3443 u16 data)
3444 {
3445 s32 status = IXGBE_SUCCESS;
3446
3447 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3448
3449 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3450 IXGBE_SUCCESS) {
3451 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3452 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3453 } else {
3454 DEBUGOUT("write ee hostif failed to get semaphore");
3455 status = IXGBE_ERR_SWFW_SYNC;
3456 }
3457
3458 return status;
3459 }
3460
3461 /**
3462 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3463 * @hw: pointer to hardware structure
3464 * @offset: offset of word in the EEPROM to write
3465 * @words: number of words
3466  * @data: word(s) to write to the EEPROM
3467 *
3468  * Writes one or more 16 bit words to the EEPROM using the hostif.
3469 **/
3470 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3471 u16 offset, u16 words, u16 *data)
3472 {
3473 s32 status = IXGBE_SUCCESS;
3474 u32 i = 0;
3475
3476 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3477
3478 /* Take semaphore for the entire operation. */
3479 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3480 if (status != IXGBE_SUCCESS) {
3481 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3482 goto out;
3483 }
3484
3485 for (i = 0; i < words; i++) {
3486 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3487 data[i]);
3488
3489 if (status != IXGBE_SUCCESS) {
3490 DEBUGOUT("Eeprom buffered write failed\n");
3491 break;
3492 }
3493 }
3494
3495 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3496 out:
3497
3498 return status;
3499 }
3500
3501 /**
3502 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3503 * @hw: pointer to hardware structure
3504 * @ptr: pointer offset in eeprom
3505  * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
3506 * @csum: address of checksum to update
3507 * @buffer: pointer to buffer containing calculated checksum
3508 * @buffer_size: size of buffer
3509 *
3510 * Returns error status for any failure
3511 */
3512 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3513 u16 size, u16 *csum, u16 *buffer,
3514 u32 buffer_size)
3515 {
3516 u16 buf[256];
3517 s32 status;
3518 u16 length, bufsz, i, start;
3519 u16 *local_buffer;
3520
3521 bufsz = sizeof(buf) / sizeof(buf[0]);
3522
3523 /* Read a chunk at the pointer location */
3524 if (!buffer) {
3525 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3526 if (status) {
3527 DEBUGOUT("Failed to read EEPROM image\n");
3528 return status;
3529 }
3530 local_buffer = buf;
3531 } else {
3532 if (buffer_size < ptr)
3533 return IXGBE_ERR_PARAM;
3534 local_buffer = &buffer[ptr];
3535 }
3536
3537 if (size) {
3538 start = 0;
3539 length = size;
3540 } else {
3541 start = 1;
3542 length = local_buffer[0];
3543
3544 /* Skip pointer section if length is invalid. */
3545 if (length == 0xFFFF || length == 0 ||
3546 (ptr + length) >= hw->eeprom.word_size)
3547 return IXGBE_SUCCESS;
3548 }
3549
3550 if (buffer && ((u32)start + (u32)length > buffer_size))
3551 return IXGBE_ERR_PARAM;
3552
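	/* Sum the region bufsz words at a time, re-reading from the EEPROM
	 * whenever the local chunk is exhausted (only when no caller buffer
	 * was supplied).
	 */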
3553 for (i = start; length; i++, length--) {
3554 if (i == bufsz && !buffer) {
3555 ptr += bufsz;
3556 i = 0;
3557 if (length < bufsz)
3558 bufsz = length;
3559
3560 /* Read a chunk at the pointer location */
3561 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3562 bufsz, buf);
3563 if (status) {
3564 DEBUGOUT("Failed to read EEPROM image\n");
3565 return status;
3566 }
3567 }
3568 *csum += local_buffer[i];
3569 }
3570 return IXGBE_SUCCESS;
3571 }
3572
3573 /**
3574 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3575 * @hw: pointer to hardware structure
3576 * @buffer: pointer to buffer containing calculated checksum
3577 * @buffer_size: size of buffer
3578 *
3579 * Returns a negative error code on error, or the 16-bit checksum
3580 **/
3581 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3582 {
3583 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3584 u16 *local_buffer;
3585 s32 status;
3586 u16 checksum = 0;
3587 u16 pointer, i, size;
3588
3589 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3590
3591 hw->eeprom.ops.init_params(hw);
3592
3593 if (!buffer) {
3594 /* Read pointer area */
3595 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3596 IXGBE_EEPROM_LAST_WORD + 1,
3597 eeprom_ptrs);
3598 if (status) {
3599 DEBUGOUT("Failed to read EEPROM image\n");
3600 return status;
3601 }
3602 local_buffer = eeprom_ptrs;
3603 } else {
3604 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3605 return IXGBE_ERR_PARAM;
3606 local_buffer = buffer;
3607 }
3608
3609 /*
3610 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3611 * checksum word itself
3612 */
3613 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3614 if (i != IXGBE_EEPROM_CHECKSUM)
3615 checksum += local_buffer[i];
3616
3617 /*
3618 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3619 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3620 */
3621 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3622 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3623 continue;
3624
3625 pointer = local_buffer[i];
3626
3627 /* Skip pointer section if the pointer is invalid. */
3628 if (pointer == 0xFFFF || pointer == 0 ||
3629 pointer >= hw->eeprom.word_size)
3630 continue;
3631
3632 switch (i) {
3633 case IXGBE_PCIE_GENERAL_PTR:
3634 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3635 break;
3636 case IXGBE_PCIE_CONFIG0_PTR:
3637 case IXGBE_PCIE_CONFIG1_PTR:
3638 size = IXGBE_PCIE_CONFIG_SIZE;
3639 break;
3640 default:
3641 size = 0;
3642 break;
3643 }
3644
3645 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3646 buffer, buffer_size);
3647 if (status)
3648 return status;
3649 }
3650
3651 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3652
3653 return (s32)checksum;
3654 }
3655
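/*
 * Illustrative sketch (not part of the driver): ixgbe_calc_checksum_X550()
 * returns the value that makes the 16-bit sum of every contributing word,
 * plus the checksum word itself, equal IXGBE_EEPROM_SUM.  The hypothetical
 * helper below only restates that invariant; "example_verify_sum" and its
 * "sum_of_words" argument are illustrative names, not driver symbols.
 */
#if 0
static bool example_verify_sum(u16 calculated, u16 sum_of_words)
{
	/* sum_of_words: 16-bit sum of all contributing words, excluding
	 * the word stored at IXGBE_EEPROM_CHECKSUM.
	 */
	return (u16)(sum_of_words + calculated) == (u16)IXGBE_EEPROM_SUM;
}
#endif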
3656 /**
3657 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3658 * @hw: pointer to hardware structure
3659 *
3660 * Returns a negative error code on error, or the 16-bit checksum
3661 **/
3662 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3663 {
3664 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3665 }
3666
3667 /**
3668 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3669 * @hw: pointer to hardware structure
3670 * @checksum_val: calculated checksum
3671 *
3672 * Performs checksum calculation and validates the EEPROM checksum. If the
3673 * caller does not need checksum_val, the value can be NULL.
3674 **/
3675 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3676 {
3677 s32 status;
3678 u16 checksum;
3679 u16 read_checksum = 0;
3680
3681 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3682
3683 /* Read the first word from the EEPROM. If this times out or fails, do
3684 * not continue or we could be in for a very long wait while every
3685 * EEPROM read fails
3686 */
3687 status = hw->eeprom.ops.read(hw, 0, &checksum);
3688 if (status) {
3689 DEBUGOUT("EEPROM read failed\n");
3690 return status;
3691 }
3692
3693 status = hw->eeprom.ops.calc_checksum(hw);
3694 if (status < 0)
3695 return status;
3696
3697 checksum = (u16)(status & 0xffff);
3698
3699 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3700 &read_checksum);
3701 if (status)
3702 return status;
3703
3704 /* Verify read checksum from EEPROM is the same as
3705 * calculated checksum
3706 */
3707 if (read_checksum != checksum) {
3708 status = IXGBE_ERR_EEPROM_CHECKSUM;
3709 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3710 "Invalid EEPROM checksum");
3711 }
3712
3713 /* If the user cares, return the calculated checksum */
3714 if (checksum_val)
3715 *checksum_val = checksum;
3716
3717 return status;
3718 }
3719
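/*
 * Usage sketch (hypothetical, assuming a probe-time caller named
 * "example_probe_eeprom"): the checksum is normally validated before the
 * EEPROM contents are trusted; checksum_val may be NULL when the caller
 * does not need the calculated value.
 */
#if 0
static s32 example_probe_eeprom(struct ixgbe_hw *hw)
{
	u16 csum = 0;
	s32 status;

	status = ixgbe_validate_eeprom_checksum_X550(hw, &csum);
	if (status == IXGBE_ERR_EEPROM_CHECKSUM)
		DEBUGOUT1("EEPROM checksum mismatch, calculated 0x%04x\n",
			  csum);
	return status;
}
#endif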
3720 /**
3721 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3722 * @hw: pointer to hardware structure
3723 *
3724  * After writing the EEPROM to shadow RAM using the EEWR register, software
3725  * calculates the checksum, updates the EEPROM, and instructs the hardware
3726  * to update the flash.
3727 **/
3728 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3729 {
3730 s32 status;
3731 u16 checksum = 0;
3732
3733 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3734
3735 /* Read the first word from the EEPROM. If this times out or fails, do
3736 * not continue or we could be in for a very long wait while every
3737 * EEPROM read fails
3738 */
3739 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3740 if (status) {
3741 DEBUGOUT("EEPROM read failed\n");
3742 return status;
3743 }
3744
3745 status = ixgbe_calc_eeprom_checksum_X550(hw);
3746 if (status < 0)
3747 return status;
3748
3749 checksum = (u16)(status & 0xffff);
3750
3751 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3752 checksum);
3753 if (status)
3754 return status;
3755
3756 status = ixgbe_update_flash_X550(hw);
3757
3758 return status;
3759 }
3760
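/*
 * Sequencing sketch (an assumption drawn from the two functions above, not a
 * required API): a word is first written to shadow RAM, then the checksum is
 * recomputed and the shadow RAM is dumped to flash.  "example_write_word" is
 * a hypothetical helper name.
 */
#if 0
static s32 example_write_word(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;

	status = ixgbe_write_ee_hostif_X550(hw, offset, data);
	if (status != IXGBE_SUCCESS)
		return status;

	/* Recompute the checksum and trigger the shadow RAM dump. */
	return ixgbe_update_eeprom_checksum_X550(hw);
}
#endif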
3761 /**
3762 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3763 * @hw: pointer to hardware structure
3764 *
3765 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3766 **/
3767 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3768 {
3769 s32 status = IXGBE_SUCCESS;
3770 union ixgbe_hic_hdr2 buffer;
3771
3772 DEBUGFUNC("ixgbe_update_flash_X550");
3773
3774 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3775 buffer.req.buf_lenh = 0;
3776 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3777 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3778
3779 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3780 sizeof(buffer),
3781 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3782
3783 return status;
3784 }
3785
3786 /**
3787 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3788 * @hw: pointer to hardware structure
3789 *
3790 * Determines physical layer capabilities of the current configuration.
3791 **/
3792 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3793 {
3794 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3795 u16 ext_ability = 0;
3796
3797 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3798
3799 hw->phy.ops.identify(hw);
3800
3801 switch (hw->phy.type) {
3802 case ixgbe_phy_x550em_kr:
3803 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3804 if (hw->phy.nw_mng_if_sel &
3805 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3806 physical_layer =
3807 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3808 break;
3809 } else if (hw->device_id ==
3810 IXGBE_DEV_ID_X550EM_A_KR_L) {
3811 physical_layer =
3812 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3813 break;
3814 }
3815 }
3816 /* fall through */
3817 case ixgbe_phy_x550em_xfi:
3818 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3819 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3820 break;
3821 case ixgbe_phy_x550em_kx4:
3822 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3823 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3824 break;
3825 case ixgbe_phy_x550em_ext_t:
3826 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3827 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3828 &ext_ability);
3829 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3830 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3831 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3832 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3833 break;
3834 case ixgbe_phy_fw:
3835 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3836 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3837 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3838 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3839 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3840 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3841 break;
3842 case ixgbe_phy_sgmii:
3843 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3844 break;
3845 case ixgbe_phy_ext_1g_t:
3846 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3847 break;
3848 default:
3849 break;
3850 }
3851
3852 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3853 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3854
3855 return physical_layer;
3856 }
3857
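/*
 * Illustrative use (hypothetical "example_supports_10gbase_t"): the return
 * value is a bitmask, so callers test individual IXGBE_PHYSICAL_LAYER_* bits
 * rather than comparing for equality.
 */
#if 0
static bool example_supports_10gbase_t(struct ixgbe_hw *hw)
{
	u64 layer = ixgbe_get_supported_physical_layer_X550em(hw);

	return (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) != 0;
}
#endif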
3858 /**
3859  * ixgbe_get_bus_info_X550em - Set PCI bus info
3860 * @hw: pointer to hardware structure
3861 *
3862 * Sets bus link width and speed to unknown because X550em is
3863 * not a PCI device.
3864 **/
3865 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3866 {
3867
3868 	DEBUGFUNC("ixgbe_get_bus_info_X550em");
3869
3870 hw->bus.width = ixgbe_bus_width_unknown;
3871 hw->bus.speed = ixgbe_bus_speed_unknown;
3872
3873 hw->mac.ops.set_lan_id(hw);
3874
3875 return IXGBE_SUCCESS;
3876 }
3877
3878 /**
3879 * ixgbe_disable_rx_x550 - Disable RX unit
3880 * @hw: pointer to hardware structure
3881 *
3882  * Disables the Rx DMA unit for x550
3883 **/
3884 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3885 {
3886 u32 rxctrl, pfdtxgswc;
3887 s32 status;
3888 struct ixgbe_hic_disable_rxen fw_cmd;
3889
3890 	DEBUGFUNC("ixgbe_disable_rx_x550");
3891
3892 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3893 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3894 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3895 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3896 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3897 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3898 hw->mac.set_lben = TRUE;
3899 } else {
3900 hw->mac.set_lben = FALSE;
3901 }
3902
3903 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3904 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3905 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3906 fw_cmd.port_number = (u8)hw->bus.lan_id;
3907
3908 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3909 sizeof(struct ixgbe_hic_disable_rxen),
3910 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3911
3912 /* If we fail - disable RX using register write */
3913 if (status) {
3914 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3915 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3916 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3917 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3918 }
3919 }
3920 }
3921 }
3922
3923 /**
3924  * ixgbe_enter_lplu_t_x550em - Transition to low power states
3925 * @hw: pointer to hardware structure
3926 *
3927 * Configures Low Power Link Up on transition to low power states
3928 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3929 * X557 PHY immediately prior to entering LPLU.
3930 **/
3931 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3932 {
3933 u16 an_10g_cntl_reg, autoneg_reg, speed;
3934 s32 status;
3935 ixgbe_link_speed lcd_speed;
3936 u32 save_autoneg;
3937 bool link_up;
3938
3939 /* SW LPLU not required on later HW revisions. */
3940 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3941 (IXGBE_FUSES0_REV_MASK &
3942 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3943 return IXGBE_SUCCESS;
3944
3945 /* If blocked by MNG FW, then don't restart AN */
3946 if (ixgbe_check_reset_blocked(hw))
3947 return IXGBE_SUCCESS;
3948
3949 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3950 if (status != IXGBE_SUCCESS)
3951 return status;
3952
3953 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3954
3955 if (status != IXGBE_SUCCESS)
3956 return status;
3957
3958 	/* If link is down, LPLU is disabled in the NVM, or both WoL and
3959 	 * manageability are disabled, then force link down by entering low power mode.
3960 	 */
3961 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3962 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3963 return ixgbe_set_copper_phy_power(hw, FALSE);
3964
3965 /* Determine LCD */
3966 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3967
3968 if (status != IXGBE_SUCCESS)
3969 return status;
3970
3971 /* If no valid LCD link speed, then force link down and exit. */
3972 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3973 return ixgbe_set_copper_phy_power(hw, FALSE);
3974
3975 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3976 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3977 &speed);
3978
3979 if (status != IXGBE_SUCCESS)
3980 return status;
3981
3982 /* If no link now, speed is invalid so take link down */
3983 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3984 if (status != IXGBE_SUCCESS)
3985 return ixgbe_set_copper_phy_power(hw, FALSE);
3986
3987 /* clear everything but the speed bits */
3988 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3989
3990 /* If current speed is already LCD, then exit. */
3991 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3992 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3993 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3994 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3995 return status;
3996
3997 /* Clear AN completed indication */
3998 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
3999 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4000 &autoneg_reg);
4001
4002 if (status != IXGBE_SUCCESS)
4003 return status;
4004
4005 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4006 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4007 &an_10g_cntl_reg);
4008
4009 if (status != IXGBE_SUCCESS)
4010 return status;
4011
4012 status = hw->phy.ops.read_reg(hw,
4013 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4014 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4015 &autoneg_reg);
4016
4017 if (status != IXGBE_SUCCESS)
4018 return status;
4019
4020 save_autoneg = hw->phy.autoneg_advertised;
4021
4022 	/* Set up the link at the lowest common link speed */
4023 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4024
4025 /* restore autoneg from before setting lplu speed */
4026 hw->phy.autoneg_advertised = save_autoneg;
4027
4028 return status;
4029 }
4030
4031 /**
4032  * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4033 * @hw: pointer to hardware structure
4034 * @lcd_speed: pointer to lowest common link speed
4035 *
4036 * Determine lowest common link speed with link partner.
4037 **/
4038 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4039 {
4040 u16 an_lp_status;
4041 s32 status;
4042 u16 word = hw->eeprom.ctrl_word_3;
4043
4044 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4045
4046 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4047 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4048 &an_lp_status);
4049
4050 if (status != IXGBE_SUCCESS)
4051 return status;
4052
4053 /* If link partner advertised 1G, return 1G */
4054 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4055 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4056 return status;
4057 }
4058
4059 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4060 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4061 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4062 return status;
4063
4064 /* Link partner not capable of lower speeds, return 10G */
4065 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4066 return status;
4067 }
4068
4069 /**
4070 * ixgbe_setup_fc_X550em - Set up flow control
4071 * @hw: pointer to hardware structure
4072 *
4073 * Called at init time to set up flow control.
4074 **/
4075 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4076 {
4077 s32 ret_val = IXGBE_SUCCESS;
4078 u32 pause, asm_dir, reg_val;
4079
4080 DEBUGFUNC("ixgbe_setup_fc_X550em");
4081
4082 /* Validate the requested mode */
4083 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4084 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4085 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4086 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4087 goto out;
4088 }
4089
4090 /* 10gig parts do not have a word in the EEPROM to determine the
4091 * default flow control setting, so we explicitly set it to full.
4092 */
4093 if (hw->fc.requested_mode == ixgbe_fc_default)
4094 hw->fc.requested_mode = ixgbe_fc_full;
4095
4096 /* Determine PAUSE and ASM_DIR bits. */
4097 switch (hw->fc.requested_mode) {
4098 case ixgbe_fc_none:
4099 pause = 0;
4100 asm_dir = 0;
4101 break;
4102 case ixgbe_fc_tx_pause:
4103 pause = 0;
4104 asm_dir = 1;
4105 break;
4106 case ixgbe_fc_rx_pause:
4107 /* Rx Flow control is enabled and Tx Flow control is
4108 * disabled by software override. Since there really
4109 * isn't a way to advertise that we are capable of RX
4110 * Pause ONLY, we will advertise that we support both
4111 * symmetric and asymmetric Rx PAUSE, as such we fall
4112 * through to the fc_full statement. Later, we will
4113 * disable the adapter's ability to send PAUSE frames.
4114 */
4115 case ixgbe_fc_full:
4116 pause = 1;
4117 asm_dir = 1;
4118 break;
4119 default:
4120 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4121 "Flow control param set incorrectly\n");
4122 ret_val = IXGBE_ERR_CONFIG;
4123 goto out;
4124 }
4125
4126 switch (hw->device_id) {
4127 case IXGBE_DEV_ID_X550EM_X_KR:
4128 case IXGBE_DEV_ID_X550EM_A_KR:
4129 case IXGBE_DEV_ID_X550EM_A_KR_L:
4130 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4131 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4132 					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4133 if (ret_val != IXGBE_SUCCESS)
4134 goto out;
4135 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4136 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4137 if (pause)
4138 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4139 if (asm_dir)
4140 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4141 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4142 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4143 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4144
4145 /* This device does not fully support AN. */
4146 hw->fc.disable_fc_autoneg = TRUE;
4147 break;
4148 case IXGBE_DEV_ID_X550EM_X_XFI:
4149 hw->fc.disable_fc_autoneg = TRUE;
4150 break;
4151 default:
4152 break;
4153 }
4154
4155 out:
4156 return ret_val;
4157 }
4158
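/*
 * Restating the PAUSE/ASM_DIR mapping used in the switch above as a
 * hypothetical helper ("example_fc_to_pause_bits" is not a driver symbol):
 * none advertises neither bit, tx_pause advertises ASM_DIR only, and both
 * rx_pause and full advertise SYM_PAUSE plus ASM_DIR.
 */
#if 0
static void example_fc_to_pause_bits(enum ixgbe_fc_mode mode,
				     u32 *pause, u32 *asm_dir)
{
	*pause = (mode == ixgbe_fc_rx_pause || mode == ixgbe_fc_full);
	*asm_dir = (mode != ixgbe_fc_none);
}
#endif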
4159 /**
4160 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4161 * @hw: pointer to hardware structure
4162 *
4163 * Enable flow control according to IEEE clause 37.
4164 **/
4165 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4166 {
4167 u32 link_s1, lp_an_page_low, an_cntl_1;
4168 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4169 ixgbe_link_speed speed;
4170 bool link_up;
4171
4172 /* AN should have completed when the cable was plugged in.
4173 * Look for reasons to bail out. Bail out if:
4174 * - FC autoneg is disabled, or if
4175 * - link is not up.
4176 */
4177 if (hw->fc.disable_fc_autoneg) {
4178 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4179 "Flow control autoneg is disabled");
4180 goto out;
4181 }
4182
4183 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4184 if (!link_up) {
4185 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4186 goto out;
4187 }
4188
4189 	/* Check that auto-negotiation has completed */
4190 status = hw->mac.ops.read_iosf_sb_reg(hw,
4191 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4192 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4193
4194 if (status != IXGBE_SUCCESS ||
4195 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4196 DEBUGOUT("Auto-Negotiation did not complete\n");
4197 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4198 goto out;
4199 }
4200
4201 /* Read the 10g AN autoc and LP ability registers and resolve
4202 * local flow control settings accordingly
4203 */
4204 status = hw->mac.ops.read_iosf_sb_reg(hw,
4205 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4206 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4207
4208 if (status != IXGBE_SUCCESS) {
4209 DEBUGOUT("Auto-Negotiation did not complete\n");
4210 goto out;
4211 }
4212
4213 status = hw->mac.ops.read_iosf_sb_reg(hw,
4214 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4215 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4216
4217 if (status != IXGBE_SUCCESS) {
4218 DEBUGOUT("Auto-Negotiation did not complete\n");
4219 goto out;
4220 }
4221
4222 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4223 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4224 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4225 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4226 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4227
4228 out:
4229 if (status == IXGBE_SUCCESS) {
4230 hw->fc.fc_was_autonegged = TRUE;
4231 } else {
4232 hw->fc.fc_was_autonegged = FALSE;
4233 hw->fc.current_mode = hw->fc.requested_mode;
4234 }
4235 }
4236
4237 /**
4238 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4239 * @hw: pointer to hardware structure
4240  * The requested flow control settings are passed through unchanged.
4241 **/
4242 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4243 {
4244 hw->fc.fc_was_autonegged = FALSE;
4245 hw->fc.current_mode = hw->fc.requested_mode;
4246 }
4247
4248 /**
4249 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4250 * @hw: pointer to hardware structure
4251 *
4252 * Enable flow control according to IEEE clause 37.
4253 **/
4254 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4255 {
4256 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4257 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4258 ixgbe_link_speed speed;
4259 bool link_up;
4260
4261 /* AN should have completed when the cable was plugged in.
4262 * Look for reasons to bail out. Bail out if:
4263 * - FC autoneg is disabled, or if
4264 * - link is not up.
4265 */
4266 if (hw->fc.disable_fc_autoneg) {
4267 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4268 "Flow control autoneg is disabled");
4269 goto out;
4270 }
4271
4272 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4273 if (!link_up) {
4274 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4275 goto out;
4276 }
4277
4278 /* Check if auto-negotiation has completed */
4279 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4280 if (status != IXGBE_SUCCESS ||
4281 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4282 DEBUGOUT("Auto-Negotiation did not complete\n");
4283 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4284 goto out;
4285 }
4286
4287 /* Negotiate the flow control */
4288 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4289 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4290 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4291 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4292 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4293
4294 out:
4295 if (status == IXGBE_SUCCESS) {
4296 hw->fc.fc_was_autonegged = TRUE;
4297 } else {
4298 hw->fc.fc_was_autonegged = FALSE;
4299 hw->fc.current_mode = hw->fc.requested_mode;
4300 }
4301 }
4302
4303 /**
4304 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4305 * @hw: pointer to hardware structure
4306 *
4307 * Called at init time to set up flow control.
4308 **/
4309 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4310 {
4311 s32 status = IXGBE_SUCCESS;
4312 u32 an_cntl = 0;
4313
4314 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4315
4316 /* Validate the requested mode */
4317 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4318 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4319 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4320 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4321 }
4322
4323 if (hw->fc.requested_mode == ixgbe_fc_default)
4324 hw->fc.requested_mode = ixgbe_fc_full;
4325
4326 /* Set up the 1G and 10G flow control advertisement registers so the
4327 * HW will be able to do FC autoneg once the cable is plugged in. If
4328 * we link at 10G, the 1G advertisement is harmless and vice versa.
4329 */
4330 status = hw->mac.ops.read_iosf_sb_reg(hw,
4331 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4332 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4333
4334 if (status != IXGBE_SUCCESS) {
4335 DEBUGOUT("Auto-Negotiation did not complete\n");
4336 return status;
4337 }
4338
4339 /* The possible values of fc.requested_mode are:
4340 * 0: Flow control is completely disabled
4341 * 1: Rx flow control is enabled (we can receive pause frames,
4342 * but not send pause frames).
4343 * 2: Tx flow control is enabled (we can send pause frames but
4344 * we do not support receiving pause frames).
4345 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4346 * other: Invalid.
4347 */
4348 switch (hw->fc.requested_mode) {
4349 case ixgbe_fc_none:
4350 /* Flow control completely disabled by software override. */
4351 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4352 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4353 break;
4354 case ixgbe_fc_tx_pause:
4355 /* Tx Flow control is enabled, and Rx Flow control is
4356 * disabled by software override.
4357 */
4358 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4359 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4360 break;
4361 case ixgbe_fc_rx_pause:
4362 /* Rx Flow control is enabled and Tx Flow control is
4363 * disabled by software override. Since there really
4364 * isn't a way to advertise that we are capable of RX
4365 * Pause ONLY, we will advertise that we support both
4366 * symmetric and asymmetric Rx PAUSE, as such we fall
4367 * through to the fc_full statement. Later, we will
4368 * disable the adapter's ability to send PAUSE frames.
4369 */
4370 case ixgbe_fc_full:
4371 /* Flow control (both Rx and Tx) is enabled by SW override. */
4372 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4373 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4374 break;
4375 default:
4376 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4377 "Flow control param set incorrectly\n");
4378 return IXGBE_ERR_CONFIG;
4379 }
4380
4381 status = hw->mac.ops.write_iosf_sb_reg(hw,
4382 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4383 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4384
4385 /* Restart auto-negotiation. */
4386 status = ixgbe_restart_an_internal_phy_x550em(hw);
4387
4388 return status;
4389 }
4390
4391 /**
4392 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4393 * @hw: pointer to hardware structure
4394 * @state: set mux if 1, clear if 0
4395 */
4396 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4397 {
4398 u32 esdp;
4399
4400 if (!hw->bus.lan_id)
4401 return;
4402 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4403 if (state)
4404 esdp |= IXGBE_ESDP_SDP1;
4405 else
4406 esdp &= ~IXGBE_ESDP_SDP1;
4407 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4408 IXGBE_WRITE_FLUSH(hw);
4409 }
4410
4411 /**
4412 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4413 * @hw: pointer to hardware structure
4414 * @mask: Mask to specify which semaphore to acquire
4415 *
4416 * Acquires the SWFW semaphore and sets the I2C MUX
4417 **/
4418 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4419 {
4420 s32 status;
4421
4422 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4423
4424 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4425 if (status)
4426 return status;
4427
4428 if (mask & IXGBE_GSSR_I2C_MASK)
4429 ixgbe_set_mux(hw, 1);
4430
4431 return IXGBE_SUCCESS;
4432 }
4433
4434 /**
4435 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4436 * @hw: pointer to hardware structure
4437 * @mask: Mask to specify which semaphore to release
4438 *
4439  * Releases the SWFW semaphore and clears the I2C MUX
4440 **/
4441 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4442 {
4443 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4444
4445 if (mask & IXGBE_GSSR_I2C_MASK)
4446 ixgbe_set_mux(hw, 0);
4447
4448 ixgbe_release_swfw_sync_X540(hw, mask);
4449 }
4450
4451 /**
4452 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4453 * @hw: pointer to hardware structure
4454 * @mask: Mask to specify which semaphore to acquire
4455 *
4456  * Acquires the SWFW semaphore and gets the shared PHY token as needed
4457 */
4458 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4459 {
4460 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4461 int retries = FW_PHY_TOKEN_RETRIES;
4462 s32 status = IXGBE_SUCCESS;
4463
4464 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4465
4466 while (--retries) {
4467 status = IXGBE_SUCCESS;
4468 if (hmask)
4469 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4470 if (status) {
4471 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4472 status);
4473 return status;
4474 }
4475 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4476 return IXGBE_SUCCESS;
4477
4478 status = ixgbe_get_phy_token(hw);
4479 if (status == IXGBE_ERR_TOKEN_RETRY)
4480 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4481 status);
4482
4483 if (status == IXGBE_SUCCESS)
4484 return IXGBE_SUCCESS;
4485
4486 if (hmask)
4487 ixgbe_release_swfw_sync_X540(hw, hmask);
4488
4489 if (status != IXGBE_ERR_TOKEN_RETRY) {
4490 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4491 status);
4492 return status;
4493 }
4494 }
4495
4496 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4497 hw->phy.id);
4498 return status;
4499 }
4500
4501 /**
4502 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4503 * @hw: pointer to hardware structure
4504 * @mask: Mask to specify which semaphore to release
4505 *
4506  * Releases the SWFW semaphore and puts back the shared PHY token as needed
4507 */
4508 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4509 {
4510 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4511
4512 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4513
4514 if (mask & IXGBE_GSSR_TOKEN_SM)
4515 ixgbe_put_phy_token(hw);
4516
4517 if (hmask)
4518 ixgbe_release_swfw_sync_X540(hw, hmask);
4519 }
4520
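/*
 * Pairing sketch (hypothetical "example_with_phy_token"): each successful
 * acquire above must be matched by a release with the same mask, which is
 * exactly what the two MDIO access routines below do for the port's SWFW
 * bit plus IXGBE_GSSR_TOKEN_SM.
 */
#if 0
static s32 example_with_phy_token(struct ixgbe_hw *hw)
{
	u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
	s32 status;

	status = ixgbe_acquire_swfw_sync_X550a(hw, mask);
	if (status != IXGBE_SUCCESS)
		return status;

	/* ... access the shared PHY here ... */

	ixgbe_release_swfw_sync_X550a(hw, mask);
	return IXGBE_SUCCESS;
}
#endif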
4521 /**
4522 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4523 * @hw: pointer to hardware structure
4524 * @reg_addr: 32 bit address of PHY register to read
4525 * @device_type: 5 bit device type
4526 * @phy_data: Pointer to read data from PHY register
4527 *
4528 * Reads a value from a specified PHY register using the SWFW lock and PHY
4529  * Token. The PHY Token is needed since the MDIO is shared between two MAC
4530 * instances.
4531 **/
4532 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4533 u32 device_type, u16 *phy_data)
4534 {
4535 s32 status;
4536 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4537
4538 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4539
4540 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4541 return IXGBE_ERR_SWFW_SYNC;
4542
4543 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4544
4545 hw->mac.ops.release_swfw_sync(hw, mask);
4546
4547 return status;
4548 }
4549
4550 /**
4551 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4552 * @hw: pointer to hardware structure
4553 * @reg_addr: 32 bit PHY register to write
4554 * @device_type: 5 bit device type
4555 * @phy_data: Data to write to the PHY register
4556 *
4557 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4558  * The PHY Token is needed since the MDIO is shared between two MAC instances.
4559 **/
4560 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4561 u32 device_type, u16 phy_data)
4562 {
4563 s32 status;
4564 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4565
4566 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4567
4568 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4569 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4570 phy_data);
4571 hw->mac.ops.release_swfw_sync(hw, mask);
4572 } else {
4573 status = IXGBE_ERR_SWFW_SYNC;
4574 }
4575
4576 return status;
4577 }
4578
4579 /**
4580 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4581 * @hw: pointer to hardware structure
4582 *
4583  * Handle an external Base T PHY interrupt. If a high temperature failure
4584  * alarm is raised, return an error; otherwise, if the link status has
4585  * changed, set up the internal/external PHY link.
4586  *
4587  * Return IXGBE_ERR_OVERTEMP if the interrupt is a high temperature
4588  * failure alarm, else return the PHY access status.
4589 */
4590 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4591 {
4592 bool lsc;
4593 u32 status;
4594
4595 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4596
4597 if (status != IXGBE_SUCCESS)
4598 return status;
4599
4600 if (lsc)
4601 return ixgbe_setup_internal_phy(hw);
4602
4603 return IXGBE_SUCCESS;
4604 }
4605
4606 /**
4607 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4608 * @hw: pointer to hardware structure
4609 * @speed: new link speed
4610 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4611 *
4612 * Setup internal/external PHY link speed based on link speed, then set
4613 * external PHY auto advertised link speed.
4614 *
4615 * Returns error status for any failure
4616 **/
4617 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4618 ixgbe_link_speed speed,
4619 bool autoneg_wait_to_complete)
4620 {
4621 s32 status;
4622 ixgbe_link_speed force_speed;
4623
4624 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4625
4626 	/* Set up the internal/external PHY link speed to iXFI (10G); if only
4627 	 * 1G is auto-advertised, set up a KX link instead.
4628 	 */
4629 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4630 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4631 else
4632 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4633
4634 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4635 */
4636 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4637 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4638 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4639
4640 if (status != IXGBE_SUCCESS)
4641 return status;
4642 }
4643
4644 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4645 }
4646
4647 /**
4648 * ixgbe_check_link_t_X550em - Determine link and speed status
4649 * @hw: pointer to hardware structure
4650 * @speed: pointer to link speed
4651 * @link_up: TRUE when link is up
4652 * @link_up_wait_to_complete: bool used to wait for link up or not
4653 *
4654 * Check that both the MAC and X557 external PHY have link.
4655 **/
4656 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4657 bool *link_up, bool link_up_wait_to_complete)
4658 {
4659 u32 status;
4660 u16 i, autoneg_status = 0;
4661
4662 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4663 return IXGBE_ERR_CONFIG;
4664
4665 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4666 link_up_wait_to_complete);
4667
4668 /* If check link fails or MAC link is not up, then return */
4669 if (status != IXGBE_SUCCESS || !(*link_up))
4670 return status;
4671
4672 	/* MAC link is up, so check the external X557 PHY link. The PHY link
4673 	 * status bit is latching low: a single read only detects a link drop,
4674 	 * not the current state of the link, so the register is read
4675 	 * back-to-back.
4676 */
4677 for (i = 0; i < 2; i++) {
4678 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4679 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4680 &autoneg_status);
4681
4682 if (status != IXGBE_SUCCESS)
4683 return status;
4684 }
4685
4686 /* If external PHY link is not up, then indicate link not up */
4687 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4688 *link_up = FALSE;
4689
4690 return IXGBE_SUCCESS;
4691 }
4692
4693 /**
4694 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4695 * @hw: pointer to hardware structure
4696 **/
4697 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4698 {
4699 s32 status;
4700
4701 status = ixgbe_reset_phy_generic(hw);
4702
4703 if (status != IXGBE_SUCCESS)
4704 return status;
4705
4706 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4707 return ixgbe_enable_lasi_ext_t_x550em(hw);
4708 }
4709
4710 /**
4711 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4712 * @hw: pointer to hardware structure
4713 * @led_idx: led number to turn on
4714 **/
4715 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4716 {
4717 u16 phy_data;
4718
4719 DEBUGFUNC("ixgbe_led_on_t_X550em");
4720
4721 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4722 return IXGBE_ERR_PARAM;
4723
4724 /* To turn on the LED, set mode to ON. */
4725 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4726 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4727 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4728 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4729 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4730
4731 /* Some designs have the LEDs wired to the MAC */
4732 return ixgbe_led_on_generic(hw, led_idx);
4733 }
4734
4735 /**
4736 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4737 * @hw: pointer to hardware structure
4738 * @led_idx: led number to turn off
4739 **/
4740 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4741 {
4742 u16 phy_data;
4743
4744 DEBUGFUNC("ixgbe_led_off_t_X550em");
4745
4746 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4747 return IXGBE_ERR_PARAM;
4748
4749 	/* To turn off the LED, clear the manual ON mode. */
4750 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4751 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4752 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4753 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4754 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4755
4756 /* Some designs have the LEDs wired to the MAC */
4757 return ixgbe_led_off_generic(hw, led_idx);
4758 }
4759
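/*
 * Usage sketch (hypothetical "example_identify_port"): a port-identify
 * request would typically toggle one LED with the two helpers above;
 * msec_delay() is the OS-abstraction delay used by the shared code.
 */
#if 0
static void example_identify_port(struct ixgbe_hw *hw, u32 led_idx)
{
	int i;

	for (i = 0; i < 5; i++) {
		(void)ixgbe_led_on_t_X550em(hw, led_idx);
		msec_delay(500);
		(void)ixgbe_led_off_t_X550em(hw, led_idx);
		msec_delay(500);
	}
}
#endif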
4760 /**
4761 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4762 * @hw: pointer to the HW structure
4763 * @maj: driver version major number
4764 * @min: driver version minor number
4765 * @build: driver version build number
4766 * @sub: driver version sub build number
4767 * @len: length of driver_ver string
4768 * @driver_ver: driver string
4769 *
4770 * Sends driver version number to firmware through the manageability
4771 * block. On success return IXGBE_SUCCESS
4772 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4773 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4774 **/
4775 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4776 u8 build, u8 sub, u16 len, const char *driver_ver)
4777 {
4778 struct ixgbe_hic_drv_info2 fw_cmd;
4779 s32 ret_val = IXGBE_SUCCESS;
4780 int i;
4781
4782 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4783
4784 if ((len == 0) || (driver_ver == NULL) ||
4785 (len > sizeof(fw_cmd.driver_string)))
4786 return IXGBE_ERR_INVALID_ARGUMENT;
4787
4788 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4789 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4790 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4791 fw_cmd.port_num = (u8)hw->bus.func;
4792 fw_cmd.ver_maj = maj;
4793 fw_cmd.ver_min = min;
4794 fw_cmd.ver_build = build;
4795 fw_cmd.ver_sub = sub;
4796 fw_cmd.hdr.checksum = 0;
4797 memcpy(fw_cmd.driver_string, driver_ver, len);
4798 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4799 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4800
4801 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4802 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4803 sizeof(fw_cmd),
4804 IXGBE_HI_COMMAND_TIMEOUT,
4805 TRUE);
4806 if (ret_val != IXGBE_SUCCESS)
4807 continue;
4808
4809 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4810 FW_CEM_RESP_STATUS_SUCCESS)
4811 ret_val = IXGBE_SUCCESS;
4812 else
4813 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4814
4815 break;
4816 }
4817
4818 return ret_val;
4819 }
4820
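/*
 * Usage sketch (hypothetical version numbers and string): the attach path
 * can report the driver version to firmware once the host interface is
 * usable.  len bytes of driver_ver are copied verbatim and must fit within
 * fw_cmd.driver_string.
 */
#if 0
static void example_report_driver_version(struct ixgbe_hw *hw)
{
	static const char ver_str[] = "1.0.0-example";

	(void)ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
					sizeof(ver_str), ver_str);
}
#endif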