1 /******************************************************************************
2
3 Copyright (c) 2001-2013, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 247822 2013-03-04 23:07:40Z jfv $*/
34 /*$NetBSD: ixgbe_82598.c,v 1.4 2015/04/24 07:00:51 msaitoh Exp $*/
35
36 #include "ixgbe_type.h"
37 #include "ixgbe_82598.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41
42 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
43 ixgbe_link_speed *speed,
44 bool *autoneg);
45 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
46 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
47 bool autoneg_wait_to_complete);
48 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
49 ixgbe_link_speed *speed, bool *link_up,
50 bool link_up_wait_to_complete);
51 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
52 ixgbe_link_speed speed,
53 bool autoneg_wait_to_complete);
54 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
55 ixgbe_link_speed speed,
56 bool autoneg_wait_to_complete);
57 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
58 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
59 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
60 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
61 u32 headroom, int strategy);
62 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
63 u8 *sff8472_data);
64 /**
65 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
66 * @hw: pointer to the HW structure
67 *
68 * The defaults for 82598 should be in the range of 50us to 50ms;
69 * however, the hardware default for these parts is 500us to 1ms, which is
70 * less than the 10ms recommended by the pci-e spec. To address this we need to
71 * increase the value to either 10ms to 250ms for capability version 1 config,
72 * or 16ms to 55ms for version 2.
73 **/
74 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
75 {
76 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
77 u16 pcie_devctl2;
78
79 /* only take action if timeout value is defaulted to 0 */
80 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
81 goto out;
82
83 /*
84 * if capabilities version is type 1 we can write the
85 * timeout of 10ms to 250ms through the GCR register
86 */
87 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
88 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
89 goto out;
90 }
91
92 /*
93 * for version 2 capabilities we need to write the config space
94 * directly in order to set the completion timeout value in the
95 * 16ms to 55ms range
96 */
97 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
98 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
99 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
100 out:
101 /* disable completion timeout resend */
102 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
103 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
104 }
105
106 /**
107 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
108 * @hw: pointer to hardware structure
109 *
110 * Initialize the function pointers and assign the MAC type for 82598.
111 * Does not touch the hardware.
112 **/
113 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
114 {
115 struct ixgbe_mac_info *mac = &hw->mac;
116 struct ixgbe_phy_info *phy = &hw->phy;
117 s32 ret_val;
118
119 DEBUGFUNC("ixgbe_init_ops_82598");
120
121 ret_val = ixgbe_init_phy_ops_generic(hw);
122 ret_val = ixgbe_init_ops_generic(hw);
123
124 /* PHY */
125 phy->ops.init = &ixgbe_init_phy_ops_82598;
126
127 /* MAC */
128 mac->ops.start_hw = &ixgbe_start_hw_82598;
129 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
130 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
131 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
132 mac->ops.get_supported_physical_layer =
133 &ixgbe_get_supported_physical_layer_82598;
134 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
135 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
136 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
137
138 /* RAR, Multicast, VLAN */
139 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
140 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
141 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
142 mac->ops.set_vlvf = NULL;
143 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
144
145 /* Flow Control */
146 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
147
148 mac->mcft_size = 128;
149 mac->vft_size = 128;
150 mac->num_rar_entries = 16;
151 mac->rx_pb_size = 512;
152 mac->max_tx_queues = 32;
153 mac->max_rx_queues = 64;
154 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
155
156 /* SFP+ Module */
157 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
158 phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
159
160 /* Link */
161 mac->ops.check_link = &ixgbe_check_mac_link_82598;
162 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
163 mac->ops.flap_tx_laser = NULL;
164 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
165 mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
166
167 /* Manageability interface */
168 mac->ops.set_fw_drv_ver = NULL;
169
170 return ret_val;
171 }
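/*
 * Illustrative sketch only (not part of the driver): once the ops table
 * above has been filled in, callers are expected to go through the
 * function pointers rather than call the 82598 routines directly.  A
 * minimal bring-up might look like the following, assuming the caller
 * already owns a fully set up struct ixgbe_hw:
 *
 *	ixgbe_init_ops_82598(hw);
 *	hw->mac.ops.reset_hw(hw);
 *	hw->mac.ops.start_hw(hw);
 *	hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL, FALSE);
 */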
172
173 /**
174 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
175 * @hw: pointer to hardware structure
176 *
177 * Initialize any function pointers that were not able to be
178 * set during init_shared_code because the PHY/SFP type was
179 * not known. Perform the SFP init if necessary.
180 *
181 **/
182 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
183 {
184 struct ixgbe_mac_info *mac = &hw->mac;
185 struct ixgbe_phy_info *phy = &hw->phy;
186 s32 ret_val = IXGBE_SUCCESS;
187 u16 list_offset, data_offset;
188
189 DEBUGFUNC("ixgbe_init_phy_ops_82598");
190
191 /* Identify the PHY */
192 phy->ops.identify(hw);
193
194 /* Overwrite the link function pointers if copper PHY */
195 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
196 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
197 mac->ops.get_link_capabilities =
198 &ixgbe_get_copper_link_capabilities_generic;
199 }
200
201 switch (hw->phy.type) {
202 case ixgbe_phy_tn:
203 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
204 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
205 phy->ops.get_firmware_version =
206 &ixgbe_get_phy_firmware_version_tnx;
207 break;
208 case ixgbe_phy_nl:
209 phy->ops.reset = &ixgbe_reset_phy_nl;
210
211 /* Call SFP+ identify routine to get the SFP+ module type */
212 ret_val = phy->ops.identify_sfp(hw);
213 if (ret_val != IXGBE_SUCCESS)
214 goto out;
215 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
216 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
217 goto out;
218 }
219
220 /* Check to see if SFP+ module is supported */
221 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
222 &list_offset,
223 &data_offset);
224 if (ret_val != IXGBE_SUCCESS) {
225 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
226 goto out;
227 }
228 break;
229 default:
230 break;
231 }
232
233 out:
234 return ret_val;
235 }
236
237 /**
238 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
239 * @hw: pointer to hardware structure
240 *
241 * Starts the hardware using the generic start_hw function.
242 * Disables relaxed ordering, then sets the PCIe completion timeout.
243 *
244 **/
245 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
246 {
247 u32 regval;
248 u32 i;
249 s32 ret_val = IXGBE_SUCCESS;
250
251 DEBUGFUNC("ixgbe_start_hw_82598");
252
253 ret_val = ixgbe_start_hw_generic(hw);
254
255 /* Disable relaxed ordering */
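	/*
	 * 82598 starts out with DCA Tx/Rx write relaxed ordering turned off
	 * here; it can be turned back on later through
	 * ixgbe_enable_relaxed_ordering_82598() further down in this file.
	 */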
256 for (i = 0; ((i < hw->mac.max_tx_queues) &&
257 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
258 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
259 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
260 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
261 }
262
263 for (i = 0; ((i < hw->mac.max_rx_queues) &&
264 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
265 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
266 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
267 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
268 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
269 }
270
271 /* set the completion timeout for interface */
272 if (ret_val == IXGBE_SUCCESS)
273 ixgbe_set_pcie_completion_timeout(hw);
274
275 return ret_val;
276 }
277
278 /**
279 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
280 * @hw: pointer to hardware structure
281 * @speed: pointer to link speed
282 * @autoneg: TRUE when auto-negotiation is enabled (output)
283 *
284 * Determines the link capabilities by reading the AUTOC register.
285 **/
286 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
287 ixgbe_link_speed *speed,
288 bool *autoneg)
289 {
290 s32 status = IXGBE_SUCCESS;
291 u32 autoc = 0;
292
293 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
294
295 /*
296 * Determine link capabilities based on the stored value of AUTOC,
297 * which represents EEPROM defaults. If AUTOC value has not been
298 * stored, use the current register value.
299 */
300 if (hw->mac.orig_link_settings_stored)
301 autoc = hw->mac.orig_autoc;
302 else
303 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
304
305 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
306 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
307 *speed = IXGBE_LINK_SPEED_1GB_FULL;
308 *autoneg = FALSE;
309 break;
310
311 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
312 *speed = IXGBE_LINK_SPEED_10GB_FULL;
313 *autoneg = FALSE;
314 break;
315
316 case IXGBE_AUTOC_LMS_1G_AN:
317 *speed = IXGBE_LINK_SPEED_1GB_FULL;
318 *autoneg = TRUE;
319 break;
320
321 case IXGBE_AUTOC_LMS_KX4_AN:
322 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
323 *speed = IXGBE_LINK_SPEED_UNKNOWN;
324 if (autoc & IXGBE_AUTOC_KX4_SUPP)
325 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
326 if (autoc & IXGBE_AUTOC_KX_SUPP)
327 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
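		/*
		 * e.g. with both KX4 and KX support advertised, *speed ends
		 * up as (10GB_FULL | 1GB_FULL) and autoneg selects the rate.
		 */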
328 *autoneg = TRUE;
329 break;
330
331 default:
332 status = IXGBE_ERR_LINK_SETUP;
333 break;
334 }
335
336 return status;
337 }
338
339 /**
340 * ixgbe_get_media_type_82598 - Determines media type
341 * @hw: pointer to hardware structure
342 *
343 * Returns the media type (fiber, copper, backplane)
344 **/
345 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
346 {
347 enum ixgbe_media_type media_type;
348
349 DEBUGFUNC("ixgbe_get_media_type_82598");
350
351 /* Detect if there is a copper PHY attached. */
352 switch (hw->phy.type) {
353 case ixgbe_phy_cu_unknown:
354 case ixgbe_phy_tn:
355 media_type = ixgbe_media_type_copper;
356 goto out;
357 default:
358 break;
359 }
360
361 /* Media type for I82598 is based on device ID */
362 switch (hw->device_id) {
363 case IXGBE_DEV_ID_82598:
364 case IXGBE_DEV_ID_82598_BX:
365 /* Default device ID is mezzanine card KX/KX4 */
366 media_type = ixgbe_media_type_backplane;
367 break;
368 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
369 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
370 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
371 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
372 case IXGBE_DEV_ID_82598EB_XF_LR:
373 case IXGBE_DEV_ID_82598EB_SFP_LOM:
374 media_type = ixgbe_media_type_fiber;
375 break;
376 case IXGBE_DEV_ID_82598EB_CX4:
377 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
378 media_type = ixgbe_media_type_cx4;
379 break;
380 case IXGBE_DEV_ID_82598AT:
381 case IXGBE_DEV_ID_82598AT2:
382 media_type = ixgbe_media_type_copper;
383 break;
384 default:
385 media_type = ixgbe_media_type_unknown;
386 break;
387 }
388 out:
389 return media_type;
390 }
391
392 /**
393 * ixgbe_fc_enable_82598 - Enable flow control
394 * @hw: pointer to hardware structure
395 *
396 * Enable flow control according to the current settings.
397 **/
398 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
399 {
400 s32 ret_val = IXGBE_SUCCESS;
401 u32 fctrl_reg;
402 u32 rmcs_reg;
403 u32 reg;
404 u32 fcrtl, fcrth;
405 u32 link_speed = 0;
406 int i;
407 bool link_up;
408
409 DEBUGFUNC("ixgbe_fc_enable_82598");
410
411 /* Validate the water mark configuration */
412 if (!hw->fc.pause_time) {
413 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
414 goto out;
415 }
416
417 /* Low water mark of zero causes XOFF floods */
418 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
419 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
420 hw->fc.high_water[i]) {
421 if (!hw->fc.low_water[i] ||
422 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
423 DEBUGOUT("Invalid water mark configuration\n");
424 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
425 goto out;
426 }
427 }
428 }
429
430 /*
431 * On 82598, having Rx flow control enabled causes resets while
432 * operating at 1G, so if it is on, turn it off once we know the
433 * link_speed. For more details see the 82598 Specification Update.
434 */
435 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
436 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
437 switch (hw->fc.requested_mode) {
438 case ixgbe_fc_full:
439 hw->fc.requested_mode = ixgbe_fc_tx_pause;
440 break;
441 case ixgbe_fc_rx_pause:
442 hw->fc.requested_mode = ixgbe_fc_none;
443 break;
444 default:
445 /* no change */
446 break;
447 }
448 }
449
450 /* Negotiate the fc mode to use */
451 ixgbe_fc_autoneg(hw);
452
453 /* Disable any previous flow control settings */
454 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
455 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
456
457 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
458 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
459
460 /*
461 * The possible values of fc.current_mode are:
462 * 0: Flow control is completely disabled
463 * 1: Rx flow control is enabled (we can receive pause frames,
464 * but not send pause frames).
465 * 2: Tx flow control is enabled (we can send pause frames but
466 * we do not support receiving pause frames).
467 * 3: Both Rx and Tx flow control (symmetric) are enabled.
468 * other: Invalid.
469 */
470 switch (hw->fc.current_mode) {
471 case ixgbe_fc_none:
472 /*
473 * Flow control is disabled by software override or autoneg.
474 * The code below will actually disable it in the HW.
475 */
476 break;
477 case ixgbe_fc_rx_pause:
478 /*
479 * Rx Flow control is enabled and Tx Flow control is
480 * disabled by software override. Since there really
481 * isn't a way to advertise that we are capable of RX
482 * Pause ONLY, we will advertise that we support both
483 * symmetric and asymmetric Rx PAUSE. Later, we will
484 * disable the adapter's ability to send PAUSE frames.
485 */
486 fctrl_reg |= IXGBE_FCTRL_RFCE;
487 break;
488 case ixgbe_fc_tx_pause:
489 /*
490 * Tx Flow control is enabled, and Rx Flow control is
491 * disabled by software override.
492 */
493 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
494 break;
495 case ixgbe_fc_full:
496 /* Flow control (both Rx and Tx) is enabled by SW override. */
497 fctrl_reg |= IXGBE_FCTRL_RFCE;
498 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
499 break;
500 default:
501 DEBUGOUT("Flow control param set incorrectly\n");
502 ret_val = IXGBE_ERR_CONFIG;
503 goto out;
504 break;
505 }
506
507 /* Set 802.3x based flow control settings. */
508 fctrl_reg |= IXGBE_FCTRL_DPF;
509 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
510 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
511
512 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
513 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
514 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
515 hw->fc.high_water[i]) {
516 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
517 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
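			/*
			 * The water marks are kept in KB units; the shift by
			 * 10 converts them to the byte counts programmed into
			 * FCRTL/FCRTH, with the XON-enable and FC-enable bits
			 * OR'd in.
			 */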
518 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
519 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
520 } else {
521 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
522 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
523 }
524
525 }
526
527 /* Configure pause time (2 TCs per register) */
528 reg = hw->fc.pause_time * 0x00010001;
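	/*
	 * Multiplying the 16-bit pause time by 0x00010001 replicates it into
	 * both halves of the word, so each 32-bit FCTTV write programs the
	 * pause time for two traffic classes at once.
	 */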
529 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
530 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
531
532 /* Configure flow control refresh threshold value */
533 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
534
535 out:
536 return ret_val;
537 }
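/*
 * Illustrative sketch only (not part of the driver): callers normally fill
 * in the hw->fc fields before invoking this routine, for example (the
 * values below are placeholders, not recommended settings):
 *
 *	hw->fc.requested_mode = ixgbe_fc_full;
 *	hw->fc.pause_time = 0x680;
 *	hw->fc.low_water[0] = low_kb;	(non-zero and below high_water[0])
 *	hw->fc.high_water[0] = high_kb;
 *	ixgbe_fc_enable_82598(hw);
 */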
538
539 /**
540 * ixgbe_start_mac_link_82598 - Configures MAC link settings
541 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
542 *
543 * Configures link settings based on values in the ixgbe_hw struct.
544 * Restarts the link. Performs autonegotiation if needed.
545 **/
546 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
547 bool autoneg_wait_to_complete)
548 {
549 u32 autoc_reg;
550 u32 links_reg;
551 u32 i;
552 s32 status = IXGBE_SUCCESS;
553
554 DEBUGFUNC("ixgbe_start_mac_link_82598");
555
556 /* Restart link */
557 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
558 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
559 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
560
561 /* Only poll for autoneg to complete if specified to do so */
562 if (autoneg_wait_to_complete) {
563 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
564 IXGBE_AUTOC_LMS_KX4_AN ||
565 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
566 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
567 links_reg = 0; /* Just in case Autoneg time = 0 */
568 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
569 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
570 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
571 break;
572 msec_delay(100);
573 }
574 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
575 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
576 DEBUGOUT("Autonegotiation did not complete.\n");
577 }
578 }
579 }
580
581 /* Add delay to filter out noise during initial link setup */
582 msec_delay(50);
583
584 return status;
585 }
586
587 /**
588 * ixgbe_validate_link_ready - Function looks for phy link
589 * @hw: pointer to hardware structure
590 *
591 * Function indicates success when phy link is available. If phy is not ready
592 * within 5 seconds of MAC indicating link, the function returns error.
593 **/
594 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
595 {
596 u32 timeout;
597 u16 an_reg;
598
599 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
600 return IXGBE_SUCCESS;
601
602 for (timeout = 0;
603 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
604 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
605 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
606
607 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
608 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
609 break;
610
611 msec_delay(100);
612 }
613
614 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
615 DEBUGOUT("Link was indicated but link is down\n");
616 return IXGBE_ERR_LINK_SETUP;
617 }
618
619 return IXGBE_SUCCESS;
620 }
621
622 /**
623 * ixgbe_check_mac_link_82598 - Get link/speed status
624 * @hw: pointer to hardware structure
625 * @speed: pointer to link speed
626 * @link_up: TRUE if link is up, FALSE otherwise
627 * @link_up_wait_to_complete: bool used to wait for link up or not
628 *
629 * Reads the links register to determine if link is up and the current speed
630 **/
631 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
632 ixgbe_link_speed *speed, bool *link_up,
633 bool link_up_wait_to_complete)
634 {
635 u32 links_reg;
636 u32 i;
637 u16 link_reg, adapt_comp_reg;
638
639 DEBUGFUNC("ixgbe_check_mac_link_82598");
640
641 /*
642 * SERDES PHY requires us to read link status from undocumented
643 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
644 * indicates link down. 0xC00C is read to check that the XAUI lanes
645 * are active. Bit 0 clear indicates active; set indicates inactive.
646 */
647 if (hw->phy.type == ixgbe_phy_nl) {
648 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
649 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
650 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
651 &adapt_comp_reg);
652 if (link_up_wait_to_complete) {
653 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
654 if ((link_reg & 1) &&
655 ((adapt_comp_reg & 1) == 0)) {
656 *link_up = TRUE;
657 break;
658 } else {
659 *link_up = FALSE;
660 }
661 msec_delay(100);
662 hw->phy.ops.read_reg(hw, 0xC79F,
663 IXGBE_TWINAX_DEV,
664 &link_reg);
665 hw->phy.ops.read_reg(hw, 0xC00C,
666 IXGBE_TWINAX_DEV,
667 &adapt_comp_reg);
668 }
669 } else {
670 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
671 *link_up = TRUE;
672 else
673 *link_up = FALSE;
674 }
675
676 if (*link_up == FALSE)
677 goto out;
678 }
679
680 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
681 if (link_up_wait_to_complete) {
682 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
683 if (links_reg & IXGBE_LINKS_UP) {
684 *link_up = TRUE;
685 break;
686 } else {
687 *link_up = FALSE;
688 }
689 msec_delay(100);
690 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
691 }
692 } else {
693 if (links_reg & IXGBE_LINKS_UP)
694 *link_up = TRUE;
695 else
696 *link_up = FALSE;
697 }
698
699 if (links_reg & IXGBE_LINKS_SPEED)
700 *speed = IXGBE_LINK_SPEED_10GB_FULL;
701 else
702 *speed = IXGBE_LINK_SPEED_1GB_FULL;
703
704 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
705 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
706 *link_up = FALSE;
707
708 out:
709 return IXGBE_SUCCESS;
710 }
711
712 /**
713 * ixgbe_setup_mac_link_82598 - Set MAC link speed
714 * @hw: pointer to hardware structure
715 * @speed: new link speed
716 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
717 *
718 * Set the link speed in the AUTOC register and restarts link.
719 **/
720 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
721 ixgbe_link_speed speed,
722 bool autoneg_wait_to_complete)
723 {
724 bool autoneg = FALSE;
725 s32 status = IXGBE_SUCCESS;
726 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
727 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
728 u32 autoc = curr_autoc;
729 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
730
731 DEBUGFUNC("ixgbe_setup_mac_link_82598");
732
733 /* Check to see if speed passed in is supported. */
734 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
735 speed &= link_capabilities;
736
737 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
738 status = IXGBE_ERR_LINK_SETUP;
739
740 /* Set KX4/KX support according to speed requested */
741 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
742 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
743 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
744 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
745 autoc |= IXGBE_AUTOC_KX4_SUPP;
746 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
747 autoc |= IXGBE_AUTOC_KX_SUPP;
748 if (autoc != curr_autoc)
749 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
750 }
751
752 if (status == IXGBE_SUCCESS) {
753 /*
754 * Setup and restart the link based on the new values in
755 * ixgbe_hw This will write the AUTOC register based on the new
756 * stored values
757 */
758 status = ixgbe_start_mac_link_82598(hw,
759 autoneg_wait_to_complete);
760 }
761
762 return status;
763 }
764
765
766 /**
767 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
768 * @hw: pointer to hardware structure
769 * @speed: new link speed
770 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
771 *
772 * Sets the link speed in the AUTOC register in the MAC and restarts link.
773 **/
774 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
775 ixgbe_link_speed speed,
776 bool autoneg_wait_to_complete)
777 {
778 s32 status;
779
780 DEBUGFUNC("ixgbe_setup_copper_link_82598");
781
782 /* Setup the PHY according to input speed */
783 status = hw->phy.ops.setup_link_speed(hw, speed,
784 autoneg_wait_to_complete);
785 /* Set up MAC */
786 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
787
788 return status;
789 }
790
791 /**
792 * ixgbe_reset_hw_82598 - Performs hardware reset
793 * @hw: pointer to hardware structure
794 *
795 * Resets the hardware by resetting the transmit and receive units, masks and
796 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
797 * reset.
798 **/
799 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
800 {
801 s32 status = IXGBE_SUCCESS;
802 s32 phy_status = IXGBE_SUCCESS;
803 u32 ctrl;
804 u32 gheccr;
805 u32 i;
806 u32 autoc;
807 u8 analog_val;
808
809 DEBUGFUNC("ixgbe_reset_hw_82598");
810
811 /* Call adapter stop to disable tx/rx and clear interrupts */
812 status = hw->mac.ops.stop_adapter(hw);
813 if (status != IXGBE_SUCCESS)
814 goto reset_hw_out;
815
816 /*
817 * Power up the Atlas Tx lanes if they are currently powered down.
818 * Atlas Tx lanes are powered down for MAC loopback tests, but
819 * they are not automatically restored on reset.
820 */
821 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
822 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
823 /* Enable Tx Atlas so packets can be transmitted again */
824 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
825 &analog_val);
826 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
827 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
828 analog_val);
829
830 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
831 &analog_val);
832 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
833 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
834 analog_val);
835
836 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
837 &analog_val);
838 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
839 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
840 analog_val);
841
842 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
843 &analog_val);
844 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
845 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
846 analog_val);
847 }
848
849 /* Reset PHY */
850 if (hw->phy.reset_disable == FALSE) {
851 /* PHY ops must be identified and initialized prior to reset */
852
853 /* Init PHY and function pointers, perform SFP setup */
854 phy_status = hw->phy.ops.init(hw);
855 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
856 goto reset_hw_out;
857 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
858 goto mac_reset_top;
859
860 hw->phy.ops.reset(hw);
861 }
862
863 mac_reset_top:
864 /*
865 * Issue global reset to the MAC. This needs to be a SW reset.
866 * If link reset is used, it might reset the MAC when mng is using it
867 */
868 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
869 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
870 IXGBE_WRITE_FLUSH(hw);
871
872 /* Poll for reset bit to self-clear indicating reset is complete */
873 for (i = 0; i < 10; i++) {
874 usec_delay(1);
875 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
876 if (!(ctrl & IXGBE_CTRL_RST))
877 break;
878 }
879 if (ctrl & IXGBE_CTRL_RST) {
880 status = IXGBE_ERR_RESET_FAILED;
881 DEBUGOUT("Reset polling failed to complete.\n");
882 }
883
884 msec_delay(50);
885
886 /*
887 * Double resets are required for recovery from certain error
888 * conditions. Between resets, it is necessary to stall to allow time
889 * for any pending HW events to complete.
890 */
891 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
892 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
893 goto mac_reset_top;
894 }
895
896 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
897 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
898 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
899
900 /*
901 * Store the original AUTOC value if it has not been
902 * stored off yet. Otherwise restore the stored original
903 * AUTOC value since the reset operation sets it back to defaults.
904 */
905 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
906 if (hw->mac.orig_link_settings_stored == FALSE) {
907 hw->mac.orig_autoc = autoc;
908 hw->mac.orig_link_settings_stored = TRUE;
909 } else if (autoc != hw->mac.orig_autoc) {
910 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
911 }
912
913 /* Store the permanent mac address */
914 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
915
916 /*
917 * Store MAC address from RAR0, clear receive address registers, and
918 * clear the multicast table
919 */
920 hw->mac.ops.init_rx_addrs(hw);
921
922 reset_hw_out:
923 if (phy_status != IXGBE_SUCCESS)
924 status = phy_status;
925
926 return status;
927 }
928
929 /**
930 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
931 * @hw: pointer to hardware struct
932 * @rar: receive address register index to associate with a VMDq index
933 * @vmdq: VMDq set index
934 **/
935 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
936 {
937 u32 rar_high;
938 u32 rar_entries = hw->mac.num_rar_entries;
939
940 DEBUGFUNC("ixgbe_set_vmdq_82598");
941
942 /* Make sure we are using a valid rar index range */
943 if (rar >= rar_entries) {
944 DEBUGOUT1("RAR index %d is out of range.\n", rar);
945 return IXGBE_ERR_INVALID_ARGUMENT;
946 }
947
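	/*
	 * The VMDq pool index is carried in the VIND field of the RAR
	 * "high" register: clear the field and program the new index.
	 */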
948 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
949 rar_high &= ~IXGBE_RAH_VIND_MASK;
950 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
951 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
952 return IXGBE_SUCCESS;
953 }
954
955 /**
956 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
957 * @hw: pointer to hardware struct
958 * @rar: receive address register index to associate with a VMDq index
959 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
960 **/
961 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
962 {
963 u32 rar_high;
964 u32 rar_entries = hw->mac.num_rar_entries;
965
966 UNREFERENCED_1PARAMETER(vmdq);
967
968 /* Make sure we are using a valid rar index range */
969 if (rar >= rar_entries) {
970 DEBUGOUT1("RAR index %d is out of range.\n", rar);
971 return IXGBE_ERR_INVALID_ARGUMENT;
972 }
973
974 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
975 if (rar_high & IXGBE_RAH_VIND_MASK) {
976 rar_high &= ~IXGBE_RAH_VIND_MASK;
977 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
978 }
979
980 return IXGBE_SUCCESS;
981 }
982
983 /**
984 * ixgbe_set_vfta_82598 - Set VLAN filter table
985 * @hw: pointer to hardware structure
986 * @vlan: VLAN id to write to VLAN filter
987 * @vind: VMDq output index that maps queue to VLAN id in VFTA
988 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
989 *
990 * Turn on/off specified VLAN in the VLAN filter table.
991 **/
992 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
993 bool vlan_on)
994 {
995 u32 regindex;
996 u32 bitindex;
997 u32 bits;
998 u32 vftabyte;
999
1000 DEBUGFUNC("ixgbe_set_vfta_82598");
1001
1002 if (vlan > 4095)
1003 return IXGBE_ERR_PARAM;
1004
1005 /* Determine 32-bit word position in array */
1006 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
1007
1008 /* Determine the location of the (VMD) queue index */
1009 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1010 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
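	/*
	 * Worked example (illustrative): for VLAN id 100, regindex = 3,
	 * vftabyte = 0 and bitindex = 16, so the 4-bit VMDq index is written
	 * into bits 19:16 of VFTAVIND(0, 3); further below, bit 4 of VFTA(3)
	 * becomes the valid bit for that VLAN id.
	 */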
1011
1012 /* Set the nibble for VMD queue index */
1013 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1014 bits &= (~(0x0F << bitindex));
1015 bits |= (vind << bitindex);
1016 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1017
1018 /* Determine the location of the bit for this VLAN id */
1019 bitindex = vlan & 0x1F; /* lower five bits */
1020
1021 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1022 if (vlan_on)
1023 /* Turn on this VLAN id */
1024 bits |= (1 << bitindex);
1025 else
1026 /* Turn off this VLAN id */
1027 bits &= ~(1 << bitindex);
1028 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1029
1030 return IXGBE_SUCCESS;
1031 }
1032
1033 /**
1034 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1035 * @hw: pointer to hardware structure
1036 *
1037 * Clears the VLAN filter table, and the VMDq index associated with the filter
1038 **/
1039 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1040 {
1041 u32 offset;
1042 u32 vlanbyte;
1043
1044 DEBUGFUNC("ixgbe_clear_vfta_82598");
1045
1046 for (offset = 0; offset < hw->mac.vft_size; offset++)
1047 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1048
1049 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1050 for (offset = 0; offset < hw->mac.vft_size; offset++)
1051 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1052 0);
1053
1054 return IXGBE_SUCCESS;
1055 }
1056
1057 /**
1058 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1059 * @hw: pointer to hardware structure
1060 * @reg: analog register to read
1061 * @val: read value
1062 *
1063 * Performs read operation to Atlas analog register specified.
1064 **/
1065 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1066 {
1067 u32 atlas_ctl;
1068
1069 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1070
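	/*
	 * The analog register offset is placed in bits 15:8 of ATLASCTL and
	 * the access is kicked off; after a short delay the result is read
	 * back from the low byte of ATLASCTL.
	 */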
1071 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1072 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1073 IXGBE_WRITE_FLUSH(hw);
1074 usec_delay(10);
1075 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1076 *val = (u8)atlas_ctl;
1077
1078 return IXGBE_SUCCESS;
1079 }
1080
1081 /**
1082 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1083 * @hw: pointer to hardware structure
1084 * @reg: atlas register to write
1085 * @val: value to write
1086 *
1087 * Performs write operation to Atlas analog register specified.
1088 **/
1089 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1090 {
1091 u32 atlas_ctl;
1092
1093 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1094
1095 atlas_ctl = (reg << 8) | val;
1096 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1097 IXGBE_WRITE_FLUSH(hw);
1098 usec_delay(10);
1099
1100 return IXGBE_SUCCESS;
1101 }
1102
1103 /**
1104 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
1105 * @hw: pointer to hardware structure
1106 * @dev_addr: address to read from
1107 * @byte_offset: byte offset to read from dev_addr
1108 * @eeprom_data: value read
1109 *
1110 * Performs an 8 bit read operation to the SFP module's EEPROM over the I2C interface.
1111 **/
1112 static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1113 u8 byte_offset, u8 *eeprom_data)
1114 {
1115 s32 status = IXGBE_SUCCESS;
1116 u16 sfp_addr = 0;
1117 u16 sfp_data = 0;
1118 u16 sfp_stat = 0;
1119 u32 i;
1120
1121 DEBUGFUNC("ixgbe_read_i2c_phy_82598");
1122
1123 if (hw->phy.type == ixgbe_phy_nl) {
1124 /*
1125 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1126 * 0xC30D. These registers are used to talk to the SFP+
1127 * module's EEPROM through the SDA/SCL (I2C) interface.
1128 */
1129 sfp_addr = (dev_addr << 8) + byte_offset;
1130 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1131 hw->phy.ops.write_reg(hw,
1132 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1133 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1134 sfp_addr);
1135
1136 /* Poll status */
1137 for (i = 0; i < 100; i++) {
1138 hw->phy.ops.read_reg(hw,
1139 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1140 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1141 &sfp_stat);
1142 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1143 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1144 break;
1145 msec_delay(10);
1146 }
1147
1148 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1149 DEBUGOUT("EEPROM read did not pass.\n");
1150 status = IXGBE_ERR_SFP_NOT_PRESENT;
1151 goto out;
1152 }
1153
1154 /* Read data */
1155 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1156 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
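		/*
		 * The requested byte comes back in the upper byte of the
		 * SDA/SCL data register.
		 */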
1157
1158 *eeprom_data = (u8)(sfp_data >> 8);
1159 } else {
1160 status = IXGBE_ERR_PHY;
1161 }
1162
1163 out:
1164 return status;
1165 }
1166
1167 /**
1168 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1169 * @hw: pointer to hardware structure
1170 * @byte_offset: EEPROM byte offset to read
1171 * @eeprom_data: value read
1172 *
1173 * Performs an 8 bit read operation to the SFP module's EEPROM over the I2C interface.
1174 **/
1175 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1176 u8 *eeprom_data)
1177 {
1178 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1179 byte_offset, eeprom_data);
1180 }
1181
1182 /**
1183 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1184 * @hw: pointer to hardware structure
1185 * @byte_offset: byte offset at address 0xA2
1186 * @sff8472_data: value read
1187 *
1188 * Performs an 8 bit read operation to the SFP module's SFF-8472 data over I2C
1189 **/
1190 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1191 u8 *sff8472_data)
1192 {
1193 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1194 byte_offset, sff8472_data);
1195 }
1196
1197 /**
1198 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1199 * @hw: pointer to hardware structure
1200 *
1201 * Determines physical layer capabilities of the current configuration.
1202 **/
1203 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1204 {
1205 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1206 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1207 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1208 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1209 u16 ext_ability = 0;
1210
1211 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1212
1213 hw->phy.ops.identify(hw);
1214
1215 /* Copper PHY must be checked before AUTOC LMS to determine correct
1216 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1217 switch (hw->phy.type) {
1218 case ixgbe_phy_tn:
1219 case ixgbe_phy_cu_unknown:
1220 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1221 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1222 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1223 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1224 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1225 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1226 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1227 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1228 goto out;
1229 default:
1230 break;
1231 }
1232
1233 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1234 case IXGBE_AUTOC_LMS_1G_AN:
1235 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1236 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1237 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1238 else
1239 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1240 break;
1241 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1242 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1243 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1244 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1245 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1246 else /* XAUI */
1247 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1248 break;
1249 case IXGBE_AUTOC_LMS_KX4_AN:
1250 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1251 if (autoc & IXGBE_AUTOC_KX_SUPP)
1252 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1253 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1254 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1255 break;
1256 default:
1257 break;
1258 }
1259
1260 if (hw->phy.type == ixgbe_phy_nl) {
1261 hw->phy.ops.identify_sfp(hw);
1262
1263 switch (hw->phy.sfp_type) {
1264 case ixgbe_sfp_type_da_cu:
1265 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1266 break;
1267 case ixgbe_sfp_type_sr:
1268 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1269 break;
1270 case ixgbe_sfp_type_lr:
1271 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1272 break;
1273 default:
1274 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1275 break;
1276 }
1277 }
1278
1279 switch (hw->device_id) {
1280 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1281 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1282 break;
1283 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1284 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1285 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1286 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1287 break;
1288 case IXGBE_DEV_ID_82598EB_XF_LR:
1289 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1290 break;
1291 default:
1292 break;
1293 }
1294
1295 out:
1296 return physical_layer;
1297 }
1298
1299 /**
1300 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1301 * port devices.
1302 * @hw: pointer to the HW structure
1303 *
1304 * Calls common function and corrects issue with some single port devices
1305 * that enable LAN1 but not LAN0.
1306 **/
1307 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1308 {
1309 struct ixgbe_bus_info *bus = &hw->bus;
1310 u16 pci_gen = 0;
1311 u16 pci_ctrl2 = 0;
1312
1313 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1314
1315 ixgbe_set_lan_id_multi_port_pcie(hw);
1316
1317 /* check if LAN0 is disabled */
1318 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1319 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1320
1321 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1322
1323 /* if LAN0 is completely disabled force function to 0 */
1324 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1325 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1326 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1327
1328 bus->func = 0;
1329 }
1330 }
1331 }
1332
1333 /**
1334 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1335 * @hw: pointer to hardware structure
1336 *
1337 **/
1338 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1339 {
1340 u32 regval;
1341 u32 i;
1342
1343 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1344
1345 /* Enable relaxed ordering */
1346 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1347 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1348 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1349 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1350 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1351 }
1352
1353 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1354 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1355 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1356 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1357 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1358 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1359 }
1360
1361 }
1362
1363 /**
1364 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1365 * @hw: pointer to hardware structure
1366 * @num_pb: number of packet buffers to allocate
1367 * @headroom: reserve n KB of headroom
1368 * @strategy: packet buffer allocation strategy
1369 **/
1370 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1371 u32 headroom, int strategy)
1372 {
1373 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1374 u8 i = 0;
1375 UNREFERENCED_1PARAMETER(headroom);
1376
1377 if (!num_pb)
1378 return;
1379
1380 /* Setup Rx packet buffer sizes */
1381 switch (strategy) {
1382 case PBA_STRATEGY_WEIGHTED:
1383 /* Setup the first four at 80KB */
1384 rxpktsize = IXGBE_RXPBSIZE_80KB;
1385 for (; i < 4; i++)
1386 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1387 /* Setup the last four at 48KB...don't re-init i */
1388 rxpktsize = IXGBE_RXPBSIZE_48KB;
1389 /* Fall Through */
1390 case PBA_STRATEGY_EQUAL:
1391 default:
1392 /* Divide the remaining Rx packet buffer evenly among the TCs */
1393 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1394 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1395 break;
1396 }
1397
1398 /* Setup Tx packet buffer sizes */
1399 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1400 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1401
1402 return;
1403 }
1404