1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 292674 2015-12-23 22:45:17Z sbruno $*/
34 /*$NetBSD: ixgbe_82598.c,v 1.8 2016/12/02 10:42:04 msaitoh Exp $*/
35
36 #include "ixgbe_type.h"
37 #include "ixgbe_82598.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41
42 #define IXGBE_82598_MAX_TX_QUEUES 32
43 #define IXGBE_82598_MAX_RX_QUEUES 64
44 #define IXGBE_82598_RAR_ENTRIES 16
45 #define IXGBE_82598_MC_TBL_SIZE 128
46 #define IXGBE_82598_VFT_TBL_SIZE 128
47 #define IXGBE_82598_RX_PB_SIZE 512
48
49 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
50 ixgbe_link_speed *speed,
51 bool *autoneg);
52 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
53 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
54 bool autoneg_wait_to_complete);
55 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
56 ixgbe_link_speed *speed, bool *link_up,
57 bool link_up_wait_to_complete);
58 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
59 ixgbe_link_speed speed,
60 bool autoneg_wait_to_complete);
61 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
62 ixgbe_link_speed speed,
63 bool autoneg_wait_to_complete);
64 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
65 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
66 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
67 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
68 u32 headroom, int strategy);
69 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
70 u8 *sff8472_data);
71 /**
72 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
73 * @hw: pointer to the HW structure
74 *
75 * The defaults for 82598 should be in the range of 50us to 50ms,
76 * however the hardware default for these parts is 500us to 1ms which is less
77 * than the 10ms recommended by the pci-e spec. To address this we need to
78 * increase the value to either 10ms to 250ms for capability version 1 config,
79 * or 16ms to 55ms for version 2.
80 **/
81 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
82 {
83 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
84 u16 pcie_devctl2;
85
86 /* only take action if timeout value is defaulted to 0 */
87 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
88 goto out;
89
90 /*
91	 * if the capabilities version is type 1 we can write the
92 * timeout of 10ms to 250ms through the GCR register
93 */
94 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
95 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
96 goto out;
97 }
98
99 /*
100 * for version 2 capabilities we need to write the config space
101	 * directly in order to set the completion timeout value to the
102	 * 16ms to 55ms range
103 */
104 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
105 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
106 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
107 out:
108 /* disable completion timeout resend */
109 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
110 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
111 }
112
113 /**
114 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
115 * @hw: pointer to hardware structure
116 *
117 * Initialize the function pointers and assign the MAC type for 82598.
118 * Does not touch the hardware.
119 **/
120 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
121 {
122 struct ixgbe_mac_info *mac = &hw->mac;
123 struct ixgbe_phy_info *phy = &hw->phy;
124 s32 ret_val;
125
126 DEBUGFUNC("ixgbe_init_ops_82598");
127
128 ret_val = ixgbe_init_phy_ops_generic(hw);
129 ret_val = ixgbe_init_ops_generic(hw);
130
131 /* PHY */
132 phy->ops.init = ixgbe_init_phy_ops_82598;
133
134 /* MAC */
135 mac->ops.start_hw = ixgbe_start_hw_82598;
136 mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
137 mac->ops.reset_hw = ixgbe_reset_hw_82598;
138 mac->ops.get_media_type = ixgbe_get_media_type_82598;
139 mac->ops.get_supported_physical_layer =
140 ixgbe_get_supported_physical_layer_82598;
141 mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
142 mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
143 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
144 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
145
146 /* RAR, Multicast, VLAN */
147 mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
148 mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
149 mac->ops.set_vfta = ixgbe_set_vfta_82598;
150 mac->ops.set_vlvf = NULL;
151 mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
152
153 /* Flow Control */
154 mac->ops.fc_enable = ixgbe_fc_enable_82598;
155
156 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
157 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
158 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
159 mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
160 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
161 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
162 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
163
164 /* SFP+ Module */
165 phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
166 phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
167
168 /* Link */
169 mac->ops.check_link = ixgbe_check_mac_link_82598;
170 mac->ops.setup_link = ixgbe_setup_mac_link_82598;
171 mac->ops.flap_tx_laser = NULL;
172 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
173 mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
174
175 /* Manageability interface */
176 mac->ops.set_fw_drv_ver = NULL;
177
178 mac->ops.get_rtrup2tc = NULL;
179
180 return ret_val;
181 }
182
183 /**
184 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
185 * @hw: pointer to hardware structure
186 *
187 * Initialize any function pointers that were not able to be
188 * set during init_shared_code because the PHY/SFP type was
189 * not known. Perform the SFP init if necessary.
190 *
191 **/
192 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
193 {
194 struct ixgbe_mac_info *mac = &hw->mac;
195 struct ixgbe_phy_info *phy = &hw->phy;
196 s32 ret_val = IXGBE_SUCCESS;
197 u16 list_offset, data_offset;
198
199 DEBUGFUNC("ixgbe_init_phy_ops_82598");
200
201 /* Identify the PHY */
202 phy->ops.identify(hw);
203
204 /* Overwrite the link function pointers if copper PHY */
205 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
206 mac->ops.setup_link = ixgbe_setup_copper_link_82598;
207 mac->ops.get_link_capabilities =
208 ixgbe_get_copper_link_capabilities_generic;
209 }
210
211 switch (hw->phy.type) {
212 case ixgbe_phy_tn:
213 phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
214 phy->ops.check_link = ixgbe_check_phy_link_tnx;
215 phy->ops.get_firmware_version =
216 ixgbe_get_phy_firmware_version_tnx;
217 break;
218 case ixgbe_phy_nl:
219 phy->ops.reset = ixgbe_reset_phy_nl;
220
221 /* Call SFP+ identify routine to get the SFP+ module type */
222 ret_val = phy->ops.identify_sfp(hw);
223 if (ret_val != IXGBE_SUCCESS)
224 goto out;
225 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
226 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
227 goto out;
228 }
229
230 /* Check to see if SFP+ module is supported */
231 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
232 &list_offset,
233 &data_offset);
234 if (ret_val != IXGBE_SUCCESS) {
235 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
236 goto out;
237 }
238 break;
239 default:
240 break;
241 }
242
243 out:
244 return ret_val;
245 }
246
247 /**
248 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
249 * @hw: pointer to hardware structure
250 *
251 * Starts the hardware using the generic start_hw function.
252  * Disables relaxed ordering, then sets the PCIe completion timeout.
253 *
254 **/
255 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
256 {
257 u32 regval;
258 u32 i;
259 s32 ret_val = IXGBE_SUCCESS;
260
261 DEBUGFUNC("ixgbe_start_hw_82598");
262
263 ret_val = ixgbe_start_hw_generic(hw);
264 if (ret_val)
265 return ret_val;
266
267 /* Disable relaxed ordering */
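	/*
	 * Relaxed ordering stays disabled here as the conservative default;
	 * ixgbe_enable_relaxed_ordering_82598() later in this file can
	 * re-enable it.
	 */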
268 for (i = 0; ((i < hw->mac.max_tx_queues) &&
269 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
270 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
271 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
272 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
273 }
274
275 for (i = 0; ((i < hw->mac.max_rx_queues) &&
276 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
277 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
278 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
279 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
280 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
281 }
282
283 /* set the completion timeout for interface */
284 ixgbe_set_pcie_completion_timeout(hw);
285
286 return ret_val;
287 }
288
289 /**
290 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
291 * @hw: pointer to hardware structure
292 * @speed: pointer to link speed
293  * @autoneg: TRUE when autonegotiation is used, FALSE otherwise
294 *
295 * Determines the link capabilities by reading the AUTOC register.
296 **/
297 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
298 ixgbe_link_speed *speed,
299 bool *autoneg)
300 {
301 s32 status = IXGBE_SUCCESS;
302 u32 autoc = 0;
303
304 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
305
306 /*
307 * Determine link capabilities based on the stored value of AUTOC,
308 * which represents EEPROM defaults. If AUTOC value has not been
309 * stored, use the current register value.
310 */
311 if (hw->mac.orig_link_settings_stored)
312 autoc = hw->mac.orig_autoc;
313 else
314 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
315
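	/*
	 * The LMS (Link Mode Select) field of AUTOC tells us how the link is
	 * configured; derive the supported speeds and whether autonegotiation
	 * applies from it.
	 */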
316 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
317 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
318 *speed = IXGBE_LINK_SPEED_1GB_FULL;
319 *autoneg = FALSE;
320 break;
321
322 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
323 *speed = IXGBE_LINK_SPEED_10GB_FULL;
324 *autoneg = FALSE;
325 break;
326
327 case IXGBE_AUTOC_LMS_1G_AN:
328 *speed = IXGBE_LINK_SPEED_1GB_FULL;
329 *autoneg = TRUE;
330 break;
331
332 case IXGBE_AUTOC_LMS_KX4_AN:
333 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
334 *speed = IXGBE_LINK_SPEED_UNKNOWN;
335 if (autoc & IXGBE_AUTOC_KX4_SUPP)
336 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
337 if (autoc & IXGBE_AUTOC_KX_SUPP)
338 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
339 *autoneg = TRUE;
340 break;
341
342 default:
343 status = IXGBE_ERR_LINK_SETUP;
344 break;
345 }
346
347 return status;
348 }
349
350 /**
351 * ixgbe_get_media_type_82598 - Determines media type
352 * @hw: pointer to hardware structure
353 *
354 * Returns the media type (fiber, copper, backplane)
355 **/
356 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
357 {
358 enum ixgbe_media_type media_type;
359
360 DEBUGFUNC("ixgbe_get_media_type_82598");
361
362 /* Detect if there is a copper PHY attached. */
363 switch (hw->phy.type) {
364 case ixgbe_phy_cu_unknown:
365 case ixgbe_phy_tn:
366 media_type = ixgbe_media_type_copper;
367 goto out;
368 default:
369 break;
370 }
371
372 /* Media type for I82598 is based on device ID */
373 switch (hw->device_id) {
374 case IXGBE_DEV_ID_82598:
375 case IXGBE_DEV_ID_82598_BX:
376 /* Default device ID is mezzanine card KX/KX4 */
377 media_type = ixgbe_media_type_backplane;
378 break;
379 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
380 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
381 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
382 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
383 case IXGBE_DEV_ID_82598EB_XF_LR:
384 case IXGBE_DEV_ID_82598EB_SFP_LOM:
385 media_type = ixgbe_media_type_fiber;
386 break;
387 case IXGBE_DEV_ID_82598EB_CX4:
388 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
389 media_type = ixgbe_media_type_cx4;
390 break;
391 case IXGBE_DEV_ID_82598AT:
392 case IXGBE_DEV_ID_82598AT2:
393 media_type = ixgbe_media_type_copper;
394 break;
395 default:
396 media_type = ixgbe_media_type_unknown;
397 break;
398 }
399 out:
400 return media_type;
401 }
402
403 /**
404 * ixgbe_fc_enable_82598 - Enable flow control
405 * @hw: pointer to hardware structure
406 *
407 * Enable flow control according to the current settings.
408 **/
409 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
410 {
411 s32 ret_val = IXGBE_SUCCESS;
412 u32 fctrl_reg;
413 u32 rmcs_reg;
414 u32 reg;
415 u32 fcrtl, fcrth;
416 u32 link_speed = 0;
417 int i;
418 bool link_up;
419
420 DEBUGFUNC("ixgbe_fc_enable_82598");
421
422 /* Validate the water mark configuration */
423 if (!hw->fc.pause_time) {
424 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
425 goto out;
426 }
427
428 /* Low water mark of zero causes XOFF floods */
429 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
430 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
431 hw->fc.high_water[i]) {
432 if (!hw->fc.low_water[i] ||
433 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
434 DEBUGOUT("Invalid water mark configuration\n");
435 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
436 goto out;
437 }
438 }
439 }
440
441 /*
442	 * On 82598, having Rx FC on causes resets while doing 1G,
443	 * so if it is on, turn it off once we know link_speed. For
444	 * more details see the 82598 Specification Update.
445 */
446 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
447 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
448 switch (hw->fc.requested_mode) {
449 case ixgbe_fc_full:
450 hw->fc.requested_mode = ixgbe_fc_tx_pause;
451 break;
452 case ixgbe_fc_rx_pause:
453 hw->fc.requested_mode = ixgbe_fc_none;
454 break;
455 default:
456 /* no change */
457 break;
458 }
459 }
460
461 /* Negotiate the fc mode to use */
462 ixgbe_fc_autoneg(hw);
463
464 /* Disable any previous flow control settings */
465 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
466 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
467
468 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
469 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
470
471 /*
472 * The possible values of fc.current_mode are:
473 * 0: Flow control is completely disabled
474 * 1: Rx flow control is enabled (we can receive pause frames,
475 * but not send pause frames).
476 * 2: Tx flow control is enabled (we can send pause frames but
477 * we do not support receiving pause frames).
478 * 3: Both Rx and Tx flow control (symmetric) are enabled.
479 * other: Invalid.
480 */
481 switch (hw->fc.current_mode) {
482 case ixgbe_fc_none:
483 /*
484 * Flow control is disabled by software override or autoneg.
485 * The code below will actually disable it in the HW.
486 */
487 break;
488 case ixgbe_fc_rx_pause:
489 /*
490 * Rx Flow control is enabled and Tx Flow control is
491 * disabled by software override. Since there really
492 * isn't a way to advertise that we are capable of RX
493 * Pause ONLY, we will advertise that we support both
494 * symmetric and asymmetric Rx PAUSE. Later, we will
495 * disable the adapter's ability to send PAUSE frames.
496 */
497 fctrl_reg |= IXGBE_FCTRL_RFCE;
498 break;
499 case ixgbe_fc_tx_pause:
500 /*
501 * Tx Flow control is enabled, and Rx Flow control is
502 * disabled by software override.
503 */
504 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
505 break;
506 case ixgbe_fc_full:
507 /* Flow control (both Rx and Tx) is enabled by SW override. */
508 fctrl_reg |= IXGBE_FCTRL_RFCE;
509 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
510 break;
511 default:
512 DEBUGOUT("Flow control param set incorrectly\n");
513 ret_val = IXGBE_ERR_CONFIG;
514 goto out;
515 break;
516 }
517
518 /* Set 802.3x based flow control settings. */
519 fctrl_reg |= IXGBE_FCTRL_DPF;
520 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
521 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
522
523 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
524 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
525 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
526 hw->fc.high_water[i]) {
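			/*
			 * fc.low_water/high_water are kept in kilobytes;
			 * shifting left by 10 converts them to the byte
			 * counts that FCRTL/FCRTH expect.
			 */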
527 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
528 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
529 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
530 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
531 } else {
532 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
533 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
534 }
535
536 }
537
538 /* Configure pause time (2 TCs per register) */
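	/*
	 * Multiplying by 0x00010001 replicates the 16-bit pause time into
	 * both halves of the 32-bit FCTTV register, one per traffic class.
	 */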
539 reg = hw->fc.pause_time * 0x00010001;
540 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
541 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
542
543 /* Configure flow control refresh threshold value */
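	/*
	 * Refreshing at half the pause interval re-sends XOFF before the
	 * previous pause expires, so the link stays paused while congestion
	 * persists (assumed intent; not spelled out in the vendor code).
	 */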
544 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
545
546 out:
547 return ret_val;
548 }
549
550 /**
551 * ixgbe_start_mac_link_82598 - Configures MAC link settings
552 * @hw: pointer to hardware structure
553 *
554 * Configures link settings based on values in the ixgbe_hw struct.
555 * Restarts the link. Performs autonegotiation if needed.
556 **/
557 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
558 bool autoneg_wait_to_complete)
559 {
560 u32 autoc_reg;
561 u32 links_reg;
562 u32 i;
563 s32 status = IXGBE_SUCCESS;
564
565 DEBUGFUNC("ixgbe_start_mac_link_82598");
566
567 /* Restart link */
568 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
569 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
570 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
571
572 /* Only poll for autoneg to complete if specified to do so */
573 if (autoneg_wait_to_complete) {
574 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
575 IXGBE_AUTOC_LMS_KX4_AN ||
576 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
577 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
578 links_reg = 0; /* Just in case Autoneg time = 0 */
579 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
580 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
581 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
582 break;
583 msec_delay(100);
584 }
585 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
586 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
587 DEBUGOUT("Autonegotiation did not complete.\n");
588 }
589 }
590 }
591
592	/* Add delay to filter out noise during initial link setup */
593 msec_delay(50);
594
595 return status;
596 }
597
598 /**
599 * ixgbe_validate_link_ready - Function looks for phy link
600 * @hw: pointer to hardware structure
601 *
602 * Function indicates success when phy link is available. If phy is not ready
603  * within 5 seconds of MAC indicating link, the function returns an error.
604 **/
605 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
606 {
607 u32 timeout;
608 u16 an_reg;
609
610 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
611 return IXGBE_SUCCESS;
612
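	/*
	 * Poll the PHY autonegotiation status; each pass waits 100 ms, so
	 * the timeout constant corresponds to the ~5 second window noted in
	 * the function header.
	 */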
613 for (timeout = 0;
614 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
615 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
616 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
617
618 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
619 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
620 break;
621
622 msec_delay(100);
623 }
624
625 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
626 DEBUGOUT("Link was indicated but link is down\n");
627 return IXGBE_ERR_LINK_SETUP;
628 }
629
630 return IXGBE_SUCCESS;
631 }
632
633 /**
634 * ixgbe_check_mac_link_82598 - Get link/speed status
635 * @hw: pointer to hardware structure
636 * @speed: pointer to link speed
637  * @link_up: TRUE if link is up, FALSE otherwise
638 * @link_up_wait_to_complete: bool used to wait for link up or not
639 *
640 * Reads the links register to determine if link is up and the current speed
641 **/
642 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
643 ixgbe_link_speed *speed, bool *link_up,
644 bool link_up_wait_to_complete)
645 {
646 u32 links_reg;
647 u32 i;
648 u16 link_reg, adapt_comp_reg;
649
650 DEBUGFUNC("ixgbe_check_mac_link_82598");
651
652 /*
653 * SERDES PHY requires us to read link status from undocumented
654 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
655	 * indicates link down. 0xC00C is read to check that the XAUI lanes
656 * are active. Bit 0 clear indicates active; set indicates inactive.
657 */
658 if (hw->phy.type == ixgbe_phy_nl) {
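		/*
		 * The link status bit latches, so the register is read twice:
		 * the first read returns the latched value, the second the
		 * current state (assumed reason for the back-to-back reads).
		 */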
659 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
660 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
661 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
662 &adapt_comp_reg);
663 if (link_up_wait_to_complete) {
664 for (i = 0; i < hw->mac.max_link_up_time; i++) {
665 if ((link_reg & 1) &&
666 ((adapt_comp_reg & 1) == 0)) {
667 *link_up = TRUE;
668 break;
669 } else {
670 *link_up = FALSE;
671 }
672 msec_delay(100);
673 hw->phy.ops.read_reg(hw, 0xC79F,
674 IXGBE_TWINAX_DEV,
675 &link_reg);
676 hw->phy.ops.read_reg(hw, 0xC00C,
677 IXGBE_TWINAX_DEV,
678 &adapt_comp_reg);
679 }
680 } else {
681 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
682 *link_up = TRUE;
683 else
684 *link_up = FALSE;
685 }
686
687 if (*link_up == FALSE)
688 goto out;
689 }
690
691 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
692 if (link_up_wait_to_complete) {
693 for (i = 0; i < hw->mac.max_link_up_time; i++) {
694 if (links_reg & IXGBE_LINKS_UP) {
695 *link_up = TRUE;
696 break;
697 } else {
698 *link_up = FALSE;
699 }
700 msec_delay(100);
701 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
702 }
703 } else {
704 if (links_reg & IXGBE_LINKS_UP)
705 *link_up = TRUE;
706 else
707 *link_up = FALSE;
708 }
709
710 if (links_reg & IXGBE_LINKS_SPEED)
711 *speed = IXGBE_LINK_SPEED_10GB_FULL;
712 else
713 *speed = IXGBE_LINK_SPEED_1GB_FULL;
714
715 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
716 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
717 *link_up = FALSE;
718
719 out:
720 return IXGBE_SUCCESS;
721 }
722
723 /**
724 * ixgbe_setup_mac_link_82598 - Set MAC link speed
725 * @hw: pointer to hardware structure
726 * @speed: new link speed
727 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
728 *
729  * Sets the link speed in the AUTOC register and restarts the link.
730 **/
731 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
732 ixgbe_link_speed speed,
733 bool autoneg_wait_to_complete)
734 {
735 bool autoneg = FALSE;
736 s32 status = IXGBE_SUCCESS;
737 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
738 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
739 u32 autoc = curr_autoc;
740 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
741
742 DEBUGFUNC("ixgbe_setup_mac_link_82598");
743
744 /* Check to see if speed passed in is supported. */
745 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
746 speed &= link_capabilities;
747
748 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
749 status = IXGBE_ERR_LINK_SETUP;
750
751 /* Set KX4/KX support according to speed requested */
752 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
753 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
754 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
755 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
756 autoc |= IXGBE_AUTOC_KX4_SUPP;
757 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
758 autoc |= IXGBE_AUTOC_KX_SUPP;
759 if (autoc != curr_autoc)
760 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
761 }
762
763 if (status == IXGBE_SUCCESS) {
764 /*
765 * Setup and restart the link based on the new values in
766		 * ixgbe_hw. This will write the AUTOC register based on the new
767 * stored values
768 */
769 status = ixgbe_start_mac_link_82598(hw,
770 autoneg_wait_to_complete);
771 }
772
773 return status;
774 }
775
776
777 /**
778 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
779 * @hw: pointer to hardware structure
780 * @speed: new link speed
781 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
782 *
783 * Sets the link speed in the AUTOC register in the MAC and restarts link.
784 **/
785 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
786 ixgbe_link_speed speed,
787 bool autoneg_wait_to_complete)
788 {
789 s32 status;
790
791 DEBUGFUNC("ixgbe_setup_copper_link_82598");
792
793 /* Setup the PHY according to input speed */
794 status = hw->phy.ops.setup_link_speed(hw, speed,
795 autoneg_wait_to_complete);
796 /* Set up MAC */
797 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
798
799 return status;
800 }
801
802 /**
803 * ixgbe_reset_hw_82598 - Performs hardware reset
804 * @hw: pointer to hardware structure
805 *
806  * Resets the hardware by resetting the transmit and receive units, masking
807  * and clearing all interrupts, performing a PHY reset, and performing a
808  * link (MAC) reset.
809 **/
810 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
811 {
812 s32 status = IXGBE_SUCCESS;
813 s32 phy_status = IXGBE_SUCCESS;
814 u32 ctrl;
815 u32 gheccr;
816 u32 i;
817 u32 autoc;
818 u8 analog_val;
819
820 DEBUGFUNC("ixgbe_reset_hw_82598");
821
822 /* Call adapter stop to disable tx/rx and clear interrupts */
823 status = hw->mac.ops.stop_adapter(hw);
824 if (status != IXGBE_SUCCESS)
825 goto reset_hw_out;
826
827 /*
828 * Power up the Atlas Tx lanes if they are currently powered down.
829 * Atlas Tx lanes are powered down for MAC loopback tests, but
830 * they are not automatically restored on reset.
831 */
832 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
833 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
834 /* Enable Tx Atlas so packets can be transmitted again */
835 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
836 &analog_val);
837 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
838 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
839 analog_val);
840
841 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
842 &analog_val);
843 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
844 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
845 analog_val);
846
847 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
848 &analog_val);
849 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
850 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
851 analog_val);
852
853 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
854 &analog_val);
855 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
856 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
857 analog_val);
858 }
859
860 /* Reset PHY */
861 if (hw->phy.reset_disable == FALSE) {
862 /* PHY ops must be identified and initialized prior to reset */
863
864 /* Init PHY and function pointers, perform SFP setup */
865 phy_status = hw->phy.ops.init(hw);
866 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
867 goto reset_hw_out;
868 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
869 goto mac_reset_top;
870
871 hw->phy.ops.reset(hw);
872 }
873
874 mac_reset_top:
875 /*
876 * Issue global reset to the MAC. This needs to be a SW reset.
877 * If link reset is used, it might reset the MAC when mng is using it
878 */
879 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
880 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
881 IXGBE_WRITE_FLUSH(hw);
882
883 /* Poll for reset bit to self-clear indicating reset is complete */
884 for (i = 0; i < 10; i++) {
885 usec_delay(1);
886 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
887 if (!(ctrl & IXGBE_CTRL_RST))
888 break;
889 }
890 if (ctrl & IXGBE_CTRL_RST) {
891 status = IXGBE_ERR_RESET_FAILED;
892 DEBUGOUT("Reset polling failed to complete.\n");
893 }
894
895 msec_delay(50);
896
897 /*
898 * Double resets are required for recovery from certain error
899 * conditions. Between resets, it is necessary to stall to allow time
900 * for any pending HW events to complete.
901 */
902 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
903 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
904 goto mac_reset_top;
905 }
906
907 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
908 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
909 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
910
911 /*
912 * Store the original AUTOC value if it has not been
913 * stored off yet. Otherwise restore the stored original
914	 * AUTOC value since the reset operation sets it back to defaults.
915 */
916 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
917 if (hw->mac.orig_link_settings_stored == FALSE) {
918 hw->mac.orig_autoc = autoc;
919 hw->mac.orig_link_settings_stored = TRUE;
920 } else if (autoc != hw->mac.orig_autoc) {
921 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
922 }
923
924 /* Store the permanent mac address */
925 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
926
927 /*
928 * Store MAC address from RAR0, clear receive address registers, and
929 * clear the multicast table
930 */
931 hw->mac.ops.init_rx_addrs(hw);
932
933 reset_hw_out:
934 if (phy_status != IXGBE_SUCCESS)
935 status = phy_status;
936
937 return status;
938 }
939
940 /**
941  * ixgbe_set_vmdq_82598 - Associate a VMDq set index with an rx address
942 * @hw: pointer to hardware struct
943 * @rar: receive address register index to associate with a VMDq index
944 * @vmdq: VMDq set index
945 **/
946 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
947 {
948 u32 rar_high;
949 u32 rar_entries = hw->mac.num_rar_entries;
950
951 DEBUGFUNC("ixgbe_set_vmdq_82598");
952
953 /* Make sure we are using a valid rar index range */
954 if (rar >= rar_entries) {
955 DEBUGOUT1("RAR index %d is out of range.\n", rar);
956 return IXGBE_ERR_INVALID_ARGUMENT;
957 }
958
959 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
960 rar_high &= ~IXGBE_RAH_VIND_MASK;
961 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
962 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
963 return IXGBE_SUCCESS;
964 }
965
966 /**
967 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
968 * @hw: pointer to hardware struct
969 * @rar: receive address register index to associate with a VMDq index
970 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
971 **/
972 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
973 {
974 u32 rar_high;
975 u32 rar_entries = hw->mac.num_rar_entries;
976
977 UNREFERENCED_1PARAMETER(vmdq);
978
979 /* Make sure we are using a valid rar index range */
980 if (rar >= rar_entries) {
981 DEBUGOUT1("RAR index %d is out of range.\n", rar);
982 return IXGBE_ERR_INVALID_ARGUMENT;
983 }
984
985 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
986 if (rar_high & IXGBE_RAH_VIND_MASK) {
987 rar_high &= ~IXGBE_RAH_VIND_MASK;
988 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
989 }
990
991 return IXGBE_SUCCESS;
992 }
993
994 /**
995 * ixgbe_set_vfta_82598 - Set VLAN filter table
996 * @hw: pointer to hardware structure
997 * @vlan: VLAN id to write to VLAN filter
998 * @vind: VMDq output index that maps queue to VLAN id in VFTA
999 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
1000 *
1001 * Turn on/off specified VLAN in the VLAN filter table.
1002 **/
1003 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1004 bool vlan_on)
1005 {
1006 u32 regindex;
1007 u32 bitindex;
1008 u32 bits;
1009 u32 vftabyte;
1010
1011 DEBUGFUNC("ixgbe_set_vfta_82598");
1012
1013 if (vlan > 4095)
1014 return IXGBE_ERR_PARAM;
1015
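	/*
	 * Example: VLAN 1000 (0x3E8) gives regindex = 31 and a VFTA bit
	 * index of 8, with vftabyte = 1 and nibble offset 0 in VFTAVIND.
	 */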
1016 /* Determine 32-bit word position in array */
1017 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
1018
1019 /* Determine the location of the (VMD) queue index */
1020 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1021 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
1022
1023 /* Set the nibble for VMD queue index */
1024 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1025 bits &= (~(0x0F << bitindex));
1026 bits |= (vind << bitindex);
1027 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1028
1029 /* Determine the location of the bit for this VLAN id */
1030 bitindex = vlan & 0x1F; /* lower five bits */
1031
1032 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1033 if (vlan_on)
1034 /* Turn on this VLAN id */
1035 bits |= (1 << bitindex);
1036 else
1037 /* Turn off this VLAN id */
1038 bits &= ~(1 << bitindex);
1039 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1040
1041 return IXGBE_SUCCESS;
1042 }
1043
1044 /**
1045 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1046 * @hw: pointer to hardware structure
1047 *
1048  * Clears the VLAN filter table, and the VMDq index associated with the filter
1049 **/
1050 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1051 {
1052 u32 offset;
1053 u32 vlanbyte;
1054
1055 DEBUGFUNC("ixgbe_clear_vfta_82598");
1056
1057 for (offset = 0; offset < hw->mac.vft_size; offset++)
1058 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1059
1060 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1061 for (offset = 0; offset < hw->mac.vft_size; offset++)
1062 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1063 0);
1064
1065 return IXGBE_SUCCESS;
1066 }
1067
1068 /**
1069 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1070 * @hw: pointer to hardware structure
1071 * @reg: analog register to read
1072 * @val: read value
1073 *
1074 * Performs read operation to Atlas analog register specified.
1075 **/
1076 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1077 {
1078 u32 atlas_ctl;
1079
1080 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1081
1082 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1083 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1084 IXGBE_WRITE_FLUSH(hw);
1085 usec_delay(10);
1086 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1087 *val = (u8)atlas_ctl;
1088
1089 return IXGBE_SUCCESS;
1090 }
1091
1092 /**
1093 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1094 * @hw: pointer to hardware structure
1095 * @reg: atlas register to write
1096 * @val: value to write
1097 *
1098 * Performs write operation to Atlas analog register specified.
1099 **/
1100 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1101 {
1102 u32 atlas_ctl;
1103
1104 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1105
1106 atlas_ctl = (reg << 8) | val;
1107 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1108 IXGBE_WRITE_FLUSH(hw);
1109 usec_delay(10);
1110
1111 return IXGBE_SUCCESS;
1112 }
1113
1114 /**
1115 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
1116 * @hw: pointer to hardware structure
1117 * @dev_addr: address to read from
1118 * @byte_offset: byte offset to read from dev_addr
1119 * @eeprom_data: value read
1120 *
1121  * Performs an 8 bit read operation from the SFP module's EEPROM over the I2C interface.
1122 **/
1123 static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1124 u8 byte_offset, u8 *eeprom_data)
1125 {
1126 s32 status = IXGBE_SUCCESS;
1127 u16 sfp_addr = 0;
1128 u16 sfp_data = 0;
1129 u16 sfp_stat = 0;
1130 u16 gssr;
1131 u32 i;
1132
1133 DEBUGFUNC("ixgbe_read_i2c_phy_82598");
1134
1135 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1136 gssr = IXGBE_GSSR_PHY1_SM;
1137 else
1138 gssr = IXGBE_GSSR_PHY0_SM;
1139
1140 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
1141 return IXGBE_ERR_SWFW_SYNC;
1142
1143 if (hw->phy.type == ixgbe_phy_nl) {
1144 /*
1145 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1146 * 0xC30D. These registers are used to talk to the SFP+
1147 * module's EEPROM through the SDA/SCL (I2C) interface.
1148 */
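		/*
		 * Pack the I2C device address into the upper byte and the
		 * EEPROM byte offset into the lower byte, then set the
		 * read-command bit.
		 */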
1149 sfp_addr = (dev_addr << 8) + byte_offset;
1150 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1151 hw->phy.ops.write_reg_mdi(hw,
1152 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1153 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1154 sfp_addr);
1155
1156 /* Poll status */
1157 for (i = 0; i < 100; i++) {
1158 hw->phy.ops.read_reg_mdi(hw,
1159 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1160 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1161 &sfp_stat);
1162 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1163 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1164 break;
1165 msec_delay(10);
1166 }
1167
1168 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1169 DEBUGOUT("EEPROM read did not pass.\n");
1170 status = IXGBE_ERR_SFP_NOT_PRESENT;
1171 goto out;
1172 }
1173
1174 /* Read data */
1175 hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1176 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1177
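		/* The byte read from the module is returned in the upper half
		 * of the 16-bit SDA/SCL data register. */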
1178 *eeprom_data = (u8)(sfp_data >> 8);
1179 } else {
1180 status = IXGBE_ERR_PHY;
1181 }
1182
1183 out:
1184 hw->mac.ops.release_swfw_sync(hw, gssr);
1185 return status;
1186 }
1187
1188 /**
1189 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1190 * @hw: pointer to hardware structure
1191 * @byte_offset: EEPROM byte offset to read
1192 * @eeprom_data: value read
1193 *
1194  * Performs an 8 bit read operation from the SFP module's EEPROM over the I2C interface.
1195 **/
1196 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1197 u8 *eeprom_data)
1198 {
1199 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1200 byte_offset, eeprom_data);
1201 }
1202
1203 /**
1204 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1205 * @hw: pointer to hardware structure
1206 * @byte_offset: byte offset at address 0xA2
1207  * @sff8472_data: value read
1208  *
1209  * Performs an 8 bit read operation of the SFP module's SFF-8472 data over I2C
1210 **/
1211 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1212 u8 *sff8472_data)
1213 {
1214 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1215 byte_offset, sff8472_data);
1216 }
1217
1218 /**
1219 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1220 * @hw: pointer to hardware structure
1221 *
1222 * Determines physical layer capabilities of the current configuration.
1223 **/
1224 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1225 {
1226 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1227 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1228 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1229 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1230 u16 ext_ability = 0;
1231
1232 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1233
1234 hw->phy.ops.identify(hw);
1235
1236 /* Copper PHY must be checked before AUTOC LMS to determine correct
1237 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1238 switch (hw->phy.type) {
1239 case ixgbe_phy_tn:
1240 case ixgbe_phy_cu_unknown:
1241 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1242 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1243 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1244 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1245 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1246 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1247 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1248 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1249 goto out;
1250 default:
1251 break;
1252 }
1253
1254 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1255 case IXGBE_AUTOC_LMS_1G_AN:
1256 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1257 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1258 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1259 else
1260 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1261 break;
1262 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1263 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1264 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1265 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1266 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1267 else /* XAUI */
1268 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1269 break;
1270 case IXGBE_AUTOC_LMS_KX4_AN:
1271 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1272 if (autoc & IXGBE_AUTOC_KX_SUPP)
1273 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1274 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1275 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1276 break;
1277 default:
1278 break;
1279 }
1280
1281 if (hw->phy.type == ixgbe_phy_nl) {
1282 hw->phy.ops.identify_sfp(hw);
1283
1284 switch (hw->phy.sfp_type) {
1285 case ixgbe_sfp_type_da_cu:
1286 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1287 break;
1288 case ixgbe_sfp_type_sr:
1289 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1290 break;
1291 case ixgbe_sfp_type_lr:
1292 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1293 break;
1294 default:
1295 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1296 break;
1297 }
1298 }
1299
1300 switch (hw->device_id) {
1301 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1302 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1303 break;
1304 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1305 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1306 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1307 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1308 break;
1309 case IXGBE_DEV_ID_82598EB_XF_LR:
1310 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1311 break;
1312 default:
1313 break;
1314 }
1315
1316 out:
1317 return physical_layer;
1318 }
1319
1320 /**
1321 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1322 * port devices.
1323 * @hw: pointer to the HW structure
1324 *
1325 * Calls common function and corrects issue with some single port devices
1326 * that enable LAN1 but not LAN0.
1327 **/
1328 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1329 {
1330 struct ixgbe_bus_info *bus = &hw->bus;
1331 u16 pci_gen = 0;
1332 u16 pci_ctrl2 = 0;
1333
1334 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1335
1336 ixgbe_set_lan_id_multi_port_pcie(hw);
1337
1338 /* check if LAN0 is disabled */
1339 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1340 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1341
1342 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1343
1344 /* if LAN0 is completely disabled force function to 0 */
1345 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1346 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1347 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1348
1349 bus->func = 0;
1350 }
1351 }
1352 }
1353
1354 /**
1355 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1356 * @hw: pointer to hardware structure
1357  * Enables relaxed ordering in the DCA Tx and Rx control registers for all queues.
1358 **/
1359 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1360 {
1361 u32 regval;
1362 u32 i;
1363
1364 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1365
1366 /* Enable relaxed ordering */
1367 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1368 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1369 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1370 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1371 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1372 }
1373
1374 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1375 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1376 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1377 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1378 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1379 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1380 }
1381
1382 }
1383
1384 /**
1385 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1386 * @hw: pointer to hardware structure
1387 * @num_pb: number of packet buffers to allocate
1388  * @headroom: reserve n KB of headroom (not used on 82598)
1389 * @strategy: packet buffer allocation strategy
1390 **/
1391 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1392 u32 headroom, int strategy)
1393 {
1394 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1395 u8 i = 0;
1396 UNREFERENCED_1PARAMETER(headroom);
1397
1398 if (!num_pb)
1399 return;
1400
1401 /* Setup Rx packet buffer sizes */
1402 switch (strategy) {
1403 case PBA_STRATEGY_WEIGHTED:
1404 /* Setup the first four at 80KB */
1405 rxpktsize = IXGBE_RXPBSIZE_80KB;
1406 for (; i < 4; i++)
1407 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1408 /* Setup the last four at 48KB...don't re-init i */
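		/*
		 * With the usual eight packet buffers this works out to
		 * 4 x 80KB + 4 x 48KB = 512KB, matching IXGBE_82598_RX_PB_SIZE.
		 */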
1409 rxpktsize = IXGBE_RXPBSIZE_48KB;
1410 /* Fall Through */
1411 case PBA_STRATEGY_EQUAL:
1412 default:
1413 /* Divide the remaining Rx packet buffer evenly among the TCs */
1414 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1415 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1416 break;
1417 }
1418
1419 /* Setup Tx packet buffer sizes */
1420 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1421 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1422 }
1423
1424 /**
1425 * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
1426 * @hw: pointer to hardware structure
1427 * @regval: register value to write to RXCTRL
1428 *
1429 * Enables the Rx DMA unit
1430 **/
1431 s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
1432 {
1433 DEBUGFUNC("ixgbe_enable_rx_dma_82598");
1434
1435 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
1436
1437 return IXGBE_SUCCESS;
1438 }
1439