/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.11 2010/11/26 22:46:32 jfv Exp $*/
/*$NetBSD: ixgbe_82598.c,v 1.2 2015/03/27 05:57:28 msaitoh Exp $*/

#include "ixgbe_type.h"
#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy);

/**
 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82598 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec.  To address this we need to
 * increase the value to either 10ms to 250ms for capability version 1 config,
 * or 16ms to 55ms for version 2.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;
	/*
	 * if capability version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
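	/*
	 * (Per the PCIe spec, the 16ms to 55ms range corresponds to a
	 * Completion Timeout Value encoding of 0101b in the low nibble of
	 * Device Control 2; IXGBE_PCI_DEVICE_CONTROL2_16ms is assumed to
	 * carry that encoding.)
	 */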
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
 * @hw: pointer to hardware structure
 *
 * Read PCIe configuration space, and get the MSI-X vector count from
 * the capabilities table.
 **/
u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
{
	u32 msix_count = 18;

	DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");

	if (hw->mac.msix_vectors_from_pcie) {
		msix_count = IXGBE_READ_PCIE_WORD(hw,
						  IXGBE_PCIE_MSIX_82598_CAPS);
		msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
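		/*
		 * Illustration: the table-size field is zero-based, so a
		 * raw value of 17 here means 18 MSI-X vectors after the
		 * increment below.
		 */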

		/* MSI-X count is zero-based in HW, so increment to give
		 * proper value */
		msix_count++;
	}
	return msix_count;
}

/**
 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82598.
 * Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82598");

	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = &ixgbe_init_phy_ops_82598;

	/* MAC */
	mac->ops.start_hw = &ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
				&ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
	mac->ops.set_vlvf = NULL;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_82598;

	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 16;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 32;
	mac->max_rx_queues = 64;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;

	/* Link */
	mac->ops.check_link = &ixgbe_check_mac_link_82598;
	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = NULL;

	return ret_val;
}

/**
 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset;

	DEBUGFUNC("ixgbe_init_phy_ops_82598");

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
				&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
				&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function.
 * Disables relaxed ordering, then sets the PCIe completion timeout.
 *
 **/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82598");

	ret_val = ixgbe_start_hw_generic(hw);

	/* Disable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface */
	if (ret_val == IXGBE_SUCCESS)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: boolean auto-negotiation value
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82598");

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
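		/*
		 * e.g., if AUTOC advertises both KX4 and KX, *speed ends up
		 * reporting both 10 Gb/s and 1 Gb/s capability with autoneg
		 * enabled.
		 */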
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 * ixgbe_get_media_type_82598 - Determines media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82598");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_fc_enable_82598 - Enable flow control
 * @hw: pointer to hardware structure
 * @packetbuf_num: packet buffer number (0-7)
 *
 * Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 link_speed = 0;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/*
	 * On 82598, enabling Rx flow control causes resets while running
	 * at 1G, so if it is on, turn it off once we know the link speed.
	 * For more details see the 82598 Specification Update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ret_val = ixgbe_fc_autoneg(hw);
	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
		goto out;

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		reg = hw->fc.low_water << 6;
		if (hw->fc.send_xon)
			reg |= IXGBE_FCRTL_XONE;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);

		reg = hw->fc.high_water[packetbuf_num] << 6;
		reg |= IXGBE_FCRTH_FCEN;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
	}

	/* Configure pause time (2 TCs per register) */
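	/*
	 * e.g., packetbuf_num 3 maps to FCTTV(1); being an odd index, its
	 * pause time is written to the upper 16 bits of that register.
	 */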
	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
	if ((packetbuf_num & 1) == 0)
		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
	else
		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);

	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));

out:
	return ret_val;
}

/**
 * ixgbe_start_mac_link_82598 - Configures MAC link settings
 * @hw: pointer to hardware structure
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82598");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

	return status;
}

/**
 * ixgbe_validate_link_ready - Function looks for phy link
 * @hw: pointer to hardware structure
 *
 * Function indicates success when phy link is available. If phy is not ready
 * within 5 seconds of MAC indicating link, the function returns an error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return IXGBE_SUCCESS;

	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;

		msec_delay(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		DEBUGOUT("Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_check_mac_link_82598 - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: TRUE if link is up, FALSE otherwise
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
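		/*
		 * Note: 0xC79F is read twice back to back, presumably
		 * because the status bit latches and only the second read
		 * reflects the current state (assumption; the register is
		 * undocumented).
		 */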
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

out:
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_setup_mac_link_82598 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Sets the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed, bool autoneg,
				      bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	DEBUGFUNC("ixgbe_setup_mac_link_82598");

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AUTOC register based on
		 * the new stored values.
		 */
		status = ixgbe_start_mac_link_82598(hw,
						    autoneg_wait_to_complete);
	}

	return status;
}


/**
 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82598");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link
 * (MAC) reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8 analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow
	 * time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq set index
 **/
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_vmdq_82598");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_1PARAMETER(vmdq);

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vfta_82598 - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFTA
 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			 bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	DEBUGFUNC("ixgbe_set_vfta_82598");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
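	/*
	 * Worked example (for illustration): VLAN 100 gives
	 * regindex = (100 >> 5) & 0x7F = 3, vftabyte = (100 >> 3) & 0x3 = 0
	 * and bitindex = (100 & 0x7) << 2 = 16, so the VMDq nibble for
	 * VLAN 100 lives in bits 19:16 of VFTAVIND(0, 3).
	 */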

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */
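	/*
	 * Continuing the VLAN 100 example: bitindex = 100 & 0x1F = 4, so
	 * bit 4 of VFTA[3] is set or cleared below.
	 */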

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	DEBUGFUNC("ixgbe_clear_vfta_82598");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 atlas_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82598");

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;
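	/*
	 * The target register number was placed in bits 15:8 of ATLASCTL
	 * above; the 8-bit analog value comes back in the low byte, which
	 * the cast above extracts.
	 */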

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Atlas analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 atlas_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82598");

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D.  These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
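		/*
		 * e.g., with the conventional SFP EEPROM I2C address 0xA0
		 * (assumed to be what IXGBE_I2C_EEPROM_DEV_ADDR holds) and
		 * byte_offset 0x60, sfp_addr is 0xA060 before the read flag
		 * is OR'd in.
		 */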
		hw->phy.ops.write_reg(hw,
				      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
				      sfp_addr);

		/* Poll status */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg(hw,
					     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
					     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
					     &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
		goto out;
	}

out:
	return status;
}

/**
 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 * port devices.
 * @hw: pointer to the HW structure
 *
 * Calls common function and corrects an issue with some single port devices
 * that enable LAN1 but not LAN0.
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 * @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

	/* Enable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
			   IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

}

/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;
	UNREFERENCED_1PARAMETER(headroom);

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
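		/*
		 * 4 x 80KB + 4 x 48KB accounts for the full 512KB Rx packet
		 * buffer reported in mac->rx_pb_size above.
		 */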
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);

	return;
}