/* $NetBSD: ixgbe_common.c,v 1.20 2018/03/30 06:44:30 msaitoh Exp $ */
2
3 /******************************************************************************
4 SPDX-License-Identifier: BSD-3-Clause
5
6 Copyright (c) 2001-2017, Intel Corporation
7 All rights reserved.
8
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
11
12 1. Redistributions of source code must retain the above copyright notice,
13 this list of conditions and the following disclaimer.
14
15 2. Redistributions in binary form must reproduce the above copyright
16 notice, this list of conditions and the following disclaimer in the
17 documentation and/or other materials provided with the distribution.
18
19 3. Neither the name of the Intel Corporation nor the names of its
20 contributors may be used to endorse or promote products derived from
21 this software without specific prior written permission.
22
23 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 POSSIBILITY OF SUCH DAMAGE.
34
35 ******************************************************************************/
36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 320688 2017-07-05 17:27:03Z erj $*/
37
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 #include "ixgbe_dcb.h"
41 #include "ixgbe_dcb_82599.h"
42 #include "ixgbe_api.h"
43
/* Low-level EEPROM access helpers (bit-bang SPI via the EEC register) */
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);

/* Multicast hashing and SAN MAC / EEPROM buffer helpers */
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset);
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data);
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset);
65
/**
 * ixgbe_init_ops_generic - Inits function ptrs
 * @hw: pointer to the hardware structure
 *
 * Initialize the function pointers common to all MAC generations.
 * Family-specific init routines run after this and override individual
 * entries; the entries assigned NULL here (reset_hw, media/link ops,
 * VMDq/VLAN ops) MUST be filled in by the family code before use.
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		/* EEPROM present: hardware EERD interface is usable */
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		/* No EEPROM detected: fall back to bit-banged SPI access */
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				      ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;		/* family-specific */
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;		/* family-specific */
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;	/* family-specific */
	mac->ops.set_vmdq = NULL;		/* family-specific */
	mac->ops.clear_vmdq = NULL;		/* family-specific */
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;		/* family-specific */
	mac->ops.set_vfta = NULL;		/* family-specific */
	mac->ops.set_vlvf = NULL;		/* family-specific */
	mac->ops.init_uta_tables = NULL;	/* family-specific */
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg;

	/* Link */
	mac->ops.get_link_capabilities = NULL;	/* family-specific */
	mac->ops.setup_link = NULL;		/* family-specific */
	mac->ops.check_link = NULL;		/* family-specific */
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}
155
156 /**
157 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
158 * of flow control
159 * @hw: pointer to hardware structure
160 *
161 * This function returns TRUE if the device supports flow control
162 * autonegotiation, and FALSE if it does not.
163 *
164 **/
165 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
166 {
167 bool supported = FALSE;
168 ixgbe_link_speed speed;
169 bool link_up;
170
171 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
172
173 switch (hw->phy.media_type) {
174 case ixgbe_media_type_fiber_fixed:
175 case ixgbe_media_type_fiber_qsfp:
176 case ixgbe_media_type_fiber:
177 /* flow control autoneg black list */
178 switch (hw->device_id) {
179 case IXGBE_DEV_ID_X550EM_A_SFP:
180 case IXGBE_DEV_ID_X550EM_A_SFP_N:
181 case IXGBE_DEV_ID_X550EM_A_QSFP:
182 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
183 supported = FALSE;
184 break;
185 default:
186 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
187 /* if link is down, assume supported */
188 if (link_up)
189 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
190 TRUE : FALSE;
191 else
192 supported = TRUE;
193 }
194
195 break;
196 case ixgbe_media_type_backplane:
197 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
198 supported = FALSE;
199 else
200 supported = TRUE;
201 break;
202 case ixgbe_media_type_copper:
203 /* only some copper devices support flow control autoneg */
204 switch (hw->device_id) {
205 case IXGBE_DEV_ID_82599_T3_LOM:
206 case IXGBE_DEV_ID_X540T:
207 case IXGBE_DEV_ID_X540T1:
208 case IXGBE_DEV_ID_X540_BYPASS:
209 case IXGBE_DEV_ID_X550T:
210 case IXGBE_DEV_ID_X550T1:
211 case IXGBE_DEV_ID_X550EM_X_10G_T:
212 case IXGBE_DEV_ID_X550EM_A_10G_T:
213 case IXGBE_DEV_ID_X550EM_A_1G_T:
214 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
215 supported = TRUE;
216 break;
217 default:
218 supported = FALSE;
219 }
220 default:
221 break;
222 }
223
224 if (!supported)
225 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
226 "Device %x does not support flow control autoneg",
227 hw->device_id);
228
229 return supported;
230 }
231
232 /**
233 * ixgbe_setup_fc_generic - Set up flow control
234 * @hw: pointer to hardware structure
235 *
236 * Called at init time to set up flow control.
237 **/
238 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
239 {
240 s32 ret_val = IXGBE_SUCCESS;
241 u32 reg = 0, reg_bp = 0;
242 u16 reg_cu = 0;
243 bool locked = FALSE;
244
245 DEBUGFUNC("ixgbe_setup_fc_generic");
246
247 /* Validate the requested mode */
248 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
249 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
250 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
251 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
252 goto out;
253 }
254
255 /*
256 * 10gig parts do not have a word in the EEPROM to determine the
257 * default flow control setting, so we explicitly set it to full.
258 */
259 if (hw->fc.requested_mode == ixgbe_fc_default)
260 hw->fc.requested_mode = ixgbe_fc_full;
261
262 /*
263 * Set up the 1G and 10G flow control advertisement registers so the
264 * HW will be able to do fc autoneg once the cable is plugged in. If
265 * we link at 10G, the 1G advertisement is harmless and vice versa.
266 */
267 switch (hw->phy.media_type) {
268 case ixgbe_media_type_backplane:
269 /* some MAC's need RMW protection on AUTOC */
270 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
271 if (ret_val != IXGBE_SUCCESS)
272 goto out;
273
274 /* fall through - only backplane uses autoc */
275 case ixgbe_media_type_fiber_fixed:
276 case ixgbe_media_type_fiber_qsfp:
277 case ixgbe_media_type_fiber:
278 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
279
280 break;
281 case ixgbe_media_type_copper:
282 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
283 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
284 break;
285 default:
286 break;
287 }
288
289 /*
290 * The possible values of fc.requested_mode are:
291 * 0: Flow control is completely disabled
292 * 1: Rx flow control is enabled (we can receive pause frames,
293 * but not send pause frames).
294 * 2: Tx flow control is enabled (we can send pause frames but
295 * we do not support receiving pause frames).
296 * 3: Both Rx and Tx flow control (symmetric) are enabled.
297 * other: Invalid.
298 */
299 switch (hw->fc.requested_mode) {
300 case ixgbe_fc_none:
301 /* Flow control completely disabled by software override. */
302 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
303 if (hw->phy.media_type == ixgbe_media_type_backplane)
304 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
305 IXGBE_AUTOC_ASM_PAUSE);
306 else if (hw->phy.media_type == ixgbe_media_type_copper)
307 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
308 break;
309 case ixgbe_fc_tx_pause:
310 /*
311 * Tx Flow control is enabled, and Rx Flow control is
312 * disabled by software override.
313 */
314 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
315 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
316 if (hw->phy.media_type == ixgbe_media_type_backplane) {
317 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
318 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
319 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
320 reg_cu |= IXGBE_TAF_ASM_PAUSE;
321 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
322 }
323 break;
324 case ixgbe_fc_rx_pause:
325 /*
326 * Rx Flow control is enabled and Tx Flow control is
327 * disabled by software override. Since there really
328 * isn't a way to advertise that we are capable of RX
329 * Pause ONLY, we will advertise that we support both
330 * symmetric and asymmetric Rx PAUSE, as such we fall
331 * through to the fc_full statement. Later, we will
332 * disable the adapter's ability to send PAUSE frames.
333 */
334 case ixgbe_fc_full:
335 /* Flow control (both Rx and Tx) is enabled by SW override. */
336 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
337 if (hw->phy.media_type == ixgbe_media_type_backplane)
338 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
339 IXGBE_AUTOC_ASM_PAUSE;
340 else if (hw->phy.media_type == ixgbe_media_type_copper)
341 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
342 break;
343 default:
344 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
345 "Flow control param set incorrectly\n");
346 ret_val = IXGBE_ERR_CONFIG;
347 goto out;
348 break;
349 }
350
351 if (hw->mac.type < ixgbe_mac_X540) {
352 /*
353 * Enable auto-negotiation between the MAC & PHY;
354 * the MAC will advertise clause 37 flow control.
355 */
356 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
357 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
358
359 /* Disable AN timeout */
360 if (hw->fc.strict_ieee)
361 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
362
363 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
364 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
365 }
366
367 /*
368 * AUTOC restart handles negotiation of 1G and 10G on backplane
369 * and copper. There is no need to set the PCS1GCTL register.
370 *
371 */
372 if (hw->phy.media_type == ixgbe_media_type_backplane) {
373 reg_bp |= IXGBE_AUTOC_AN_RESTART;
374 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
375 if (ret_val)
376 goto out;
377 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
378 (ixgbe_device_supports_autoneg_fc(hw))) {
379 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
380 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
381 }
382
383 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
384 out:
385 return ret_val;
386 }
387
/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 *
 * Returns IXGBE_SUCCESS, or the error from flow control setup (a
 * "not implemented" setup_fc is tolerated).
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
		return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* NOTE(review): get_device_caps return value is ignored;
		 * device_caps could be used uninitialized on read failure. */
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = FALSE;
		else
			hw->need_crosstalk_fix = TRUE;
		break;
	default:
		/* other MAC families never need the crosstalk workaround */
		hw->need_crosstalk_fix = FALSE;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

	return IXGBE_SUCCESS;
}
450
451 /**
452 * ixgbe_start_hw_gen2 - Init sequence for common device family
453 * @hw: pointer to hw structure
454 *
455 * Performs the init sequence common to the second generation
456 * of 10 GbE devices.
457 * Devices in the second generation:
458 * 82599
459 * X540
460 **/
461 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
462 {
463 u32 i;
464 u32 regval;
465
466 /* Clear the rate limiters */
467 for (i = 0; i < hw->mac.max_tx_queues; i++) {
468 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
469 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
470 }
471 IXGBE_WRITE_FLUSH(hw);
472
473 /* Disable relaxed ordering */
474 for (i = 0; i < hw->mac.max_tx_queues; i++) {
475 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
476 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
477 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
478 }
479
480 for (i = 0; i < hw->mac.max_rx_queues; i++) {
481 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
482 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
483 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
484 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
485 }
486
487 return IXGBE_SUCCESS;
488 }
489
490 /**
491 * ixgbe_init_hw_generic - Generic hardware initialization
492 * @hw: pointer to hardware structure
493 *
494 * Initialize the hardware by resetting the hardware, filling the bus info
495 * structure and media type, clears all on chip counters, initializes receive
496 * address registers, multicast table, VLAN filter table, calls routine to set
497 * up link and flow control settings, and leaves transmit and receive units
498 * disabled and uninitialized
499 **/
500 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
501 {
502 s32 status;
503
504 DEBUGFUNC("ixgbe_init_hw_generic");
505
506 /* Reset the hardware */
507 status = hw->mac.ops.reset_hw(hw);
508
509 if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
510 /* Start the HW */
511 status = hw->mac.ops.start_hw(hw);
512 }
513
514 /* Initialize the LED link active for LED blink support */
515 if (hw->mac.ops.init_led_link_act)
516 hw->mac.ops.init_led_link_act(hw);
517
518 if (status != IXGBE_SUCCESS)
519 DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
520
521 return status;
522 }
523
/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.  Register selection varies by MAC
 * generation (82598 vs 82599+ vs X540/X550).
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_READ_REG(hw, IXGBE_MBSDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level XON/XOFF pause counters (Rx regs moved on 82599+) */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority XON/XOFF pause counters, 8 traffic classes */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Packet size histogram and good/broadcast/multicast counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters, 16 queues */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550 keep extra clear-on-read counters in the PHY/PCS;
	 * 'i' is reused here only as a discarded scratch word. */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
639
/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.  Two NVM layouts are
 * handled: the legacy format, where two EEPROM words hold the PBA as
 * packed hex digits, and the string format (signalled by the
 * IXGBE_PBANUM_PTR_GUARD word), where the second word points to a
 * length-prefixed block of ASCII character pairs.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_ARGUMENT for a NULL buffer,
 * IXGBE_ERR_NO_SPACE when the buffer is too small,
 * IXGBE_ERR_PBA_SECTION for a corrupt section, or an EEPROM read error.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		/* position 7 is a literal 0 nibble; the hex-char loop
		 * below turns it into the character '0' */
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
			/* '-' (0x2D) is >= 0x10 and passes through */
		}

		return IXGBE_SUCCESS;
	}

	/* string format: first word of the pointed-to block is its length */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word yields two ASCII characters, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
750
751 /**
752 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
753 * @hw: pointer to hardware structure
754 * @pba_num: stores the part number from the EEPROM
755 *
756 * Reads the part number from the EEPROM.
757 **/
758 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
759 {
760 s32 ret_val;
761 u16 data;
762
763 DEBUGFUNC("ixgbe_read_pba_num_generic");
764
765 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
766 if (ret_val) {
767 DEBUGOUT("NVM Read Error\n");
768 return ret_val;
769 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
770 DEBUGOUT("NVM Not supported\n");
771 return IXGBE_NOT_IMPLEMENTED;
772 }
773 *pba_num = (u32)(data << 16);
774
775 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
776 if (ret_val) {
777 DEBUGOUT("NVM Read Error\n");
778 return ret_val;
779 }
780 *pba_num |= data;
781
782 return IXGBE_SUCCESS;
783 }
784
/**
 * ixgbe_read_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @max_pba_block_size: PBA block size limit
 * @pba: pointer to output PBA structure
 *
 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
 *
 * Fills pba->word[0..1]; when the NVM uses the string/block format
 * (guard word present) the PBA block is also copied into
 * pba->pba_block, which the caller must have allocated.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for NULL/undersized arguments,
 * or an EEPROM read error.
 **/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
		       u32 eeprom_buf_size, u16 max_pba_block_size,
		       struct ixgbe_pba *pba)
{
	s32 ret_val;
	u16 pba_block_size;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Fetch the two PBA words from the device or from the image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Guard word => word[1] points at a length-prefixed PBA block */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
						   eeprom_buf_size,
						   &pba_block_size);
		if (ret_val)
			return ret_val;

		if (pba_block_size > max_pba_block_size)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
							     pba_block_size,
							     pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* Bounds-check before copying out of the image */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba_block_size)) {
				memcpy(pba->pba_block,
				       &eeprom_buf[pba->word[1]],
				       pba_block_size * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
854
/**
 * ixgbe_write_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba: pointer to PBA structure
 *
 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 * When pba->word[0] is the guard word, pba->pba_block is also written;
 * its first word (pba_block[0]) is the block length in words.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for NULL/undersized arguments,
 * or an EEPROM write error.
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Store the two PBA words to the device or into the image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Guard word => also write the length-prefixed PBA block */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* Bounds-check before copying into the image */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
912
/**
 * ixgbe_get_pba_block_size
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba_block_size: pointer to output variable (may be NULL)
 *
 * Returns the size of the PBA block in words. Function operates on EEPROM
 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
 * EEPROM device.  Legacy-format NVM has no block, so size 0 is reported.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for an undersized image,
 * IXGBE_ERR_PBA_SECTION for a corrupt length word, or an EEPROM read
 * error.
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	/* Fetch the two PBA words from the device or from the image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* Block format: first word of the block is its length */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		/* 0xFFFF is erased flash, 0 is nonsense - both invalid */
		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block. */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}
973
974 /**
975 * ixgbe_get_mac_addr_generic - Generic get MAC address
976 * @hw: pointer to hardware structure
977 * @mac_addr: Adapter MAC address
978 *
979 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
980 * A reset of the adapter must be performed prior to calling this function
981 * in order for the MAC address to have been loaded from the EEPROM into RAR0
982 **/
983 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
984 {
985 u32 rar_high;
986 u32 rar_low;
987 u16 i;
988
989 DEBUGFUNC("ixgbe_get_mac_addr_generic");
990
991 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
992 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
993
994 for (i = 0; i < 4; i++)
995 mac_addr[i] = (u8)(rar_low >> (i*8));
996
997 for (i = 0; i < 2; i++)
998 mac_addr[i+4] = (u8)(rar_high >> (i*8));
999
1000 return IXGBE_SUCCESS;
1001 }
1002
/**
 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
 * @hw: pointer to hardware structure
 * @link_status: the link status returned by the PCI config space
 *
 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
 * by decoding the PCIe Link Status register fields, then lets the MAC
 * derive its LAN id.  Unrecognized field values map to the *_unknown
 * enumerators.
 **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* Only override a bus type nobody has determined yet */
	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	/* Negotiated link width field */
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	/* Negotiated link speed field (2.5/5/8 GT/s) */
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);
}
1052
1053 /**
1054 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1055 * @hw: pointer to hardware structure
1056 *
1057 * Gets the PCI bus info (speed, width, type) then calls helper function to
1058 * store this data within the ixgbe_hw structure.
1059 **/
1060 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1061 {
1062 u16 link_status;
1063
1064 DEBUGFUNC("ixgbe_get_bus_info_generic");
1065
1066 /* Get the negotiated link width and speed from PCI config space */
1067 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1068
1069 ixgbe_set_pci_config_data_generic(hw, link_status);
1070
1071 return IXGBE_SUCCESS;
1072 }
1073
1074 /**
1075 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1076 * @hw: pointer to the HW structure
1077 *
1078 * Determines the LAN function id by reading memory-mapped registers and swaps
1079 * the port value if requested, and set MAC instance for devices that share
1080 * CS4227.
1081 **/
1082 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1083 {
1084 struct ixgbe_bus_info *bus = &hw->bus;
1085 u32 reg;
1086 u16 ee_ctrl_4;
1087
1088 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1089
1090 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1091 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1092 bus->lan_id = (u8)bus->func;
1093
1094 /* check for a port swap */
1095 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1096 if (reg & IXGBE_FACTPS_LFS)
1097 bus->func ^= 0x1;
1098
1099 /* Get MAC instance from EEPROM for configuring CS4227 */
1100 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1101 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1102 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1103 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1104 }
1105 }
1106
1107 /**
1108 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1109 * @hw: pointer to hardware structure
1110 *
1111 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1112 * disables transmit and receive units. The adapter_stopped flag is used by
1113 * the shared code and drivers to determine if the adapter is in a stopped
1114 * state and should not touch the hardware.
1115 **/
1116 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1117 {
1118 u32 reg_val;
1119 u16 i;
1120
1121 DEBUGFUNC("ixgbe_stop_adapter_generic");
1122
1123 /*
1124 * Set the adapter_stopped flag so other driver functions stop touching
1125 * the hardware
1126 */
1127 hw->adapter_stopped = TRUE;
1128
1129 /* Disable the receive unit */
1130 ixgbe_disable_rx(hw);
1131
1132 /* Clear interrupt mask to stop interrupts from being generated */
1133 /*
1134 * XXX
1135 * This function is called in the state of both interrupt disabled
1136 * and interrupt enabled, e.g.
1137 * + interrupt disabled case:
1138 * - ixgbe_stop()
1139 * - ixgbe_disable_intr() // interrupt disabled here
1140 * - ixgbe_stop_adapter()
1141 * - hw->mac.ops.stop_adapter()
1142 * == this function
1143 * + interrupt enabled case:
1144 * - ixgbe_local_timer1()
1145 * - ixgbe_init_locked()
1146 * - ixgbe_stop_adapter()
1147 * - hw->mac.ops.stop_adapter()
1148 * == this function
1149 * Therefore, it causes nest status breaking to nest the status
1150 * (that is, que->im_nest++) at all times. So, this function must
1151 * use ixgbe_ensure_disabled_intr() instead of ixgbe_disable_intr().
1152 */
1153 ixgbe_ensure_disabled_intr(hw->back);
1154
1155 /* Clear any pending interrupts, flush previous writes */
1156 IXGBE_READ_REG(hw, IXGBE_EICR);
1157
1158 /* Disable the transmit unit. Each queue must be disabled. */
1159 for (i = 0; i < hw->mac.max_tx_queues; i++)
1160 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1161
1162 /* Disable the receive unit by stopping each queue */
1163 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1164 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1165 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1166 reg_val |= IXGBE_RXDCTL_SWFLSH;
1167 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1168 }
1169
1170 /* flush all queues disables */
1171 IXGBE_WRITE_FLUSH(hw);
1172 msec_delay(2);
1173
1174 /*
1175 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1176 * access and verify no pending requests
1177 */
1178 return ixgbe_disable_pcie_master(hw);
1179 }
1180
1181 /**
1182 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1183 * @hw: pointer to hardware structure
1184 *
1185 * Store the index for the link active LED. This will be used to support
1186 * blinking the LED.
1187 **/
1188 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1189 {
1190 struct ixgbe_mac_info *mac = &hw->mac;
1191 u32 led_reg, led_mode;
1192 u8 i;
1193
1194 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1195
1196 /* Get LED link active from the LEDCTL register */
1197 for (i = 0; i < 4; i++) {
1198 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1199
1200 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1201 IXGBE_LED_LINK_ACTIVE) {
1202 mac->led_link_act = i;
1203 return IXGBE_SUCCESS;
1204 }
1205 }
1206
1207 /*
1208 * If LEDCTL register does not have the LED link active set, then use
1209 * known MAC defaults.
1210 */
1211 switch (hw->mac.type) {
1212 case ixgbe_mac_X550EM_a:
1213 case ixgbe_mac_X550EM_x:
1214 mac->led_link_act = 1;
1215 break;
1216 default:
1217 mac->led_link_act = 2;
1218 }
1219 return IXGBE_SUCCESS;
1220 }
1221
1222 /**
1223 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1224 * @hw: pointer to hardware structure
1225 * @index: led number to turn on
1226 **/
1227 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1228 {
1229 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1230
1231 DEBUGFUNC("ixgbe_led_on_generic");
1232
1233 if (index > 3)
1234 return IXGBE_ERR_PARAM;
1235
1236 /* To turn on the LED, set mode to ON. */
1237 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1238 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1239 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1240 IXGBE_WRITE_FLUSH(hw);
1241
1242 return IXGBE_SUCCESS;
1243 }
1244
1245 /**
1246 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1247 * @hw: pointer to hardware structure
1248 * @index: led number to turn off
1249 **/
1250 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1251 {
1252 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1253
1254 DEBUGFUNC("ixgbe_led_off_generic");
1255
1256 if (index > 3)
1257 return IXGBE_ERR_PARAM;
1258
1259 /* To turn off the LED, set mode to OFF. */
1260 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1261 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1262 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1263 IXGBE_WRITE_FLUSH(hw);
1264
1265 return IXGBE_SUCCESS;
1266 }
1267
1268 /**
1269 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1270 * @hw: pointer to hardware structure
1271 *
1272 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1273 * ixgbe_hw struct in order to set up EEPROM access.
1274 **/
1275 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1276 {
1277 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1278 u32 eec;
1279 u16 eeprom_size;
1280
1281 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1282
1283 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1284 eeprom->type = ixgbe_eeprom_none;
1285 /* Set default semaphore delay to 10ms which is a well
1286 * tested value */
1287 eeprom->semaphore_delay = 10;
1288 /* Clear EEPROM page size, it will be initialized as needed */
1289 eeprom->word_page_size = 0;
1290
1291 /*
1292 * Check for EEPROM present first.
1293 * If not present leave as none
1294 */
1295 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1296 if (eec & IXGBE_EEC_PRES) {
1297 eeprom->type = ixgbe_eeprom_spi;
1298
1299 /*
1300 * SPI EEPROM is assumed here. This code would need to
1301 * change if a future EEPROM is not SPI.
1302 */
1303 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1304 IXGBE_EEC_SIZE_SHIFT);
1305 eeprom->word_size = 1 << (eeprom_size +
1306 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1307 }
1308
1309 if (eec & IXGBE_EEC_ADDR_SIZE)
1310 eeprom->address_bits = 16;
1311 else
1312 eeprom->address_bits = 8;
1313 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1314 "%d\n", eeprom->type, eeprom->word_size,
1315 eeprom->address_bits);
1316 }
1317
1318 return IXGBE_SUCCESS;
1319 }
1320
1321 /**
1322 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1323 * @hw: pointer to hardware structure
1324 * @offset: offset within the EEPROM to write
1325 * @words: number of word(s)
1326 * @data: 16 bit word(s) to write to EEPROM
1327 *
1328 * Reads 16 bit word(s) from EEPROM through bit-bang method
1329 **/
1330 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1331 u16 words, u16 *data)
1332 {
1333 s32 status = IXGBE_SUCCESS;
1334 u16 i, count;
1335
1336 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1337
1338 hw->eeprom.ops.init_params(hw);
1339
1340 if (words == 0) {
1341 status = IXGBE_ERR_INVALID_ARGUMENT;
1342 goto out;
1343 }
1344
1345 if (offset + words > hw->eeprom.word_size) {
1346 status = IXGBE_ERR_EEPROM;
1347 goto out;
1348 }
1349
1350 /*
1351 * The EEPROM page size cannot be queried from the chip. We do lazy
1352 * initialization. It is worth to do that when we write large buffer.
1353 */
1354 if ((hw->eeprom.word_page_size == 0) &&
1355 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1356 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1357
1358 /*
1359 * We cannot hold synchronization semaphores for too long
1360 * to avoid other entity starvation. However it is more efficient
1361 * to read in bursts than synchronizing access for each word.
1362 */
1363 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1364 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1365 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1366 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1367 count, &data[i]);
1368
1369 if (status != IXGBE_SUCCESS)
1370 break;
1371 }
1372
1373 out:
1374 return status;
1375 }
1376
1377 /**
1378 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1379 * @hw: pointer to hardware structure
1380 * @offset: offset within the EEPROM to be written to
1381 * @words: number of word(s)
1382 * @data: 16 bit word(s) to be written to the EEPROM
1383 *
1384 * If ixgbe_eeprom_update_checksum is not called after this function, the
1385 * EEPROM will most likely contain an invalid checksum.
1386 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		/* EEPROM acquired but busy: give it back and bail. */
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			/* Once set, A8 stays set for the rest of the loop,
			 * which is correct since (offset + i) only grows. */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address = word offset * 2. */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			/* NOTE: the ++i below also advances the outer loop,
			 * so each burst consumes several words of data[]. */
			do {
				word = data[i];
				/* Byte-swap: SPI wants big-endian words. */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* Unknown page size: one word per command. */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			/* Deselect and wait out the internal write cycle. */
			ixgbe_standby_eeprom(hw);
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1459
1460 /**
1461 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1462 * @hw: pointer to hardware structure
1463 * @offset: offset within the EEPROM to be written to
1464 * @data: 16 bit word to be written to the EEPROM
1465 *
1466 * If ixgbe_eeprom_update_checksum is not called after this function, the
1467 * EEPROM will most likely contain an invalid checksum.
1468 **/
1469 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1470 {
1471 s32 status;
1472
1473 DEBUGFUNC("ixgbe_write_eeprom_generic");
1474
1475 hw->eeprom.ops.init_params(hw);
1476
1477 if (offset >= hw->eeprom.word_size) {
1478 status = IXGBE_ERR_EEPROM;
1479 goto out;
1480 }
1481
1482 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1483
1484 out:
1485 return status;
1486 }
1487
1488 /**
1489 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1490 * @hw: pointer to hardware structure
1491 * @offset: offset within the EEPROM to be read
1492 * @data: read 16 bit words(s) from EEPROM
1493 * @words: number of word(s)
1494 *
1495 * Reads 16 bit word(s) from EEPROM through bit-bang method
1496 **/
1497 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1498 u16 words, u16 *data)
1499 {
1500 s32 status = IXGBE_SUCCESS;
1501 u16 i, count;
1502
1503 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1504
1505 hw->eeprom.ops.init_params(hw);
1506
1507 if (words == 0) {
1508 status = IXGBE_ERR_INVALID_ARGUMENT;
1509 goto out;
1510 }
1511
1512 if (offset + words > hw->eeprom.word_size) {
1513 status = IXGBE_ERR_EEPROM;
1514 goto out;
1515 }
1516
1517 /*
1518 * We cannot hold synchronization semaphores for too long
1519 * to avoid other entity starvation. However it is more efficient
1520 * to read in bursts than synchronizing access for each word.
1521 */
1522 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1523 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1524 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1525
1526 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1527 count, &data[i]);
1528
1529 if (status != IXGBE_SUCCESS)
1530 break;
1531 }
1532
1533 out:
1534 return status;
1535 }
1536
1537 /**
1538 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1539 * @hw: pointer to hardware structure
1540 * @offset: offset within the EEPROM to be read
1541 * @words: number of word(s)
1542 * @data: read 16 bit word(s) from EEPROM
1543 *
1544 * Reads 16 bit word(s) from EEPROM through bit-bang method
1545 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		/* EEPROM acquired but busy: give it back and bail. */
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			/* Once set, A8 stays set for the rest of the loop,
			 * which is correct since (offset + i) only grows. */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address = word offset * 2. */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			/* Byte-swap: SPI delivers big-endian words. */
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1594
1595 /**
1596 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1597 * @hw: pointer to hardware structure
1598 * @offset: offset within the EEPROM to be read
1599 * @data: read 16 bit value from EEPROM
1600 *
1601 * Reads 16 bit value from EEPROM through bit-bang method
1602 **/
1603 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1604 u16 *data)
1605 {
1606 s32 status;
1607
1608 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1609
1610 hw->eeprom.ops.init_params(hw);
1611
1612 if (offset >= hw->eeprom.word_size) {
1613 status = IXGBE_ERR_EEPROM;
1614 goto out;
1615 }
1616
1617 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1618
1619 out:
1620 return status;
1621 }
1622
1623 /**
1624 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1625 * @hw: pointer to hardware structure
1626 * @offset: offset of word in the EEPROM to read
1627 * @words: number of word(s)
1628 * @data: 16 bit word(s) from the EEPROM
1629 *
1630 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1631 **/
1632 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1633 u16 words, u16 *data)
1634 {
1635 u32 eerd;
1636 s32 status = IXGBE_SUCCESS;
1637 u32 i;
1638
1639 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1640
1641 hw->eeprom.ops.init_params(hw);
1642
1643 if (words == 0) {
1644 status = IXGBE_ERR_INVALID_ARGUMENT;
1645 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1646 goto out;
1647 }
1648
1649 if (offset >= hw->eeprom.word_size) {
1650 status = IXGBE_ERR_EEPROM;
1651 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1652 goto out;
1653 }
1654
1655 for (i = 0; i < words; i++) {
1656 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1657 IXGBE_EEPROM_RW_REG_START;
1658
1659 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1660 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1661
1662 if (status == IXGBE_SUCCESS) {
1663 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1664 IXGBE_EEPROM_RW_REG_DATA);
1665 } else {
1666 DEBUGOUT("Eeprom read timed out\n");
1667 goto out;
1668 }
1669 }
1670 out:
1671 return status;
1672 }
1673
1674 /**
1675 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1676 * @hw: pointer to hardware structure
1677 * @offset: offset within the EEPROM to be used as a scratch pad
1678 *
1679 * Discover EEPROM page size by writing marching data at given offset.
1680 * This function is called only when we are writing a new large buffer
1681 * at given offset so the data would be overwritten anyway.
1682 **/
1683 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1684 u16 offset)
1685 {
1686 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1687 s32 status = IXGBE_SUCCESS;
1688 u16 i;
1689
1690 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1691
1692 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1693 data[i] = i;
1694
1695 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1696 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1697 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1698 hw->eeprom.word_page_size = 0;
1699 if (status != IXGBE_SUCCESS)
1700 goto out;
1701
1702 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1703 if (status != IXGBE_SUCCESS)
1704 goto out;
1705
1706 /*
1707 * When writing in burst more than the actual page size
1708 * EEPROM address wraps around current page.
1709 */
1710 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1711
1712 DEBUGOUT1("Detected EEPROM page size = %d words.",
1713 hw->eeprom.word_page_size);
1714 out:
1715 return status;
1716 }
1717
1718 /**
1719 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1720 * @hw: pointer to hardware structure
1721 * @offset: offset of word in the EEPROM to read
1722 * @data: word read from the EEPROM
1723 *
1724 * Reads a 16 bit word from the EEPROM using the EERD register.
1725 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	/* Single-word convenience wrapper around the buffered EERD read. */
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1730
1731 /**
1732 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1733 * @hw: pointer to hardware structure
1734 * @offset: offset of word in the EEPROM to write
1735 * @words: number of word(s)
1736 * @data: word(s) write to the EEPROM
1737 *
1738 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1739 **/
1740 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1741 u16 words, u16 *data)
1742 {
1743 u32 eewr;
1744 s32 status = IXGBE_SUCCESS;
1745 u16 i;
1746
1747 DEBUGFUNC("ixgbe_write_eewr_generic");
1748
1749 hw->eeprom.ops.init_params(hw);
1750
1751 if (words == 0) {
1752 status = IXGBE_ERR_INVALID_ARGUMENT;
1753 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1754 goto out;
1755 }
1756
1757 if (offset >= hw->eeprom.word_size) {
1758 status = IXGBE_ERR_EEPROM;
1759 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1760 goto out;
1761 }
1762
1763 for (i = 0; i < words; i++) {
1764 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1765 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1766 IXGBE_EEPROM_RW_REG_START;
1767
1768 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1769 if (status != IXGBE_SUCCESS) {
1770 DEBUGOUT("Eeprom write EEWR timed out\n");
1771 goto out;
1772 }
1773
1774 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1775
1776 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1777 if (status != IXGBE_SUCCESS) {
1778 DEBUGOUT("Eeprom write EEWR timed out\n");
1779 goto out;
1780 }
1781 }
1782
1783 out:
1784 return status;
1785 }
1786
1787 /**
1788 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1789 * @hw: pointer to hardware structure
1790 * @offset: offset of word in the EEPROM to write
1791 * @data: word write to the EEPROM
1792 *
1793 * Write a 16 bit word to the EEPROM using the EEWR register.
1794 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	/* Single-word convenience wrapper around the buffered EEWR write. */
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1799
1800 /**
1801 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1802 * @hw: pointer to hardware structure
1803 * @ee_reg: EEPROM flag for polling
1804 *
1805 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1806 * read or write is done respectively.
1807 **/
1808 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1809 {
1810 u32 i;
1811 u32 reg;
1812 s32 status = IXGBE_ERR_EEPROM;
1813
1814 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1815
1816 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1817 if (ee_reg == IXGBE_NVM_POLL_READ)
1818 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1819 else
1820 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1821
1822 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1823 status = IXGBE_SUCCESS;
1824 break;
1825 }
1826 usec_delay(5);
1827 }
1828
1829 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1830 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1831 "EEPROM read/write done polling timed out");
1832
1833 return status;
1834 }
1835
1836 /**
1837 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1838 * @hw: pointer to hardware structure
1839 *
1840 * Prepares EEPROM for access using bit-bang method. This function should
1841 * be called before issuing a command to the EEPROM.
1842 **/
1843 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1844 {
1845 s32 status = IXGBE_SUCCESS;
1846 u32 eec;
1847 u32 i;
1848
1849 DEBUGFUNC("ixgbe_acquire_eeprom");
1850
1851 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1852 != IXGBE_SUCCESS)
1853 status = IXGBE_ERR_SWFW_SYNC;
1854
1855 if (status == IXGBE_SUCCESS) {
1856 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1857
1858 /* Request EEPROM Access */
1859 eec |= IXGBE_EEC_REQ;
1860 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1861
1862 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1863 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1864 if (eec & IXGBE_EEC_GNT)
1865 break;
1866 usec_delay(5);
1867 }
1868
1869 /* Release if grant not acquired */
1870 if (!(eec & IXGBE_EEC_GNT)) {
1871 eec &= ~IXGBE_EEC_REQ;
1872 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1873 DEBUGOUT("Could not acquire EEPROM grant\n");
1874
1875 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1876 status = IXGBE_ERR_EEPROM;
1877 }
1878
1879 /* Setup EEPROM for Read/Write */
1880 if (status == IXGBE_SUCCESS) {
1881 /* Clear CS and SK */
1882 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1883 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1884 IXGBE_WRITE_FLUSH(hw);
1885 usec_delay(1);
1886 }
1887 }
1888 return status;
1889 }
1890
1891 /**
1892 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1893 * @hw: pointer to hardware structure
1894 *
1895 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1896 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			/* (Firmware clears SWESMBI writes while it holds the
			 * semaphore, so reading it back as 1 means success.) */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1981
1982 /**
1983 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1984 * @hw: pointer to hardware structure
1985 *
1986 * This function clears hardware semaphore bits.
1987 **/
1988 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1989 {
1990 u32 swsm;
1991
1992 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1993
1994 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1995
1996 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1997 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1998 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1999 IXGBE_WRITE_FLUSH(hw);
2000 }
2001
2002 /**
2003 * ixgbe_ready_eeprom - Polls for EEPROM ready
2004 * @hw: pointer to hardware structure
2005 **/
2006 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
2007 {
2008 s32 status = IXGBE_SUCCESS;
2009 u16 i;
2010 u8 spi_stat_reg;
2011
2012 DEBUGFUNC("ixgbe_ready_eeprom");
2013
2014 /*
2015 * Read "Status Register" repeatedly until the LSB is cleared. The
2016 * EEPROM will signal that the command has been completed by clearing
2017 * bit 0 of the internal status register. If it's not cleared within
2018 * 5 milliseconds, then error out.
2019 */
2020 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
2021 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
2022 IXGBE_EEPROM_OPCODE_BITS);
2023 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
2024 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
2025 break;
2026
2027 usec_delay(5);
2028 ixgbe_standby_eeprom(hw);
2029 }
2030
2031 /*
2032 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
2033 * devices (and only 0-5mSec on 5V devices)
2034 */
2035 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2036 DEBUGOUT("SPI EEPROM Status error\n");
2037 status = IXGBE_ERR_EEPROM;
2038 }
2039
2040 return status;
2041 }
2042
2043 /**
2044 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2045 * @hw: pointer to hardware structure
2046 **/
2047 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2048 {
2049 u32 eec;
2050
2051 DEBUGFUNC("ixgbe_standby_eeprom");
2052
2053 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2054
2055 /* Toggle CS to flush commands */
2056 eec |= IXGBE_EEC_CS;
2057 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2058 IXGBE_WRITE_FLUSH(hw);
2059 usec_delay(1);
2060 eec &= ~IXGBE_EEC_CS;
2061 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2062 IXGBE_WRITE_FLUSH(hw);
2063 usec_delay(1);
2064 }
2065
2066 /**
2067 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2068 * @hw: pointer to hardware structure
2069 * @data: data to send to the EEPROM
2070 * @count: number of bits to shift out
2071 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time. Determine the starting bit based on count
	 */
	/* (MSB first: the mask starts at bit count-1 and walks down.) */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM). A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		/* Let DI settle before clocking it into the EEPROM. */
		usec_delay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}
2122
2123 /**
2124 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2125 * @hw: pointer to hardware structure
2126 **/
2127 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2128 {
2129 u32 eec;
2130 u32 i;
2131 u16 data = 0;
2132
2133 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2134
2135 /*
2136 * In order to read a register from the EEPROM, we need to shift
2137 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2138 * the clock input to the EEPROM (setting the SK bit), and then reading
2139 * the value of the "DO" bit. During this "shifting in" process the
2140 * "DI" bit should always be clear.
2141 */
2142 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2143
2144 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2145
2146 for (i = 0; i < count; i++) {
2147 data = data << 1;
2148 ixgbe_raise_eeprom_clk(hw, &eec);
2149
2150 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2151
2152 eec &= ~(IXGBE_EEC_DI);
2153 if (eec & IXGBE_EEC_DO)
2154 data |= 1;
2155
2156 ixgbe_lower_eeprom_clk(hw, &eec);
2157 }
2158
2159 return data;
2160 }
2161
2162 /**
2163 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2164 * @hw: pointer to hardware structure
2165 * @eec: EEC register's current value
2166 **/
2167 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2168 {
2169 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2170
2171 /*
2172 * Raise the clock input to the EEPROM
2173 * (setting the SK bit), then delay
2174 */
2175 *eec = *eec | IXGBE_EEC_SK;
2176 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2177 IXGBE_WRITE_FLUSH(hw);
2178 usec_delay(1);
2179 }
2180
2181 /**
2182 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2183 * @hw: pointer to hardware structure
2184 * @eecd: EECD's current value
2185 **/
2186 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2187 {
2188 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2189
2190 /*
2191 * Lower the clock input to the EEPROM (clearing the SK bit), then
2192 * delay
2193 */
2194 *eec = *eec & ~IXGBE_EEC_SK;
2195 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2196 IXGBE_WRITE_FLUSH(hw);
2197 usec_delay(1);
2198 }
2199
2200 /**
2201 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2202 * @hw: pointer to hardware structure
2203 **/
2204 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2205 {
2206 u32 eec;
2207
2208 DEBUGFUNC("ixgbe_release_eeprom");
2209
2210 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2211
2212 eec |= IXGBE_EEC_CS; /* Pull CS high */
2213 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2214
2215 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2216 IXGBE_WRITE_FLUSH(hw);
2217
2218 usec_delay(1);
2219
2220 /* Stop requesting EEPROM access */
2221 eec &= ~IXGBE_EEC_REQ;
2222 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2223
2224 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2225
2226 /* Delay before attempt to obtain semaphore again to allow FW access */
2227 msec_delay(hw->eeprom.semaphore_delay);
2228 }
2229
/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums the EEPROM words before the checksum word, plus the words of every
 * section reachable through the pointer words between IXGBE_PCIE_ANALOG_PTR
 * and IXGBE_FW_PTR (the firmware pointer itself is deliberately excluded),
 * then returns the value that would make the grand total IXGBE_EEPROM_SUM.
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		/* The first word of a section holds its length in words */
		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		/* Sum the section body; the length word itself is skipped.
		 * NOTE(review): pointer + length is not range-checked here,
		 * so a corrupt section header could walk beyond the end of
		 * the EEPROM — confirm against the device's EEPROM size.
		 */
		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				DEBUGOUT("EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	/* All words including the stored checksum must sum to the
	 * constant IXGBE_EEPROM_SUM, so the correct checksum is the
	 * difference between that constant and the running sum.
	 */
	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}
2288
2289 /**
2290 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2291 * @hw: pointer to hardware structure
2292 * @checksum_val: calculated checksum
2293 *
2294 * Performs checksum calculation and validates the EEPROM checksum. If the
2295 * caller does not need checksum_val, the value can be NULL.
2296 **/
2297 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2298 u16 *checksum_val)
2299 {
2300 s32 status;
2301 u16 checksum;
2302 u16 read_checksum = 0;
2303
2304 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2305
2306 /* Read the first word from the EEPROM. If this times out or fails, do
2307 * not continue or we could be in for a very long wait while every
2308 * EEPROM read fails
2309 */
2310 status = hw->eeprom.ops.read(hw, 0, &checksum);
2311 if (status) {
2312 DEBUGOUT("EEPROM read failed\n");
2313 return status;
2314 }
2315
2316 status = hw->eeprom.ops.calc_checksum(hw);
2317 if (status < 0)
2318 return status;
2319
2320 checksum = (u16)(status & 0xffff);
2321
2322 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2323 if (status) {
2324 DEBUGOUT("EEPROM read failed\n");
2325 return status;
2326 }
2327
2328 /* Verify read checksum from EEPROM is the same as
2329 * calculated checksum
2330 */
2331 if (read_checksum != checksum)
2332 status = IXGBE_ERR_EEPROM_CHECKSUM;
2333
2334 /* If the user cares, return the calculated checksum */
2335 if (checksum_val)
2336 *checksum_val = checksum;
2337
2338 return status;
2339 }
2340
2341 /**
2342 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2343 * @hw: pointer to hardware structure
2344 **/
2345 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2346 {
2347 s32 status;
2348 u16 checksum;
2349
2350 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2351
2352 /* Read the first word from the EEPROM. If this times out or fails, do
2353 * not continue or we could be in for a very long wait while every
2354 * EEPROM read fails
2355 */
2356 status = hw->eeprom.ops.read(hw, 0, &checksum);
2357 if (status) {
2358 DEBUGOUT("EEPROM read failed\n");
2359 return status;
2360 }
2361
2362 status = hw->eeprom.ops.calc_checksum(hw);
2363 if (status < 0)
2364 return status;
2365
2366 checksum = (u16)(status & 0xffff);
2367
2368 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2369
2370 return status;
2371 }
2372
2373 /**
2374 * ixgbe_validate_mac_addr - Validate MAC address
2375 * @mac_addr: pointer to MAC address.
2376 *
2377 * Tests a MAC address to ensure it is a valid Individual Address.
2378 **/
2379 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2380 {
2381 s32 status = IXGBE_SUCCESS;
2382
2383 DEBUGFUNC("ixgbe_validate_mac_addr");
2384
2385 /* Make sure it is not a multicast address */
2386 if (IXGBE_IS_MULTICAST(mac_addr)) {
2387 status = IXGBE_ERR_INVALID_MAC_ADDR;
2388 /* Not a broadcast address */
2389 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2390 status = IXGBE_ERR_INVALID_MAC_ADDR;
2391 /* Reject the zero address */
2392 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2393 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2394 status = IXGBE_ERR_INVALID_MAC_ADDR;
2395 }
2396 return status;
2397 }
2398
/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_INVALID_ARGUMENT when @index is
 * out of range.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	/* The last two address bytes live in the low 16 bits of RAH */
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	/* Mark the entry valid only when the caller asked for it */
	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	/* Write RAL first; RAH carries the valid bit that arms the entry */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}
2452
2453 /**
2454 * ixgbe_clear_rar_generic - Remove Rx address register
2455 * @hw: pointer to hardware structure
2456 * @index: Receive address register to write
2457 *
2458 * Clears an ethernet address from a receive address register.
2459 **/
2460 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2461 {
2462 u32 rar_high;
2463 u32 rar_entries = hw->mac.num_rar_entries;
2464
2465 DEBUGFUNC("ixgbe_clear_rar_generic");
2466
2467 /* Make sure we are using a valid rar index range */
2468 if (index >= rar_entries) {
2469 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2470 "RAR index %d is out of range.\n", index);
2471 return IXGBE_ERR_INVALID_ARGUMENT;
2472 }
2473
2474 /*
2475 * Some parts put the VMDq setting in the extra RAH bits,
2476 * so save everything except the lower 16 bits that hold part
2477 * of the address and the address valid bit.
2478 */
2479 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2480 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2481
2482 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2483 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2484
2485 /* clear VMDq pool/queue selection for this RAR */
2486 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2487
2488 return IXGBE_SUCCESS;
2489 }
2490
/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* Reset address-filter accounting to a known state */
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] (the station address) is always counted as in use */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
2559
2560 /**
2561 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2562 * @hw: pointer to hardware structure
2563 * @addr: new address
2564 *
2565 * Adds it to unused receive address register or goes into promiscuous mode.
2566 **/
2567 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2568 {
2569 u32 rar_entries = hw->mac.num_rar_entries;
2570 u32 rar;
2571
2572 DEBUGFUNC("ixgbe_add_uc_addr");
2573
2574 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2575 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2576
2577 /*
2578 * Place this address in the RAR if there is room,
2579 * else put the controller into promiscuous mode
2580 */
2581 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2582 rar = hw->addr_ctrl.rar_used_count;
2583 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2584 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2585 hw->addr_ctrl.rar_used_count++;
2586 } else {
2587 hw->addr_ctrl.overflow_promisc++;
2588 }
2589
2590 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2591 }
2592
/**
 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 * @hw: pointer to hardware structure
 * @addr_list: the list of new addresses
 * @addr_count: number of addresses
 * @next: iterator function to walk the address list
 *
 * The given list replaces any existing list. Clears the secondary addrs from
 * receive address registers. Uses unused receive address registers for the
 * first secondary addresses, and falls back to promiscuous mode as needed.
 *
 * Drivers using secondary unicast addresses must set user_set_promisc when
 * manually putting the device into promiscuous mode.
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses; ixgbe_add_uc_addr() increments
	 * overflow_promisc once the RARs are exhausted.
	 */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2662
/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 *
 * Returns the 12-bit MTA bit-vector for @mc_addr.
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	DEBUGFUNC("ixgbe_mta_vector");

	/* Each case takes 12 consecutive bits from the top of the
	 * address; mc_addr[5] is the most significant byte.
	 */
	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		DEBUGOUT("MC filter type param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
2704
2705 /**
2706 * ixgbe_set_mta - Set bit-vector in multicast table
2707 * @hw: pointer to hardware structure
2708 * @hash_value: Multicast address hash value
2709 *
2710 * Sets the bit-vector in the multicast table.
2711 **/
2712 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2713 {
2714 u32 vector;
2715 u32 vector_bit;
2716 u32 vector_reg;
2717
2718 DEBUGFUNC("ixgbe_set_mta");
2719
2720 hw->addr_ctrl.mta_in_use++;
2721
2722 vector = ixgbe_mta_vector(hw, mc_addr);
2723 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2724
2725 /*
2726 * The MTA is a register array of 128 32-bit registers. It is treated
2727 * like an array of 4096 bits. We want to set bit
2728 * BitArray[vector_value]. So we figure out what register the bit is
2729 * in, read it, OR in the new bit, then write back the new value. The
2730 * register is determined by the upper 7 bits of the vector value and
2731 * the bit within that register are determined by the lower 5 bits of
2732 * the value.
2733 */
2734 vector_reg = (vector >> 5) & 0x7F;
2735 vector_bit = vector & 0x1F;
2736 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2737 }
2738
/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 * @clear: flag, when set clears the table beforehand
 *
 * When the clear flag is set, the given list replaces any existing list.
 * Hashes the given addresses into the multicast table.
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow: hash every address into the shadow table */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: push the whole shadow table to hardware */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	/* Turn the hash filter on only when at least one bit is set */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2790
2791 /**
2792 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2793 * @hw: pointer to hardware structure
2794 *
2795 * Enables multicast address in RAR and the use of the multicast hash table.
2796 **/
2797 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2798 {
2799 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2800
2801 DEBUGFUNC("ixgbe_enable_mc_generic");
2802
2803 if (a->mta_in_use > 0)
2804 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2805 hw->mac.mc_filter_type);
2806
2807 return IXGBE_SUCCESS;
2808 }
2809
2810 /**
2811 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2812 * @hw: pointer to hardware structure
2813 *
2814 * Disables multicast address in RAR and the use of the multicast hash table.
2815 **/
2816 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2817 {
2818 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2819
2820 DEBUGFUNC("ixgbe_disable_mc_generic");
2821
2822 if (a->mta_in_use > 0)
2823 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2824
2825 return IXGBE_SUCCESS;
2826 }
2827
/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 * Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS on a bad
 * pause-time/water-mark configuration, or IXGBE_ERR_CONFIG on an
 * invalid fc.current_mode.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		/* Only classes that will actually send pause frames
		 * (Tx pause enabled and a high water mark configured)
		 * need a sane low water mark.
		 */
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* unreachable: retained defensively after goto */
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* Water marks are stored in KB units; <<10 converts
			 * to bytes for the registers.
			 */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2960
2961 /**
2962 * ixgbe_negotiate_fc - Negotiate flow control
2963 * @hw: pointer to hardware structure
2964 * @adv_reg: flow control advertised settings
2965 * @lp_reg: link partner's flow control settings
2966 * @adv_sym: symmetric pause bit in advertisement
2967 * @adv_asm: asymmetric pause bit in advertisement
2968 * @lp_sym: symmetric pause bit in link partner advertisement
2969 * @lp_asm: asymmetric pause bit in link partner advertisement
2970 *
2971 * Find the intersection between advertised settings and link partner's
2972 * advertised settings
2973 **/
2974 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2975 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2976 {
2977 if ((!(adv_reg)) || (!(lp_reg))) {
2978 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2979 "Local or link partner's advertised flow control "
2980 "settings are NULL. Local: %x, link partner: %x\n",
2981 adv_reg, lp_reg);
2982 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2983 }
2984
2985 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2986 /*
2987 * Now we need to check if the user selected Rx ONLY
2988 * of pause frames. In this case, we had to advertise
2989 * FULL flow control because we could not advertise RX
2990 * ONLY. Hence, we must now check to see if we need to
2991 * turn OFF the TRANSMISSION of PAUSE frames.
2992 */
2993 if (hw->fc.requested_mode == ixgbe_fc_full) {
2994 hw->fc.current_mode = ixgbe_fc_full;
2995 DEBUGOUT("Flow Control = FULL.\n");
2996 } else {
2997 hw->fc.current_mode = ixgbe_fc_rx_pause;
2998 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2999 }
3000 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3001 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
3002 hw->fc.current_mode = ixgbe_fc_tx_pause;
3003 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
3004 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3005 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
3006 hw->fc.current_mode = ixgbe_fc_rx_pause;
3007 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
3008 } else {
3009 hw->fc.current_mode = ixgbe_fc_none;
3010 DEBUGOUT("Flow Control = NONE.\n");
3011 }
3012 return IXGBE_SUCCESS;
3013 }
3014
3015 /**
3016 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
3017 * @hw: pointer to hardware structure
3018 *
3019 * Enable flow control according on 1 gig fiber.
3020 **/
3021 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
3022 {
3023 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
3024 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3025
3026 /*
3027 * On multispeed fiber at 1g, bail out if
3028 * - link is up but AN did not complete, or if
3029 * - link is up and AN completed but timed out
3030 */
3031
3032 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
3033 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
3034 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
3035 DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
3036 goto out;
3037 }
3038
3039 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3040 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
3041
3042 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
3043 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
3044 IXGBE_PCS1GANA_ASM_PAUSE,
3045 IXGBE_PCS1GANA_SYM_PAUSE,
3046 IXGBE_PCS1GANA_ASM_PAUSE);
3047
3048 out:
3049 return ret_val;
3050 }
3051
3052 /**
3053 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
3054 * @hw: pointer to hardware structure
3055 *
3056 * Enable flow control according to IEEE clause 37.
3057 **/
3058 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3059 {
3060 u32 links2, anlp1_reg, autoc_reg, links;
3061 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3062
3063 /*
3064 * On backplane, bail out if
3065 * - backplane autoneg was not completed, or if
3066 * - we are 82599 and link partner is not AN enabled
3067 */
3068 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3069 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3070 DEBUGOUT("Auto-Negotiation did not complete\n");
3071 goto out;
3072 }
3073
3074 if (hw->mac.type == ixgbe_mac_82599EB) {
3075 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3076 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3077 DEBUGOUT("Link partner is not AN enabled\n");
3078 goto out;
3079 }
3080 }
3081 /*
3082 * Read the 10g AN autoc and LP ability registers and resolve
3083 * local flow control settings accordingly
3084 */
3085 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3086 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3087
3088 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3089 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3090 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3091
3092 out:
3093 return ret_val;
3094 }
3095
3096 /**
3097 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3098 * @hw: pointer to hardware structure
3099 *
3100 * Enable flow control according to IEEE clause 37.
3101 **/
3102 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3103 {
3104 u16 technology_ability_reg = 0;
3105 u16 lp_technology_ability_reg = 0;
3106
3107 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3108 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3109 &technology_ability_reg);
3110 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3111 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3112 &lp_technology_ability_reg);
3113
3114 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3115 (u32)lp_technology_ability_reg,
3116 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3117 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3118 }
3119
3120 /**
3121 * ixgbe_fc_autoneg - Configure flow control
3122 * @hw: pointer to hardware structure
3123 *
3124 * Compares our advertised flow control capabilities to those advertised by
3125 * our link partner, and determines the proper flow control mode to use.
3126 **/
3127 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3128 {
3129 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3130 ixgbe_link_speed speed;
3131 bool link_up;
3132
3133 DEBUGFUNC("ixgbe_fc_autoneg");
3134
3135 /*
3136 * AN should have completed when the cable was plugged in.
3137 * Look for reasons to bail out. Bail out if:
3138 * - FC autoneg is disabled, or if
3139 * - link is not up.
3140 */
3141 if (hw->fc.disable_fc_autoneg) {
3142 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3143 "Flow control autoneg is disabled");
3144 goto out;
3145 }
3146
3147 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3148 if (!link_up) {
3149 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3150 goto out;
3151 }
3152
3153 switch (hw->phy.media_type) {
3154 /* Autoneg flow control on fiber adapters */
3155 case ixgbe_media_type_fiber_fixed:
3156 case ixgbe_media_type_fiber_qsfp:
3157 case ixgbe_media_type_fiber:
3158 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3159 ret_val = ixgbe_fc_autoneg_fiber(hw);
3160 break;
3161
3162 /* Autoneg flow control on backplane adapters */
3163 case ixgbe_media_type_backplane:
3164 ret_val = ixgbe_fc_autoneg_backplane(hw);
3165 break;
3166
3167 /* Autoneg flow control on copper adapters */
3168 case ixgbe_media_type_copper:
3169 if (ixgbe_device_supports_autoneg_fc(hw))
3170 ret_val = ixgbe_fc_autoneg_copper(hw);
3171 break;
3172
3173 default:
3174 break;
3175 }
3176
3177 out:
3178 if (ret_val == IXGBE_SUCCESS) {
3179 hw->fc.fc_was_autonegged = TRUE;
3180 } else {
3181 hw->fc.fc_was_autonegged = FALSE;
3182 hw->fc.current_mode = hw->fc.requested_mode;
3183 }
3184 }
3185
3186 /*
3187 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3188 * @hw: pointer to hardware structure
3189 *
3190 * System-wide timeout range is encoded in PCIe Device Control2 register.
3191 *
3192 * Add 10% to specified maximum and return the number of times to poll for
3193 * completion timeout, in units of 100 microsec. Never return less than
3194 * 800 = 80 millisec.
3195 */
3196 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3197 {
3198 s16 devctl2;
3199 u32 pollcnt;
3200
3201 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3202 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3203
3204 switch (devctl2) {
3205 case IXGBE_PCIDEVCTRL2_65_130ms:
3206 pollcnt = 1300; /* 130 millisec */
3207 break;
3208 case IXGBE_PCIDEVCTRL2_260_520ms:
3209 pollcnt = 5200; /* 520 millisec */
3210 break;
3211 case IXGBE_PCIDEVCTRL2_1_2s:
3212 pollcnt = 20000; /* 2 sec */
3213 break;
3214 case IXGBE_PCIDEVCTRL2_4_8s:
3215 pollcnt = 80000; /* 8 sec */
3216 break;
3217 case IXGBE_PCIDEVCTRL2_17_34s:
3218 pollcnt = 34000; /* 34 sec */
3219 break;
3220 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3221 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3222 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3223 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3224 default:
3225 pollcnt = 800; /* 80 millisec minimum */
3226 break;
3227 }
3228
3229 /* add 10% to spec maximum */
3230 return (pollcnt * 11) / 10;
3231 }
3232
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;
	u16 value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear, 100 usec per iteration */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new master requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer skip the PCIe transaction-pending poll below and
	 * report success; the double-reset flag set above still applies. */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.  Poll interval derived from the PCIe
	 * completion-timeout setting (units of 100 usec, +10% margin).
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Bail out cleanly if the device was surprise-removed */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		     "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
3300
/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_SWFW_SYNC if the semaphore could not
 * be obtained within the retry window (200 tries x 5 ms = ~1 second).
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5;	/* FW bits sit 5 positions above SW bits */
	u32 timeout = 200;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Resource is free: claim it, drop the NVM
			 * semaphore and return holding the SWFW lock. */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}
3347
3348 /**
3349 * ixgbe_release_swfw_sync - Release SWFW semaphore
3350 * @hw: pointer to hardware structure
3351 * @mask: Mask to specify which semaphore to release
3352 *
3353 * Releases the SWFW semaphore through the GSSR register for the specified
3354 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3355 **/
3356 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3357 {
3358 u32 gssr;
3359 u32 swmask = mask;
3360
3361 DEBUGFUNC("ixgbe_release_swfw_sync");
3362
3363 ixgbe_get_eeprom_semaphore(hw);
3364
3365 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3366 gssr &= ~swmask;
3367 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3368
3369 ixgbe_release_eeprom_semaphore(hw);
3370 }
3371
3372 /**
3373 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3374 * @hw: pointer to hardware structure
3375 *
3376 * Stops the receive data path and waits for the HW to internally empty
3377 * the Rx security block
3378 **/
3379 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3380 {
3381 #define IXGBE_MAX_SECRX_POLL 4000
3382
3383 int i;
3384 int secrxreg;
3385
3386 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3387
3388
3389 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3390 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3391 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3392 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3393 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3394 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3395 break;
3396 else
3397 /* Use interrupt-safe sleep just in case */
3398 usec_delay(10);
3399 }
3400
3401 /* For informational purposes only */
3402 if (i >= IXGBE_MAX_SECRX_POLL)
3403 DEBUGOUT("Rx unit being enabled before security "
3404 "path fully disabled. Continuing with init.\n");
3405
3406 return IXGBE_SUCCESS;
3407 }
3408
/**
 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: set to FALSE; the generic path never takes the SW/FW lock
 * @reg_val: Value we read from AUTOC
 *
 * The default case requires no protection so just do the register read.
 */
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	*locked = FALSE;
	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}
3422
/**
 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @reg_val: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    previous read.  Unused here: the generic path needs no lock.
 *
 * The default case requires no protection so just do the register write.
 */
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
	UNREFERENCED_1PARAMETER(locked);

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
	return IXGBE_SUCCESS;
}
3439
3440 /**
3441 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3442 * @hw: pointer to hardware structure
3443 *
3444 * Enables the receive data path.
3445 **/
3446 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3447 {
3448 u32 secrxreg;
3449
3450 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3451
3452 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3453 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3454 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3455 IXGBE_WRITE_FLUSH(hw);
3456
3457 return IXGBE_SUCCESS;
3458 }
3459
3460 /**
3461 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3462 * @hw: pointer to hardware structure
3463 * @regval: register value to write to RXCTRL
3464 *
3465 * Enables the Rx DMA unit
3466 **/
3467 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3468 {
3469 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3470
3471 if (regval & IXGBE_RXCTRL_RXEN)
3472 ixgbe_enable_rx(hw);
3473 else
3474 ixgbe_disable_rx(hw);
3475
3476 return IXGBE_SUCCESS;
3477 }
3478
3479 /**
3480 * ixgbe_blink_led_start_generic - Blink LED based on index.
3481 * @hw: pointer to hardware structure
3482 * @index: led number to blink
3483 **/
3484 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3485 {
3486 ixgbe_link_speed speed = 0;
3487 bool link_up = 0;
3488 u32 autoc_reg = 0;
3489 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3490 s32 ret_val = IXGBE_SUCCESS;
3491 bool locked = FALSE;
3492
3493 DEBUGFUNC("ixgbe_blink_led_start_generic");
3494
3495 if (index > 3)
3496 return IXGBE_ERR_PARAM;
3497
3498 /*
3499 * Link must be up to auto-blink the LEDs;
3500 * Force it if link is down.
3501 */
3502 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3503
3504 if (!link_up) {
3505 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3506 if (ret_val != IXGBE_SUCCESS)
3507 goto out;
3508
3509 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3510 autoc_reg |= IXGBE_AUTOC_FLU;
3511
3512 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3513 if (ret_val != IXGBE_SUCCESS)
3514 goto out;
3515
3516 IXGBE_WRITE_FLUSH(hw);
3517 msec_delay(10);
3518 }
3519
3520 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3521 led_reg |= IXGBE_LED_BLINK(index);
3522 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3523 IXGBE_WRITE_FLUSH(hw);
3524
3525 out:
3526 return ret_val;
3527 }
3528
3529 /**
3530 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3531 * @hw: pointer to hardware structure
3532 * @index: led number to stop blinking
3533 **/
3534 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3535 {
3536 u32 autoc_reg = 0;
3537 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3538 s32 ret_val = IXGBE_SUCCESS;
3539 bool locked = FALSE;
3540
3541 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3542
3543 if (index > 3)
3544 return IXGBE_ERR_PARAM;
3545
3546 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3547 if (ret_val != IXGBE_SUCCESS)
3548 goto out;
3549
3550 autoc_reg &= ~IXGBE_AUTOC_FLU;
3551 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3552
3553 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3554 if (ret_val != IXGBE_SUCCESS)
3555 goto out;
3556
3557 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3558 led_reg &= ~IXGBE_LED_BLINK(index);
3559 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3560 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3561 IXGBE_WRITE_FLUSH(hw);
3562
3563 out:
3564 return ret_val;
3565 }
3566
3567 /**
3568 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3569 * @hw: pointer to hardware structure
3570 * @san_mac_offset: SAN MAC address offset
3571 *
3572 * This function will read the EEPROM location for the SAN MAC address
3573 * pointer, and returns the value at that location. This is used in both
3574 * get and set mac_addr routines.
3575 **/
3576 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3577 u16 *san_mac_offset)
3578 {
3579 s32 ret_val;
3580
3581 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3582
3583 /*
3584 * First read the EEPROM pointer to see if the MAC addresses are
3585 * available.
3586 */
3587 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3588 san_mac_offset);
3589 if (ret_val) {
3590 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3591 "eeprom at offset %d failed",
3592 IXGBE_SAN_MAC_ADDR_PTR);
3593 }
3594
3595 return ret_val;
3596 }
3597
3598 /**
3599 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3600 * @hw: pointer to hardware structure
3601 * @san_mac_addr: SAN MAC address
3602 *
3603 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3604 * per-port, so set_lan_id() must be called before reading the addresses.
3605 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3606 * upon for non-SFP connections, so we must call it here.
3607 **/
3608 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3609 {
3610 u16 san_mac_data, san_mac_offset;
3611 u8 i;
3612 s32 ret_val;
3613
3614 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3615
3616 /*
3617 * First read the EEPROM pointer to see if the MAC addresses are
3618 * available. If they're not, no point in calling set_lan_id() here.
3619 */
3620 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3621 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3622 goto san_mac_addr_out;
3623
3624 /* make sure we know which port we need to program */
3625 hw->mac.ops.set_lan_id(hw);
3626 /* apply the port offset to the address offset */
3627 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3628 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3629 for (i = 0; i < 3; i++) {
3630 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3631 &san_mac_data);
3632 if (ret_val) {
3633 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3634 "eeprom read at offset %d failed",
3635 san_mac_offset);
3636 goto san_mac_addr_out;
3637 }
3638 san_mac_addr[i * 2] = (u8)(san_mac_data);
3639 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3640 san_mac_offset++;
3641 }
3642 return IXGBE_SUCCESS;
3643
3644 san_mac_addr_out:
3645 /*
3646 * No addresses available in this EEPROM. It's not an
3647 * error though, so just wipe the local address and return.
3648 */
3649 for (i = 0; i < 6; i++)
3650 san_mac_addr[i] = 0xFF;
3651 return IXGBE_SUCCESS;
3652 }
3653
3654 /**
3655 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3656 * @hw: pointer to hardware structure
3657 * @san_mac_addr: SAN MAC address
3658 *
3659 * Write a SAN MAC address to the EEPROM.
3660 **/
3661 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3662 {
3663 s32 ret_val;
3664 u16 san_mac_data, san_mac_offset;
3665 u8 i;
3666
3667 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3668
3669 /* Look for SAN mac address pointer. If not defined, return */
3670 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3671 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3672 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3673
3674 /* Make sure we know which port we need to write */
3675 hw->mac.ops.set_lan_id(hw);
3676 /* Apply the port offset to the address offset */
3677 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3678 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3679
3680 for (i = 0; i < 3; i++) {
3681 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3682 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3683 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3684 san_mac_offset++;
3685 }
3686
3687 return IXGBE_SUCCESS;
3688 }
3689
3690 /**
3691 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3692 * @hw: pointer to hardware structure
3693 *
3694 * Read PCIe configuration space, and get the MSI-X vector count from
3695 * the capabilities table.
3696 **/
3697 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3698 {
3699 u16 msix_count = 1;
3700 u16 max_msix_count;
3701 u16 pcie_offset;
3702
3703 switch (hw->mac.type) {
3704 case ixgbe_mac_82598EB:
3705 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3706 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3707 break;
3708 case ixgbe_mac_82599EB:
3709 case ixgbe_mac_X540:
3710 case ixgbe_mac_X550:
3711 case ixgbe_mac_X550EM_x:
3712 case ixgbe_mac_X550EM_a:
3713 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3714 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3715 break;
3716 default:
3717 return msix_count;
3718 }
3719
3720 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3721 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3722 if (IXGBE_REMOVED(hw->hw_addr))
3723 msix_count = 0;
3724 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3725
3726 /* MSI-X count is zero-based in HW */
3727 msix_count++;
3728
3729 if (msix_count > max_msix_count)
3730 msix_count = max_msix_count;
3731
3732 return msix_count;
3733 }
3734
/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is already in; adds to the pool list
 *
 * On success the return value is the RAR index used (not IXGBE_SUCCESS);
 * IXGBE_ERR_INVALID_MAC_ADDR is returned when the table is full.
 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low  = addr[0] | (addr[1] << 8)
			    | (addr[2] << 16)
			    | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		/* RAH holds the top 16 address bits plus the AV (valid) bit */
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* High halves match; confirm with the low 32 bits */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;    /* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	return rar;
}
3803
3804 /**
3805 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3806 * @hw: pointer to hardware struct
3807 * @rar: receive address register index to disassociate
3808 * @vmdq: VMDq pool index to remove from the rar
3809 **/
3810 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3811 {
3812 u32 mpsar_lo, mpsar_hi;
3813 u32 rar_entries = hw->mac.num_rar_entries;
3814
3815 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3816
3817 /* Make sure we are using a valid rar index range */
3818 if (rar >= rar_entries) {
3819 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3820 "RAR index %d is out of range.\n", rar);
3821 return IXGBE_ERR_INVALID_ARGUMENT;
3822 }
3823
3824 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3825 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3826
3827 if (IXGBE_REMOVED(hw->hw_addr))
3828 goto done;
3829
3830 if (!mpsar_lo && !mpsar_hi)
3831 goto done;
3832
3833 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3834 if (mpsar_lo) {
3835 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3836 mpsar_lo = 0;
3837 }
3838 if (mpsar_hi) {
3839 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3840 mpsar_hi = 0;
3841 }
3842 } else if (vmdq < 32) {
3843 mpsar_lo &= ~(1 << vmdq);
3844 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3845 } else {
3846 mpsar_hi &= ~(1 << (vmdq - 32));
3847 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3848 }
3849
3850 /* was that the last pool using this rar? */
3851 if (mpsar_lo == 0 && mpsar_hi == 0 &&
3852 rar != 0 && rar != hw->mac.san_mac_rar_index)
3853 hw->mac.ops.clear_rar(hw, rar);
3854 done:
3855 return IXGBE_SUCCESS;
3856 }
3857
3858 /**
3859 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3860 * @hw: pointer to hardware struct
3861 * @rar: receive address register index to associate with a VMDq index
3862 * @vmdq: VMDq pool index
3863 **/
3864 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3865 {
3866 u32 mpsar;
3867 u32 rar_entries = hw->mac.num_rar_entries;
3868
3869 DEBUGFUNC("ixgbe_set_vmdq_generic");
3870
3871 /* Make sure we are using a valid rar index range */
3872 if (rar >= rar_entries) {
3873 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3874 "RAR index %d is out of range.\n", rar);
3875 return IXGBE_ERR_INVALID_ARGUMENT;
3876 }
3877
3878 if (vmdq < 32) {
3879 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3880 mpsar |= 1 << vmdq;
3881 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3882 } else {
3883 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3884 mpsar |= 1 << (vmdq - 32);
3885 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3886 }
3887 return IXGBE_SUCCESS;
3888 }
3889
3890 /**
3891 * This function should only be involved in the IOV mode.
3892 * In IOV mode, Default pool is next pool after the number of
3893 * VFs advertized and not 0.
3894 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3895 *
3896 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3897 * @hw: pointer to hardware struct
3898 * @vmdq: VMDq pool index
3899 **/
3900 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3901 {
3902 u32 rar = hw->mac.san_mac_rar_index;
3903
3904 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3905
3906 if (vmdq < 32) {
3907 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3908 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3909 } else {
3910 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3911 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3912 }
3913
3914 return IXGBE_SUCCESS;
3915 }
3916
3917 /**
3918 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3919 * @hw: pointer to hardware structure
3920 **/
3921 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3922 {
3923 int i;
3924
3925 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3926 DEBUGOUT(" Clearing UTA\n");
3927
3928 for (i = 0; i < 128; i++)
3929 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3930
3931 return IXGBE_SUCCESS;
3932 }
3933
/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vlvf_bypass: TRUE to only look for an existing entry and never claim
 *		 an empty slot
 *
 * return the VLVF index where this VLAN id should be placed,
 * or IXGBE_ERR_NO_SPACE when no usable slot exists
 *
 **/
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 *
	 * Note: first_empty_slot doubles as the result sentinel - it is 0
	 * ("none found yet"), a positive slot index, or the negative
	 * IXGBE_ERR_NO_SPACE code when bypassing.
	 */
	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;

	/* add VLAN enable bit for comparison */
	vlan |= IXGBE_VLVF_VIEN;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
	 * (slot 0 is reserved for VLAN 0 and never scanned)
	 */
	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (bits == vlan)
			return regindex;
		/* only record the first empty slot, and only when not
		 * bypassing (first_empty_slot is non-zero in that case) */
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN. Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");

	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
}
3981
3982 /**
3983 * ixgbe_set_vfta_generic - Set VLAN filter table
3984 * @hw: pointer to hardware structure
3985 * @vlan: VLAN id to write to VLAN filter
3986 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3987 * @vlan_on: boolean flag to turn on/off VLAN
3988 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3989 *
3990 * Turn on/off specified VLAN in the VLAN filter table.
3991 **/
3992 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3993 bool vlan_on, bool vlvf_bypass)
3994 {
3995 u32 regidx, vfta_delta, vfta;
3996 s32 ret_val;
3997
3998 DEBUGFUNC("ixgbe_set_vfta_generic");
3999
4000 if (vlan > 4095 || vind > 63)
4001 return IXGBE_ERR_PARAM;
4002
4003 /*
4004 * this is a 2 part operation - first the VFTA, then the
4005 * VLVF and VLVFB if VT Mode is set
4006 * We don't write the VFTA until we know the VLVF part succeeded.
4007 */
4008
4009 /* Part 1
4010 * The VFTA is a bitstring made up of 128 32-bit registers
4011 * that enable the particular VLAN id, much like the MTA:
4012 * bits[11-5]: which register
4013 * bits[4-0]: which bit in the register
4014 */
4015 regidx = vlan / 32;
4016 vfta_delta = 1 << (vlan % 32);
4017 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
4018
4019 /*
4020 * vfta_delta represents the difference between the current value
4021 * of vfta and the value we want in the register. Since the diff
4022 * is an XOR mask we can just update the vfta using an XOR
4023 */
4024 vfta_delta &= vlan_on ? ~vfta : vfta;
4025 vfta ^= vfta_delta;
4026
4027 /* Part 2
4028 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
4029 */
4030 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
4031 vfta, vlvf_bypass);
4032 if (ret_val != IXGBE_SUCCESS) {
4033 if (vlvf_bypass)
4034 goto vfta_update;
4035 return ret_val;
4036 }
4037
4038 vfta_update:
4039 /* Update VFTA now that we are ready for traffic */
4040 if (vfta_delta)
4041 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
4042
4043 return IXGBE_SUCCESS;
4044 }
4045
4046 /**
4047 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
4048 * @hw: pointer to hardware structure
4049 * @vlan: VLAN id to write to VLAN filter
4050 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
4051 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
4052 * @vfta_delta: pointer to the difference between the current value of VFTA
4053 * and the desired value
4054 * @vfta: the desired value of the VFTA
4055 * @vlvf_bypass: boolean flag indicating updating default pool is okay
4056 *
4057 * Turn on/off specified bit in VLVF table.
4058 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, u32 *vfta_delta, u32 vfta,
			   bool vlvf_bypass)
{
	u32 bits;
	s32 vlvf_index;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	/* VLAN IDs are 12 bits (0..4095) and there are only 64 pools */
	if (vlan > 4095 || vind > 63)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 * Either vlan_on
	 *  make sure the vlan is in VLVF
	 *  set the vind bit in the matching VLVFB
	 * Or !vlan_on
	 *  clear the pool bit and possibly the vind
	 */
	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
		return IXGBE_SUCCESS;

	/* Find (or allocate) the VLVF slot for this VLAN; a negative
	 * result is an error code that is passed straight to the caller.
	 */
	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0)
		return vlvf_index;

	/* Each VLVF entry has two 32-bit VLVFB pool-bitmap registers;
	 * vind / 32 selects the half that carries this pool's bit.
	 */
	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));

	/* set the pool bit */
	bits |= 1 << (vind % 32);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit (the XOR undoes the set just above) */
	bits ^= 1 << (vind % 32);

	/* If both halves of the pool bitmap are now empty, no pool uses
	 * this VLAN any more and the whole VLVF entry can be released.
	 */
	if (!bits &&
	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (*vfta_delta)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);

		/* disable VLVF and clear remaining bit from pool */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);

		return IXGBE_SUCCESS;
	}

	/* If there are still bits set in the VLVFB registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the VFTA entry bit.
	 * If the caller has requested that we clear the VFTA
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the VFTA VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * VFTA bit until all pools/VFs using that VLAN ID have also
	 * been cleared.  This will be indicated by "bits" being
	 * zero.
	 */
	*vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);

	return IXGBE_SUCCESS;
}
4134
4135 /**
4136 * ixgbe_clear_vfta_generic - Clear VLAN filter table
4137 * @hw: pointer to hardware structure
4138 *
4139 * Clears the VLAN filer table, and the VMDq index associated with the filter
4140 **/
4141 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4142 {
4143 u32 offset;
4144
4145 DEBUGFUNC("ixgbe_clear_vfta_generic");
4146
4147 for (offset = 0; offset < hw->mac.vft_size; offset++)
4148 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4149
4150 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4151 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4152 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4153 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4154 }
4155
4156 return IXGBE_SUCCESS;
4157 }
4158
4159 /**
4160 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4161 * @hw: pointer to hardware structure
4162 *
4163 * Contains the logic to identify if we need to verify link for the
4164 * crosstalk fix
4165 **/
4166 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4167 {
4168
4169 /* Does FW say we need the fix */
4170 if (!hw->need_crosstalk_fix)
4171 return FALSE;
4172
4173 /* Only consider SFP+ PHYs i.e. media type fiber */
4174 switch (hw->mac.ops.get_media_type(hw)) {
4175 case ixgbe_media_type_fiber:
4176 case ixgbe_media_type_fiber_qsfp:
4177 break;
4178 default:
4179 return FALSE;
4180 }
4181
4182 return TRUE;
4183 }
4184
4185 /**
4186 * ixgbe_check_mac_link_generic - Determine link and speed status
4187 * @hw: pointer to hardware structure
4188 * @speed: pointer to link speed
4189 * @link_up: TRUE when link is up
4190 * @link_up_wait_to_complete: bool used to wait for link up or not
4191 *
4192 * Reads the links register to determine if link is up and the current speed
4193 **/
4194 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4195 bool *link_up, bool link_up_wait_to_complete)
4196 {
4197 u32 links_reg, links_orig;
4198 u32 i;
4199
4200 DEBUGFUNC("ixgbe_check_mac_link_generic");
4201
4202 /* If Crosstalk fix enabled do the sanity check of making sure
4203 * the SFP+ cage is full.
4204 */
4205 if (ixgbe_need_crosstalk_fix(hw)) {
4206 u32 sfp_cage_full;
4207
4208 switch (hw->mac.type) {
4209 case ixgbe_mac_82599EB:
4210 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4211 IXGBE_ESDP_SDP2;
4212 break;
4213 case ixgbe_mac_X550EM_x:
4214 case ixgbe_mac_X550EM_a:
4215 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4216 IXGBE_ESDP_SDP0;
4217 break;
4218 default:
4219 /* sanity check - No SFP+ devices here */
4220 sfp_cage_full = FALSE;
4221 break;
4222 }
4223
4224 if (!sfp_cage_full) {
4225 *link_up = FALSE;
4226 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4227 return IXGBE_SUCCESS;
4228 }
4229 }
4230
4231 /* clear the old state */
4232 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4233
4234 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4235
4236 if (links_orig != links_reg) {
4237 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4238 links_orig, links_reg);
4239 }
4240
4241 if (link_up_wait_to_complete) {
4242 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4243 if (links_reg & IXGBE_LINKS_UP) {
4244 *link_up = TRUE;
4245 break;
4246 } else {
4247 *link_up = FALSE;
4248 }
4249 msec_delay(100);
4250 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4251 }
4252 } else {
4253 if (links_reg & IXGBE_LINKS_UP)
4254 *link_up = TRUE;
4255 else
4256 *link_up = FALSE;
4257 }
4258
4259 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4260 case IXGBE_LINKS_SPEED_10G_82599:
4261 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4262 if (hw->mac.type >= ixgbe_mac_X550) {
4263 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4264 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4265 }
4266 break;
4267 case IXGBE_LINKS_SPEED_1G_82599:
4268 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4269 break;
4270 case IXGBE_LINKS_SPEED_100_82599:
4271 *speed = IXGBE_LINK_SPEED_100_FULL;
4272 if (hw->mac.type >= ixgbe_mac_X550) {
4273 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4274 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4275 }
4276 break;
4277 case IXGBE_LINKS_SPEED_10_X550EM_A:
4278 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4279 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4280 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
4281 *speed = IXGBE_LINK_SPEED_10_FULL;
4282 break;
4283 default:
4284 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4285 }
4286
4287 return IXGBE_SUCCESS;
4288 }
4289
4290 /**
4291 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4292 * the EEPROM
4293 * @hw: pointer to hardware structure
4294 * @wwnn_prefix: the alternative WWNN prefix
4295 * @wwpn_prefix: the alternative WWPN prefix
4296 *
4297 * This function will read the EEPROM from the alternative SAN MAC address
4298 * block to check the support for the alternative WWNN/WWPN prefix support.
4299 **/
4300 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4301 u16 *wwpn_prefix)
4302 {
4303 u16 offset, caps;
4304 u16 alt_san_mac_blk_offset;
4305
4306 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4307
4308 /* clear output first */
4309 *wwnn_prefix = 0xFFFF;
4310 *wwpn_prefix = 0xFFFF;
4311
4312 /* check if alternative SAN MAC is supported */
4313 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4314 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4315 goto wwn_prefix_err;
4316
4317 if ((alt_san_mac_blk_offset == 0) ||
4318 (alt_san_mac_blk_offset == 0xFFFF))
4319 goto wwn_prefix_out;
4320
4321 /* check capability in alternative san mac address block */
4322 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4323 if (hw->eeprom.ops.read(hw, offset, &caps))
4324 goto wwn_prefix_err;
4325 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4326 goto wwn_prefix_out;
4327
4328 /* get the corresponding prefix for WWNN/WWPN */
4329 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4330 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4331 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4332 "eeprom read at offset %d failed", offset);
4333 }
4334
4335 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4336 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4337 goto wwn_prefix_err;
4338
4339 wwn_prefix_out:
4340 return IXGBE_SUCCESS;
4341
4342 wwn_prefix_err:
4343 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4344 "eeprom read at offset %d failed", offset);
4345 return IXGBE_SUCCESS;
4346 }
4347
4348 /**
4349 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4350 * @hw: pointer to hardware structure
4351 * @bs: the fcoe boot status
4352 *
4353 * This function will read the FCOE boot status from the iSCSI FCOE block
4354 **/
4355 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4356 {
4357 u16 offset, caps, flags;
4358 s32 status;
4359
4360 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4361
4362 /* clear output first */
4363 *bs = ixgbe_fcoe_bootstatus_unavailable;
4364
4365 /* check if FCOE IBA block is present */
4366 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4367 status = hw->eeprom.ops.read(hw, offset, &caps);
4368 if (status != IXGBE_SUCCESS)
4369 goto out;
4370
4371 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4372 goto out;
4373
4374 /* check if iSCSI FCOE block is populated */
4375 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4376 if (status != IXGBE_SUCCESS)
4377 goto out;
4378
4379 if ((offset == 0) || (offset == 0xFFFF))
4380 goto out;
4381
4382 /* read fcoe flags in iSCSI FCOE block */
4383 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4384 status = hw->eeprom.ops.read(hw, offset, &flags);
4385 if (status != IXGBE_SUCCESS)
4386 goto out;
4387
4388 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4389 *bs = ixgbe_fcoe_bootstatus_enabled;
4390 else
4391 *bs = ixgbe_fcoe_bootstatus_disabled;
4392
4393 out:
4394 return status;
4395 }
4396
4397 /**
4398 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4399 * @hw: pointer to hardware structure
4400 * @enable: enable or disable switch for MAC anti-spoofing
4401 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4402 *
4403 **/
4404 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4405 {
4406 int vf_target_reg = vf >> 3;
4407 int vf_target_shift = vf % 8;
4408 u32 pfvfspoof;
4409
4410 if (hw->mac.type == ixgbe_mac_82598EB)
4411 return;
4412
4413 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4414 if (enable)
4415 pfvfspoof |= (1 << vf_target_shift);
4416 else
4417 pfvfspoof &= ~(1 << vf_target_shift);
4418 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4419 }
4420
4421 /**
4422 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4423 * @hw: pointer to hardware structure
4424 * @enable: enable or disable switch for VLAN anti-spoofing
4425 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4426 *
4427 **/
4428 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4429 {
4430 int vf_target_reg = vf >> 3;
4431 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4432 u32 pfvfspoof;
4433
4434 if (hw->mac.type == ixgbe_mac_82598EB)
4435 return;
4436
4437 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4438 if (enable)
4439 pfvfspoof |= (1 << vf_target_shift);
4440 else
4441 pfvfspoof &= ~(1 << vf_target_shift);
4442 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4443 }
4444
4445 /**
4446 * ixgbe_get_device_caps_generic - Get additional device capabilities
4447 * @hw: pointer to hardware structure
4448 * @device_caps: the EEPROM word with the extra device capabilities
4449 *
4450 * This function will read the EEPROM location for the device capabilities,
4451 * and return the word through device_caps.
4452 **/
4453 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4454 {
4455 DEBUGFUNC("ixgbe_get_device_caps_generic");
4456
4457 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4458
4459 return IXGBE_SUCCESS;
4460 }
4461
4462 /**
4463 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4464 * @hw: pointer to hardware structure
4465 *
4466 **/
4467 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4468 {
4469 u32 regval;
4470 u32 i;
4471
4472 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4473
4474 /* Enable relaxed ordering */
4475 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4476 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4477 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4478 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4479 }
4480
4481 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4482 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4483 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4484 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4485 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4486 }
4487
4488 }
4489
4490 /**
4491 * ixgbe_calculate_checksum - Calculate checksum for buffer
4492 * @buffer: pointer to EEPROM
4493 * @length: size of EEPROM to calculate a checksum for
4494 * Calculates the checksum for some buffer on a specified length. The
4495 * checksum calculated is returned.
4496 **/
4497 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4498 {
4499 u32 i;
4500 u8 sum = 0;
4501
4502 DEBUGFUNC("ixgbe_calculate_checksum");
4503
4504 if (!buffer)
4505 return 0;
4506
4507 for (i = 0; i < length; i++)
4508 sum += buffer[i];
4509
4510 return (u8) (0 - sum);
4511 }
4512
4513 /**
4514 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
4515 * @hw: pointer to the HW structure
4516 * @buffer: command to write and where the return status will be placed
4517 * @length: length of buffer, must be multiple of 4 bytes
4518 * @timeout: time in ms to wait for command completion
4519 *
4520 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4521 * else returns semaphore error when encountering an error acquiring
4522 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4523 *
4524 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
4525 * by the caller.
4526 **/
4527 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
4528 u32 timeout)
4529 {
4530 u32 hicr, i, fwsts;
4531 u16 dword_len;
4532
4533 DEBUGFUNC("ixgbe_hic_unlocked");
4534
4535 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4536 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4537 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4538 }
4539
4540 /* Set bit 9 of FWSTS clearing FW reset indication */
4541 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4542 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4543
4544 /* Check that the host interface is enabled. */
4545 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4546 if (!(hicr & IXGBE_HICR_EN)) {
4547 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4548 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4549 }
4550
4551 /* Calculate length in DWORDs. We must be DWORD aligned */
4552 if (length % sizeof(u32)) {
4553 DEBUGOUT("Buffer length failure, not aligned to dword");
4554 return IXGBE_ERR_INVALID_ARGUMENT;
4555 }
4556
4557 dword_len = length >> 2;
4558
4559 /* The device driver writes the relevant command block
4560 * into the ram area.
4561 */
4562 for (i = 0; i < dword_len; i++)
4563 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4564 i, IXGBE_CPU_TO_LE32(buffer[i]));
4565
4566 /* Setting this bit tells the ARC that a new command is pending. */
4567 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4568
4569 for (i = 0; i < timeout; i++) {
4570 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4571 if (!(hicr & IXGBE_HICR_C))
4572 break;
4573 msec_delay(1);
4574 }
4575
4576 /* Check command completion */
4577 if ((timeout && i == timeout) ||
4578 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4579 ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4580 "Command has failed with no status valid.\n");
4581 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4582 }
4583
4584 return IXGBE_SUCCESS;
4585 }
4586
4587 /**
4588 * ixgbe_host_interface_command - Issue command to manageability block
4589 * @hw: pointer to the HW structure
4590 * @buffer: contains the command to write and where the return status will
4591 * be placed
4592 * @length: length of buffer, must be multiple of 4 bytes
4593 * @timeout: time in ms to wait for command completion
4594 * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
4595 * Needed because FW structures are big endian and decoding of
4596 * these fields can be 8 bit or 16 bit based on command. Decoding
4597 * is not easily understood without making a table of commands.
4598 * So we will leave this up to the caller to read back the data
4599 * in these cases.
4600 *
4601 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4602 * else returns semaphore error when encountering an error acquiring
4603 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4604 **/
4605 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4606 u32 length, u32 timeout, bool return_data)
4607 {
4608 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4609 u16 dword_len;
4610 u16 buf_len;
4611 s32 status;
4612 u32 bi;
4613
4614 DEBUGFUNC("ixgbe_host_interface_command");
4615
4616 if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4617 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4618 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4619 }
4620
4621 /* Take management host interface semaphore */
4622 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4623 if (status)
4624 return status;
4625
4626 status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
4627 if (status)
4628 goto rel_out;
4629
4630 if (!return_data)
4631 goto rel_out;
4632
4633 /* Calculate length in DWORDs */
4634 dword_len = hdr_size >> 2;
4635
4636 /* first pull in the header so we know the buffer length */
4637 for (bi = 0; bi < dword_len; bi++) {
4638 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4639 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4640 }
4641
4642 /* If there is any thing in data position pull it in */
4643 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4644 if (!buf_len)
4645 goto rel_out;
4646
4647 if (length < buf_len + hdr_size) {
4648 DEBUGOUT("Buffer not large enough for reply message.\n");
4649 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4650 goto rel_out;
4651 }
4652
4653 /* Calculate length in DWORDs, add 3 for odd lengths */
4654 dword_len = (buf_len + 3) >> 2;
4655
4656 /* Pull in the rest of the buffer (bi is where we left off) */
4657 for (; bi <= dword_len; bi++) {
4658 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4659 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4660 }
4661
4662 rel_out:
4663 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4664
4665 return status;
4666 }
4667
4668 /**
4669 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4670 * @hw: pointer to the HW structure
4671 * @maj: driver version major number
4672 * @minr: driver version minor number
4673 * @build: driver version build number
4674 * @sub: driver version sub build number
4675 *
4676 * Sends driver version number to firmware through the manageability
4677 * block. On success return IXGBE_SUCCESS
4678 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4679 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4680 **/
4681 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 minr,
4682 u8 build, u8 sub, u16 len,
4683 const char *driver_ver)
4684 {
4685 struct ixgbe_hic_drv_info fw_cmd;
4686 int i;
4687 s32 ret_val = IXGBE_SUCCESS;
4688
4689 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4690 UNREFERENCED_2PARAMETER(len, driver_ver);
4691
4692 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4693 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4694 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4695 fw_cmd.port_num = (u8)hw->bus.func;
4696 fw_cmd.ver_maj = maj;
4697 fw_cmd.ver_min = minr;
4698 fw_cmd.ver_build = build;
4699 fw_cmd.ver_sub = sub;
4700 fw_cmd.hdr.checksum = 0;
4701 fw_cmd.pad = 0;
4702 fw_cmd.pad2 = 0;
4703 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4704 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4705
4706 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4707 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4708 sizeof(fw_cmd),
4709 IXGBE_HI_COMMAND_TIMEOUT,
4710 TRUE);
4711 if (ret_val != IXGBE_SUCCESS)
4712 continue;
4713
4714 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4715 FW_CEM_RESP_STATUS_SUCCESS)
4716 ret_val = IXGBE_SUCCESS;
4717 else
4718 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4719
4720 break;
4721 }
4722
4723 return ret_val;
4724 }
4725
4726 /**
4727 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4728 * @hw: pointer to hardware structure
4729 * @num_pb: number of packet buffers to allocate
4730 * @headroom: reserve n KB of headroom
4731 * @strategy: packet buffer allocation strategy
4732 **/
4733 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4734 int strategy)
4735 {
4736 u32 pbsize = hw->mac.rx_pb_size;
4737 int i = 0;
4738 u32 rxpktsize, txpktsize, txpbthresh;
4739
4740 /* Reserve headroom */
4741 pbsize -= headroom;
4742
4743 if (!num_pb)
4744 num_pb = 1;
4745
4746 /* Divide remaining packet buffer space amongst the number of packet
4747 * buffers requested using supplied strategy.
4748 */
4749 switch (strategy) {
4750 case PBA_STRATEGY_WEIGHTED:
4751 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4752 * buffer with 5/8 of the packet buffer space.
4753 */
4754 rxpktsize = (pbsize * 5) / (num_pb * 4);
4755 pbsize -= rxpktsize * (num_pb / 2);
4756 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4757 for (; i < (num_pb / 2); i++)
4758 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4759 /* fall through - configure remaining packet buffers */
4760 case PBA_STRATEGY_EQUAL:
4761 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4762 for (; i < num_pb; i++)
4763 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4764 break;
4765 default:
4766 break;
4767 }
4768
4769 /* Only support an equally distributed Tx packet buffer strategy. */
4770 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4771 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4772 for (i = 0; i < num_pb; i++) {
4773 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4774 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4775 }
4776
4777 /* Clear unused TCs, if any, to zero buffer size*/
4778 for (; i < IXGBE_MAX_PB; i++) {
4779 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4780 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4781 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4782 }
4783 }
4784
4785 /**
4786 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4787 * @hw: pointer to the hardware structure
4788 *
4789 * The 82599 and x540 MACs can experience issues if TX work is still pending
4790 * when a reset occurs. This function prevents this by flushing the PCIe
4791 * buffers on the system.
4792 **/
4793 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4794 {
4795 u32 gcr_ext, hlreg0, i, poll;
4796 u16 value;
4797
4798 /*
4799 * If double reset is not requested then all transactions should
4800 * already be clear and as such there is no work to do
4801 */
4802 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4803 return;
4804
4805 /*
4806 * Set loopback enable to prevent any transmits from being sent
4807 * should the link come up. This assumes that the RXCTRL.RXEN bit
4808 * has already been cleared.
4809 */
4810 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4811 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4812
4813 /* Wait for a last completion before clearing buffers */
4814 IXGBE_WRITE_FLUSH(hw);
4815 msec_delay(3);
4816
4817 /*
4818 * Before proceeding, make sure that the PCIe block does not have
4819 * transactions pending.
4820 */
4821 poll = ixgbe_pcie_timeout_poll(hw);
4822 for (i = 0; i < poll; i++) {
4823 usec_delay(100);
4824 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
4825 if (IXGBE_REMOVED(hw->hw_addr))
4826 goto out;
4827 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4828 goto out;
4829 }
4830
4831 out:
4832 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4833 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4834 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4835 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4836
4837 /* Flush all writes and allow 20usec for all transactions to clear */
4838 IXGBE_WRITE_FLUSH(hw);
4839 usec_delay(20);
4840
4841 /* restore previous register values */
4842 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4843 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4844 }
4845
4846 /**
4847 * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
4848 *
4849 * @hw: pointer to hardware structure
4850 * @cmd: Command we send to the FW
4851 * @status: The reply from the FW
4852 *
4853 * Bit-bangs the cmd to the by_pass FW status points to what is returned.
4854 **/
4855 #define IXGBE_BYPASS_BB_WAIT 1
4856 s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
4857 {
4858 int i;
4859 u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
4860 u32 esdp;
4861
4862 if (!status)
4863 return IXGBE_ERR_PARAM;
4864
4865 *status = 0;
4866
4867 /* SDP vary by MAC type */
4868 switch (hw->mac.type) {
4869 case ixgbe_mac_82599EB:
4870 sck = IXGBE_ESDP_SDP7;
4871 sdi = IXGBE_ESDP_SDP0;
4872 sdo = IXGBE_ESDP_SDP6;
4873 dir_sck = IXGBE_ESDP_SDP7_DIR;
4874 dir_sdi = IXGBE_ESDP_SDP0_DIR;
4875 dir_sdo = IXGBE_ESDP_SDP6_DIR;
4876 break;
4877 case ixgbe_mac_X540:
4878 sck = IXGBE_ESDP_SDP2;
4879 sdi = IXGBE_ESDP_SDP0;
4880 sdo = IXGBE_ESDP_SDP1;
4881 dir_sck = IXGBE_ESDP_SDP2_DIR;
4882 dir_sdi = IXGBE_ESDP_SDP0_DIR;
4883 dir_sdo = IXGBE_ESDP_SDP1_DIR;
4884 break;
4885 default:
4886 return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4887 }
4888
4889 /* Set SDP pins direction */
4890 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4891 esdp |= dir_sck; /* SCK as output */
4892 esdp |= dir_sdi; /* SDI as output */
4893 esdp &= ~dir_sdo; /* SDO as input */
4894 esdp |= sck;
4895 esdp |= sdi;
4896 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4897 IXGBE_WRITE_FLUSH(hw);
4898 msec_delay(IXGBE_BYPASS_BB_WAIT);
4899
4900 /* Generate start condition */
4901 esdp &= ~sdi;
4902 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4903 IXGBE_WRITE_FLUSH(hw);
4904 msec_delay(IXGBE_BYPASS_BB_WAIT);
4905
4906 esdp &= ~sck;
4907 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4908 IXGBE_WRITE_FLUSH(hw);
4909 msec_delay(IXGBE_BYPASS_BB_WAIT);
4910
4911 /* Clock out the new control word and clock in the status */
4912 for (i = 0; i < 32; i++) {
4913 if ((cmd >> (31 - i)) & 0x01) {
4914 esdp |= sdi;
4915 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4916 } else {
4917 esdp &= ~sdi;
4918 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4919 }
4920 IXGBE_WRITE_FLUSH(hw);
4921 msec_delay(IXGBE_BYPASS_BB_WAIT);
4922
4923 esdp |= sck;
4924 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4925 IXGBE_WRITE_FLUSH(hw);
4926 msec_delay(IXGBE_BYPASS_BB_WAIT);
4927
4928 esdp &= ~sck;
4929 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4930 IXGBE_WRITE_FLUSH(hw);
4931 msec_delay(IXGBE_BYPASS_BB_WAIT);
4932
4933 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4934 if (esdp & sdo)
4935 *status = (*status << 1) | 0x01;
4936 else
4937 *status = (*status << 1) | 0x00;
4938 msec_delay(IXGBE_BYPASS_BB_WAIT);
4939 }
4940
4941 /* stop condition */
4942 esdp |= sck;
4943 esdp &= ~sdi;
4944 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4945 IXGBE_WRITE_FLUSH(hw);
4946 msec_delay(IXGBE_BYPASS_BB_WAIT);
4947
4948 esdp |= sdi;
4949 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4950 IXGBE_WRITE_FLUSH(hw);
4951
4952 /* set the page bits to match the cmd that the status it belongs to */
4953 *status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
4954
4955 return IXGBE_SUCCESS;
4956 }
4957
4958 /**
4959 * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
4960 *
4961 * If we send a write we can't be sure it took until we can read back
4962 * that same register. It can be a problem as some of the feilds may
4963 * for valid reasons change inbetween the time wrote the register and
4964 * we read it again to verify. So this function check everything we
4965 * can check and then assumes it worked.
4966 *
4967 * @u32 in_reg - The register cmd for the bit-bang read.
4968 * @u32 out_reg - The register returned from a bit-bang read.
4969 **/
4970 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
4971 {
4972 u32 mask;
4973
4974 /* Page must match for all control pages */
4975 if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
4976 return FALSE;
4977
4978 switch (in_reg & BYPASS_PAGE_M) {
4979 case BYPASS_PAGE_CTL0:
4980 /* All the following can't change since the last write
4981 * - All the event actions
4982 * - The timeout value
4983 */
4984 mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
4985 BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
4986 BYPASS_WDTIMEOUT_M |
4987 BYPASS_WDT_VALUE_M;
4988 if ((out_reg & mask) != (in_reg & mask))
4989 return FALSE;
4990
4991 /* 0x0 is never a valid value for bypass status */
4992 if (!(out_reg & BYPASS_STATUS_OFF_M))
4993 return FALSE;
4994 break;
4995 case BYPASS_PAGE_CTL1:
4996 /* All the following can't change since the last write
4997 * - time valid bit
4998 * - time we last sent
4999 */
5000 mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
5001 if ((out_reg & mask) != (in_reg & mask))
5002 return FALSE;
5003 break;
5004 case BYPASS_PAGE_CTL2:
5005 /* All we can check in this page is control number
5006 * which is already done above.
5007 */
5008 break;
5009 }
5010
5011 /* We are as sure as we can be return TRUE */
5012 return TRUE;
5013 }
5014
5015 /**
5016 * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
5017 *
5018 * @hw: pointer to hardware structure
5019 * @cmd: The control word we are setting.
5020 * @event: The event we are setting in the FW. This also happens to
5021 * be the mask for the event we are setting (handy)
5022 * @action: The action we set the event to in the FW. This is in a
5023 * bit field that happens to be what we want to put in
5024 * the event spot (also handy)
5025 **/
5026 s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
5027 u32 action)
5028 {
5029 u32 by_ctl = 0;
5030 u32 cmd, verify;
5031 u32 count = 0;
5032
5033 /* Get current values */
5034 cmd = ctrl; /* just reading only need control number */
5035 if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5036 return IXGBE_ERR_INVALID_ARGUMENT;
5037
5038 /* Set to new action */
5039 cmd = (by_ctl & ~event) | BYPASS_WE | action;
5040 if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5041 return IXGBE_ERR_INVALID_ARGUMENT;
5042
5043 /* Page 0 force a FW eeprom write which is slow so verify */
5044 if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
5045 verify = BYPASS_PAGE_CTL0;
5046 do {
5047 if (count++ > 5)
5048 return IXGBE_BYPASS_FW_WRITE_FAILURE;
5049
5050 if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
5051 return IXGBE_ERR_INVALID_ARGUMENT;
5052 } while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
5053 } else {
5054 /* We have give the FW time for the write to stick */
5055 msec_delay(100);
5056 }
5057
5058 return IXGBE_SUCCESS;
5059 }
5060
5061 /**
5062 * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
5063 *
5064 * @hw: pointer to hardware structure
5065 * @addr: The bypass eeprom address to read.
5066 * @value: The 8b of data at the address above.
5067 **/
5068 s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
5069 {
5070 u32 cmd;
5071 u32 status;
5072
5073
5074 /* send the request */
5075 cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
5076 cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
5077 if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5078 return IXGBE_ERR_INVALID_ARGUMENT;
5079
5080 /* We have give the FW time for the write to stick */
5081 msec_delay(100);
5082
5083 /* now read the results */
5084 cmd &= ~BYPASS_WE;
5085 if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5086 return IXGBE_ERR_INVALID_ARGUMENT;
5087
5088 *value = status & BYPASS_CTL2_DATA_M;
5089
5090 return IXGBE_SUCCESS;
5091 }
5092
5093 /**
5094 * ixgbe_get_orom_version - Return option ROM from EEPROM
5095 *
5096 * @hw: pointer to hardware structure
5097 * @nvm_ver: pointer to output structure
5098 *
5099 * if valid option ROM version, nvm_ver->or_valid set to TRUE
5100 * else nvm_ver->or_valid is FALSE.
5101 **/
5102 void ixgbe_get_orom_version(struct ixgbe_hw *hw,
5103 struct ixgbe_nvm_version *nvm_ver)
5104 {
5105 u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
5106
5107 nvm_ver->or_valid = FALSE;
5108 /* Option Rom may or may not be present. Start with pointer */
5109 hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
5110
5111 /* make sure offset is valid */
5112 if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
5113 return;
5114
5115 hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
5116 hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
5117
5118 /* option rom exists and is valid */
5119 if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
5120 eeprom_cfg_blkl == NVM_VER_INVALID ||
5121 eeprom_cfg_blkh == NVM_VER_INVALID)
5122 return;
5123
5124 nvm_ver->or_valid = TRUE;
5125 nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
5126 nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
5127 (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
5128 nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
5129 }
5130
5131 /**
5132 * ixgbe_get_oem_prod_version - Return OEM Product version
5133 *
5134 * @hw: pointer to hardware structure
5135 * @nvm_ver: pointer to output structure
5136 *
5137 * if valid OEM product version, nvm_ver->oem_valid set to TRUE
5138 * else nvm_ver->oem_valid is FALSE.
5139 **/
5140 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
5141 struct ixgbe_nvm_version *nvm_ver)
5142 {
5143 u16 rel_num, prod_ver, mod_len, cap, offset;
5144
5145 nvm_ver->oem_valid = FALSE;
5146 hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
5147
5148 /* Return is offset to OEM Product Version block is invalid */
5149 if (offset == 0x0 || offset == NVM_INVALID_PTR)
5150 return;
5151
5152 /* Read product version block */
5153 hw->eeprom.ops.read(hw, offset, &mod_len);
5154 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
5155
5156 /* Return if OEM product version block is invalid */
5157 if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
5158 (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
5159 return;
5160
5161 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
5162 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
5163
5164 /* Return if version is invalid */
5165 if ((rel_num | prod_ver) == 0x0 ||
5166 rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
5167 return;
5168
5169 nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
5170 nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
5171 nvm_ver->oem_release = rel_num;
5172 nvm_ver->oem_valid = TRUE;
5173 }
5174
5175 /**
5176 * ixgbe_get_etk_id - Return Etrack ID from EEPROM
5177 *
5178 * @hw: pointer to hardware structure
5179 * @nvm_ver: pointer to output structure
5180 *
5181 * word read errors will return 0xFFFF
5182 **/
5183 void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
5184 {
5185 u16 etk_id_l, etk_id_h;
5186
5187 if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
5188 etk_id_l = NVM_VER_INVALID;
5189 if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
5190 etk_id_h = NVM_VER_INVALID;
5191
5192 /* The word order for the version format is determined by high order
5193 * word bit 15.
5194 */
5195 if ((etk_id_h & NVM_ETK_VALID) == 0) {
5196 nvm_ver->etk_id = etk_id_h;
5197 nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
5198 } else {
5199 nvm_ver->etk_id = etk_id_l;
5200 nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
5201 }
5202 }
5203
5204
5205 /**
5206 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
5207 * @hw: pointer to hardware structure
5208 * @map: pointer to u8 arr for returning map
5209 *
5210 * Read the rtrup2tc HW register and resolve its content into map
5211 **/
5212 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
5213 {
5214 u32 reg, i;
5215
5216 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
5217 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
5218 map[i] = IXGBE_RTRUP2TC_UP_MASK &
5219 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5220 return;
5221 }
5222
/**
 * ixgbe_disable_rx_generic - Disable the Rx data path
 * @hw: pointer to hardware structure
 *
 * Clears RXCTRL.RXEN if it is set.  On parts other than 82598 the
 * VT loopback enable bit must be cleared first; whether it was set
 * is recorded in hw->mac.set_lben so ixgbe_enable_rx_generic() can
 * restore it later.
 **/
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		/* 82598 has no PFDTXGSWC register */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
				/* clear loopback, remember it was on */
				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
				hw->mac.set_lben = TRUE;
			} else {
				hw->mac.set_lben = FALSE;
			}
		}
		rxctrl &= ~IXGBE_RXCTRL_RXEN;
		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	}
}
5244
/**
 * ixgbe_enable_rx_generic - Enable the Rx data path
 * @hw: pointer to hardware structure
 *
 * Sets RXCTRL.RXEN, then re-enables VT loopback if
 * ixgbe_disable_rx_generic() previously turned it off
 * (tracked via hw->mac.set_lben).
 **/
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));

	/* 82598 has no PFDTXGSWC register */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (hw->mac.set_lben) {
			/* restore the loopback enable cleared at disable */
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = FALSE;
		}
	}
}
5262
5263 /**
5264 * ixgbe_mng_present - returns TRUE when management capability is present
5265 * @hw: pointer to hardware structure
5266 */
5267 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5268 {
5269 u32 fwsm;
5270
5271 if (hw->mac.type < ixgbe_mac_82599EB)
5272 return FALSE;
5273
5274 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5275 return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
5276 }
5277
/**
 * ixgbe_mng_enabled - Is the manageability engine enabled?
 * @hw: pointer to hardware structure
 *
 * Returns TRUE if the manageability engine is enabled, i.e. the FW is
 * in pass-through mode, management traffic reception is on, and (on
 * X540 and earlier) manageability clock gating is not active.
 **/
bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
	u32 fwsm, manc, factps;

	/* FW must be in pass-through mode */
	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
		return FALSE;

	/* reception of management (TCO) traffic must be enabled */
	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
		return FALSE;

	/* clock-gated manageability counts as disabled on older parts */
	if (hw->mac.type <= ixgbe_mac_X540) {
		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
		if (factps & IXGBE_FACTPS_MNGCG)
			return FALSE;
	}

	return TRUE;
}
5304
/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restarts link.
 *
 * Tries each requested-and-supported speed in priority order (10G
 * first, then 1G) until link comes up, then records the attempted
 * speeds in hw->phy.autoneg_advertised.  If no speed links up, falls
 * back by recursing once with only the highest speed tried.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* 1G is only the highest if 10G was not requested */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	if (speed == 0) {
		/* Disable the Tx laser for media none */
		ixgbe_disable_tx_laser(hw);

		goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
							       highest_link_speed,
							       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
5458
/**
 * ixgbe_set_soft_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via the soft rate select: read-modify-write of
 * the SFF-8472 optional status/control byte (RS0) followed by the
 * extended status/control byte (RS1) over i2c.  Failures are logged
 * and the routine bails out (best effort, no status returned).
 */
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	s32 status;
	u8 rs, eeprom_data;

	/* map the requested speed onto the rate-select bit value */
	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		/* one bit mask same as setting on */
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	/* Set RS0 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
		goto out;
	}

	/* update only the rate-select bits, preserve the rest */
	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
		goto out;
	}

	/* Set RS1 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
		goto out;
	}

	/* update only the rate-select bits, preserve the rest */
	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
		goto out;
	}
out:
	return;
}
5525