ixgbe_common.c revision 1.14 1 /* $NetBSD: ixgbe_common.c,v 1.14 2017/08/30 08:49:18 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 320688 2017-07-05 17:27:03Z erj $*/
36
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
39 #include "ixgbe_dcb.h"
40 #include "ixgbe_dcb_82599.h"
41 #include "ixgbe_api.h"
42
43 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
44 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
45 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
46 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
47 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
48 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
49 u16 count);
50 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
51 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
52 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
53 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
54
55 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
56 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
57 u16 *san_mac_offset);
58 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data);
60 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
61 u16 words, u16 *data);
62 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
63 u16 offset);
64
/**
 * ixgbe_init_ops_generic - Inits function ptrs
 * @hw: pointer to the hardware structure
 *
 * Initialize the function pointers to the generic (MAC-independent)
 * implementations.  Entries set to NULL here have no generic version;
 * presumably they are filled in later by the MAC-specific init code —
 * callers must not invoke them before that happens.
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	/* Sample EEC once up front; it decides the EEPROM read method below */
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				      ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;
	mac->ops.set_vmdq = NULL;
	mac->ops.clear_vmdq = NULL;
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;
	mac->ops.set_vfta = NULL;
	mac->ops.set_vlvf = NULL;
	mac->ops.init_uta_tables = NULL;
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg;

	/* Link - all NULL here; link handling is entirely MAC-specific */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}
154
/**
 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
 * of flow control
 * @hw: pointer to hardware structure
 *
 * This function returns TRUE if the device supports flow control
 * autonegotiation, and FALSE if it does not.
 *
 * Decision is by media type; for fiber it additionally depends on the
 * negotiated link speed (only 1G links advertise FC autoneg), and for
 * copper only an explicit list of device IDs qualifies.
 **/
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
	bool supported = FALSE;
	ixgbe_link_speed speed;	/* out-param, filled by check_link() */
	bool link_up;		/* out-param, filled by check_link() */

	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");

	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		/* flow control autoneg black list */
		switch (hw->device_id) {
		case IXGBE_DEV_ID_X550EM_A_SFP:
		case IXGBE_DEV_ID_X550EM_A_SFP_N:
		case IXGBE_DEV_ID_X550EM_A_QSFP:
		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
			supported = FALSE;
			break;
		default:
			hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
			/* if link is down, assume supported */
			if (link_up)
				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
				TRUE : FALSE;
			else
				supported = TRUE;
		}

		break;
	case ixgbe_media_type_backplane:
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
			supported = FALSE;
		else
			supported = TRUE;
		break;
	case ixgbe_media_type_copper:
		/* only some copper devices support flow control autoneg */
		switch (hw->device_id) {
		case IXGBE_DEV_ID_82599_T3_LOM:
		case IXGBE_DEV_ID_X540T:
		case IXGBE_DEV_ID_X540T1:
		case IXGBE_DEV_ID_X540_BYPASS:
		case IXGBE_DEV_ID_X550T:
		case IXGBE_DEV_ID_X550T1:
		case IXGBE_DEV_ID_X550EM_X_10G_T:
		case IXGBE_DEV_ID_X550EM_A_10G_T:
		case IXGBE_DEV_ID_X550EM_A_1G_T:
		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
			supported = TRUE;
			break;
		default:
			supported = FALSE;
		}
		/* FALLTHROUGH - outer default only breaks */
	default:
		break;
	}

	if (!supported)
		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
			"Device %x does not support flow control autoneg",
			hw->device_id);
	return supported;
}
229
/**
 * ixgbe_setup_fc_generic - Set up flow control
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.
 *
 * Programs the 1G (PCS1GANA) and, depending on media type, the 10G
 * backplane (AUTOC, via protected read/write) or copper PHY (MDIO
 * autoneg advertisement) pause bits from hw->fc.requested_mode, then
 * restarts autonegotiation where applicable.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS for a mode
 * not allowed in strict IEEE operation, IXGBE_ERR_CONFIG for an
 * unknown mode, or an error from the protected AUTOC accessors.
 **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg = 0, reg_bp = 0;	/* PCS1GANA / backplane AUTOC images */
	u16 reg_cu = 0;			/* copper PHY autoneg advertisement */
	bool locked = FALSE;		/* whether AUTOC RMW lock was taken */

	DEBUGFUNC("ixgbe_setup_fc_generic");

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in. If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* fall through - only backplane uses autoc */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		    (ixgbe_device_supports_autoneg_fc(hw))) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}
385
/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 *
 * Returns IXGBE_SUCCESS, or the flow-control setup error when that step
 * fails with anything other than IXGBE_NOT_IMPLEMENTED.
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control; NOT_IMPLEMENTED is tolerated on purpose */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
		return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = FALSE;
		else
			hw->need_crosstalk_fix = TRUE;
		break;
	default:
		hw->need_crosstalk_fix = FALSE;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

	return IXGBE_SUCCESS;
}
448
449 /**
450 * ixgbe_start_hw_gen2 - Init sequence for common device family
451 * @hw: pointer to hw structure
452 *
453 * Performs the init sequence common to the second generation
454 * of 10 GbE devices.
455 * Devices in the second generation:
456 * 82599
457 * X540
458 **/
459 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
460 {
461 u32 i;
462 u32 regval;
463
464 /* Clear the rate limiters */
465 for (i = 0; i < hw->mac.max_tx_queues; i++) {
466 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
467 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
468 }
469 IXGBE_WRITE_FLUSH(hw);
470
471 /* Disable relaxed ordering */
472 for (i = 0; i < hw->mac.max_tx_queues; i++) {
473 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
474 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
475 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
476 }
477
478 for (i = 0; i < hw->mac.max_rx_queues; i++) {
479 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
480 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
481 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
482 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
483 }
484
485 return IXGBE_SUCCESS;
486 }
487
488 /**
489 * ixgbe_init_hw_generic - Generic hardware initialization
490 * @hw: pointer to hardware structure
491 *
492 * Initialize the hardware by resetting the hardware, filling the bus info
493 * structure and media type, clears all on chip counters, initializes receive
494 * address registers, multicast table, VLAN filter table, calls routine to set
495 * up link and flow control settings, and leaves transmit and receive units
496 * disabled and uninitialized
497 **/
498 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
499 {
500 s32 status;
501
502 DEBUGFUNC("ixgbe_init_hw_generic");
503
504 /* Reset the hardware */
505 status = hw->mac.ops.reset_hw(hw);
506
507 if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
508 /* Start the HW */
509 status = hw->mac.ops.start_hw(hw);
510 }
511
512 /* Initialize the LED link active for LED blink support */
513 if (hw->mac.ops.init_led_link_act)
514 hw->mac.ops.init_led_link_act(hw);
515
516 if (status != IXGBE_SUCCESS)
517 DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
518
519 return status;
520 }
521
/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.  All read results are
 * intentionally discarded; only the read's side effect matters.
 * Register names differ by MAC generation, hence the mac.type branches.
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_READ_REG(hw, IXGBE_MBSDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Link-level flow control; 82599+ use the *CNT register names */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority flow control counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));

	/* Rx packet/byte size histogram and totals */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);

	/* Tx packet size histogram */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Per-queue counters */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/*
	 * X540/X550 PHY-resident counters: read (into the scratch
	 * variable i and discarded) to clear them as well.
	 */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
637
/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.  Handles both the
 * string format (word 0 == IXGBE_PBANUM_PTR_GUARD, word 1 points to a
 * length-prefixed block) and the legacy format (the two words are raw
 * hex digits decoded as "XXXX-X0XX").
 *
 * Returns IXGBE_SUCCESS, an EEPROM read error, IXGBE_ERR_NO_SPACE when
 * the caller's buffer is too small, IXGBE_ERR_PBA_SECTION for a bad
 * block length, or IXGBE_ERR_INVALID_ARGUMENT for a NULL buffer.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		/* fixed 0 nibble; turned into ASCII '0' by the loop below */
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* String format: first word of the pointed-to block is its length */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two ASCII characters, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
748
749 /**
750 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
751 * @hw: pointer to hardware structure
752 * @pba_num: stores the part number from the EEPROM
753 *
754 * Reads the part number from the EEPROM.
755 **/
756 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
757 {
758 s32 ret_val;
759 u16 data;
760
761 DEBUGFUNC("ixgbe_read_pba_num_generic");
762
763 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
764 if (ret_val) {
765 DEBUGOUT("NVM Read Error\n");
766 return ret_val;
767 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
768 DEBUGOUT("NVM Not supported\n");
769 return IXGBE_NOT_IMPLEMENTED;
770 }
771 *pba_num = (u32)(data << 16);
772
773 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
774 if (ret_val) {
775 DEBUGOUT("NVM Read Error\n");
776 return ret_val;
777 }
778 *pba_num |= data;
779
780 return IXGBE_SUCCESS;
781 }
782
783 /**
784 * ixgbe_read_pba_raw
785 * @hw: pointer to the HW structure
786 * @eeprom_buf: optional pointer to EEPROM image
787 * @eeprom_buf_size: size of EEPROM image in words
788 * @max_pba_block_size: PBA block size limit
789 * @pba: pointer to output PBA structure
790 *
791 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
792 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
793 *
794 **/
795 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
796 u32 eeprom_buf_size, u16 max_pba_block_size,
797 struct ixgbe_pba *pba)
798 {
799 s32 ret_val;
800 u16 pba_block_size;
801
802 if (pba == NULL)
803 return IXGBE_ERR_PARAM;
804
805 if (eeprom_buf == NULL) {
806 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
807 &pba->word[0]);
808 if (ret_val)
809 return ret_val;
810 } else {
811 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
812 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
813 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
814 } else {
815 return IXGBE_ERR_PARAM;
816 }
817 }
818
819 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
820 if (pba->pba_block == NULL)
821 return IXGBE_ERR_PARAM;
822
823 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
824 eeprom_buf_size,
825 &pba_block_size);
826 if (ret_val)
827 return ret_val;
828
829 if (pba_block_size > max_pba_block_size)
830 return IXGBE_ERR_PARAM;
831
832 if (eeprom_buf == NULL) {
833 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
834 pba_block_size,
835 pba->pba_block);
836 if (ret_val)
837 return ret_val;
838 } else {
839 if (eeprom_buf_size > (u32)(pba->word[1] +
840 pba_block_size)) {
841 memcpy(pba->pba_block,
842 &eeprom_buf[pba->word[1]],
843 pba_block_size * sizeof(u16));
844 } else {
845 return IXGBE_ERR_PARAM;
846 }
847 }
848 }
849
850 return IXGBE_SUCCESS;
851 }
852
/**
 * ixgbe_write_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba: pointer to PBA structure
 *
 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 * When pba->word[0] is the string-format guard, pba->pba_block is also
 * written out; its first word (pba_block[0]) is the block length in
 * words and determines how much is written.
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Write the two PBA pointer words to the device or the image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		/* pba_block[0] is the length of the block in words */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
910
911 /**
912 * ixgbe_get_pba_block_size
913 * @hw: pointer to the HW structure
914 * @eeprom_buf: optional pointer to EEPROM image
915 * @eeprom_buf_size: size of EEPROM image in words
916 * @pba_data_size: pointer to output variable
917 *
918 * Returns the size of the PBA block in words. Function operates on EEPROM
919 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
920 * EEPROM device.
921 *
922 **/
923 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
924 u32 eeprom_buf_size, u16 *pba_block_size)
925 {
926 s32 ret_val;
927 u16 pba_word[2];
928 u16 length;
929
930 DEBUGFUNC("ixgbe_get_pba_block_size");
931
932 if (eeprom_buf == NULL) {
933 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
934 &pba_word[0]);
935 if (ret_val)
936 return ret_val;
937 } else {
938 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
939 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
940 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
941 } else {
942 return IXGBE_ERR_PARAM;
943 }
944 }
945
946 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
947 if (eeprom_buf == NULL) {
948 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
949 &length);
950 if (ret_val)
951 return ret_val;
952 } else {
953 if (eeprom_buf_size > pba_word[1])
954 length = eeprom_buf[pba_word[1] + 0];
955 else
956 return IXGBE_ERR_PARAM;
957 }
958
959 if (length == 0xFFFF || length == 0)
960 return IXGBE_ERR_PBA_SECTION;
961 } else {
962 /* PBA number in legacy format, there is no PBA Block. */
963 length = 0;
964 }
965
966 if (pba_block_size != NULL)
967 *pba_block_size = length;
968
969 return IXGBE_SUCCESS;
970 }
971
972 /**
973 * ixgbe_get_mac_addr_generic - Generic get MAC address
974 * @hw: pointer to hardware structure
975 * @mac_addr: Adapter MAC address
976 *
977 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
978 * A reset of the adapter must be performed prior to calling this function
979 * in order for the MAC address to have been loaded from the EEPROM into RAR0
980 **/
981 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
982 {
983 u32 rar_high;
984 u32 rar_low;
985 u16 i;
986
987 DEBUGFUNC("ixgbe_get_mac_addr_generic");
988
989 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
990 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
991
992 for (i = 0; i < 4; i++)
993 mac_addr[i] = (u8)(rar_low >> (i*8));
994
995 for (i = 0; i < 2; i++)
996 mac_addr[i+4] = (u8)(rar_high >> (i*8));
997
998 return IXGBE_SUCCESS;
999 }
1000
/**
 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
 * @hw: pointer to hardware structure
 * @link_status: the link status returned by the PCI config space
 *
 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
 * by decoding the PCIe Link Status register value, then lets the MAC's
 * set_lan_id op determine which port/function this is.
 **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* Only set a default; don't clobber a type set elsewhere */
	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	/* Negotiated link width (number of lanes) */
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	/* Negotiated link speed (MT/s) */
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);
}
1050
1051 /**
1052 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1053 * @hw: pointer to hardware structure
1054 *
1055 * Gets the PCI bus info (speed, width, type) then calls helper function to
1056 * store this data within the ixgbe_hw structure.
1057 **/
1058 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1059 {
1060 u16 link_status;
1061
1062 DEBUGFUNC("ixgbe_get_bus_info_generic");
1063
1064 /* Get the negotiated link width and speed from PCI config space */
1065 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1066
1067 ixgbe_set_pci_config_data_generic(hw, link_status);
1068
1069 return IXGBE_SUCCESS;
1070 }
1071
1072 /**
1073 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1074 * @hw: pointer to the HW structure
1075 *
1076 * Determines the LAN function id by reading memory-mapped registers and swaps
1077 * the port value if requested, and set MAC instance for devices that share
1078 * CS4227.
1079 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;
	u16 ee_ctrl_4;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");

	/* The STATUS register reports which LAN port this function is */
	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = (u8)bus->func;

	/* check for a port swap: FACTPS.LFS means the ports are crossed */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;

	/* Get MAC instance from EEPROM for configuring CS4227 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
		/* NOTE(review): EEPROM read result deliberately unchecked;
		 * instance_id may be stale on read failure */
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
	}
}
1104
1105 /**
1106 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1107 * @hw: pointer to hardware structure
1108 *
1109 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1110 * disables transmit and receive units. The adapter_stopped flag is used by
1111 * the shared code and drivers to determine if the adapter is in a stopped
1112 * state and should not touch the hardware.
1113 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes
	 * (reading EICR acknowledges/clears the latched causes) */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		/* SWFLSH drops any descriptors still queued in hardware */
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables, then give pending DMA time to drain */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
1158
1159 /**
1160 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1161 * @hw: pointer to hardware structure
1162 *
1163 * Store the index for the link active LED. This will be used to support
1164 * blinking the LED.
1165 **/
1166 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1167 {
1168 struct ixgbe_mac_info *mac = &hw->mac;
1169 u32 led_reg, led_mode;
1170 u8 i;
1171
1172 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1173
1174 /* Get LED link active from the LEDCTL register */
1175 for (i = 0; i < 4; i++) {
1176 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1177
1178 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1179 IXGBE_LED_LINK_ACTIVE) {
1180 mac->led_link_act = i;
1181 return IXGBE_SUCCESS;
1182 }
1183 }
1184
1185 /*
1186 * If LEDCTL register does not have the LED link active set, then use
1187 * known MAC defaults.
1188 */
1189 switch (hw->mac.type) {
1190 case ixgbe_mac_X550EM_a:
1191 case ixgbe_mac_X550EM_x:
1192 mac->led_link_act = 1;
1193 break;
1194 default:
1195 mac->led_link_act = 2;
1196 }
1197 return IXGBE_SUCCESS;
1198 }
1199
1200 /**
1201 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1202 * @hw: pointer to hardware structure
1203 * @index: led number to turn on
1204 **/
1205 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1206 {
1207 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1208
1209 DEBUGFUNC("ixgbe_led_on_generic");
1210
1211 if (index > 3)
1212 return IXGBE_ERR_PARAM;
1213
1214 /* To turn on the LED, set mode to ON. */
1215 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1216 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1217 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1218 IXGBE_WRITE_FLUSH(hw);
1219
1220 return IXGBE_SUCCESS;
1221 }
1222
1223 /**
1224 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1225 * @hw: pointer to hardware structure
1226 * @index: led number to turn off
1227 **/
1228 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1229 {
1230 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1231
1232 DEBUGFUNC("ixgbe_led_off_generic");
1233
1234 if (index > 3)
1235 return IXGBE_ERR_PARAM;
1236
1237 /* To turn off the LED, set mode to OFF. */
1238 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1239 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1240 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1241 IXGBE_WRITE_FLUSH(hw);
1242
1243 return IXGBE_SUCCESS;
1244 }
1245
1246 /**
1247 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1248 * @hw: pointer to hardware structure
1249 *
1250 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1251 * ixgbe_hw struct in order to set up EEPROM access.
1252 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	/* Only run once; type stays ixgbe_eeprom_uninitialized until then */
	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed
		 * (lazily, by ixgbe_detect_eeprom_page_size_generic) */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			/* word_size = 2^(encoded size + base shift) */
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* Address width is reported by the EEC ADDR_SIZE bit */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}
1298
1299 /**
1300 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1301 * @hw: pointer to hardware structure
1302 * @offset: offset within the EEPROM to write
1303 * @words: number of word(s)
1304 * @data: 16 bit word(s) to write to EEPROM
1305 *
 * Writes 16 bit word(s) to EEPROM through bit-bang method
1307 **/
1308 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1309 u16 words, u16 *data)
1310 {
1311 s32 status = IXGBE_SUCCESS;
1312 u16 i, count;
1313
1314 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1315
1316 hw->eeprom.ops.init_params(hw);
1317
1318 if (words == 0) {
1319 status = IXGBE_ERR_INVALID_ARGUMENT;
1320 goto out;
1321 }
1322
1323 if (offset + words > hw->eeprom.word_size) {
1324 status = IXGBE_ERR_EEPROM;
1325 goto out;
1326 }
1327
1328 /*
1329 * The EEPROM page size cannot be queried from the chip. We do lazy
1330 * initialization. It is worth to do that when we write large buffer.
1331 */
1332 if ((hw->eeprom.word_page_size == 0) &&
1333 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1334 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1335
1336 /*
1337 * We cannot hold synchronization semaphores for too long
1338 * to avoid other entity starvation. However it is more efficient
1339 * to read in bursts than synchronizing access for each word.
1340 */
1341 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1342 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1343 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1344 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1345 count, &data[i]);
1346
1347 if (status != IXGBE_SUCCESS)
1348 break;
1349 }
1350
1351 out:
1352 return status;
1353 }
1354
1355 /**
1356 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1357 * @hw: pointer to hardware structure
1358 * @offset: offset within the EEPROM to be written to
1359 * @words: number of word(s)
1360 * @data: 16 bit word(s) to be written to the EEPROM
1361 *
1362 * If ixgbe_eeprom_update_checksum is not called after this function, the
1363 * EEPROM will most likely contain an invalid checksum.
1364 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI.
			 * NOTE: this inner loop also advances the outer
			 * index 'i', so each outer iteration emits up to
			 * one full EEPROM page (or a single word when the
			 * page size is still unknown, i.e. zero). */
			do {
				word = data[i];
				/* swap to the EEPROM's byte order */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			/* deselect, then wait out the internal write cycle */
			ixgbe_standby_eeprom(hw);
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1437
1438 /**
1439 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1440 * @hw: pointer to hardware structure
1441 * @offset: offset within the EEPROM to be written to
1442 * @data: 16 bit word to be written to the EEPROM
1443 *
1444 * If ixgbe_eeprom_update_checksum is not called after this function, the
1445 * EEPROM will most likely contain an invalid checksum.
1446 **/
1447 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1448 {
1449 s32 status;
1450
1451 DEBUGFUNC("ixgbe_write_eeprom_generic");
1452
1453 hw->eeprom.ops.init_params(hw);
1454
1455 if (offset >= hw->eeprom.word_size) {
1456 status = IXGBE_ERR_EEPROM;
1457 goto out;
1458 }
1459
1460 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1461
1462 out:
1463 return status;
1464 }
1465
1466 /**
1467 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1468 * @hw: pointer to hardware structure
1469 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit word(s) from EEPROM
1471 * @words: number of word(s)
1472 *
1473 * Reads 16 bit word(s) from EEPROM through bit-bang method
1474 **/
1475 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1476 u16 words, u16 *data)
1477 {
1478 s32 status = IXGBE_SUCCESS;
1479 u16 i, count;
1480
1481 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1482
1483 hw->eeprom.ops.init_params(hw);
1484
1485 if (words == 0) {
1486 status = IXGBE_ERR_INVALID_ARGUMENT;
1487 goto out;
1488 }
1489
1490 if (offset + words > hw->eeprom.word_size) {
1491 status = IXGBE_ERR_EEPROM;
1492 goto out;
1493 }
1494
1495 /*
1496 * We cannot hold synchronization semaphores for too long
1497 * to avoid other entity starvation. However it is more efficient
1498 * to read in bursts than synchronizing access for each word.
1499 */
1500 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1501 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1502 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1503
1504 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1505 count, &data[i]);
1506
1507 if (status != IXGBE_SUCCESS)
1508 break;
1509 }
1510
1511 out:
1512 return status;
1513 }
1514
1515 /**
1516 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1517 * @hw: pointer to hardware structure
1518 * @offset: offset within the EEPROM to be read
1519 * @words: number of word(s)
1520 * @data: read 16 bit word(s) from EEPROM
1521 *
1522 * Reads 16 bit word(s) from EEPROM through bit-bang method
1523 **/
1524 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1525 u16 words, u16 *data)
1526 {
1527 s32 status;
1528 u16 word_in;
1529 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1530 u16 i;
1531
1532 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1533
1534 /* Prepare the EEPROM for reading */
1535 status = ixgbe_acquire_eeprom(hw);
1536
1537 if (status == IXGBE_SUCCESS) {
1538 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1539 ixgbe_release_eeprom(hw);
1540 status = IXGBE_ERR_EEPROM;
1541 }
1542 }
1543
1544 if (status == IXGBE_SUCCESS) {
1545 for (i = 0; i < words; i++) {
1546 ixgbe_standby_eeprom(hw);
1547 /*
1548 * Some SPI eeproms use the 8th address bit embedded
1549 * in the opcode
1550 */
1551 if ((hw->eeprom.address_bits == 8) &&
1552 ((offset + i) >= 128))
1553 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1554
1555 /* Send the READ command (opcode + addr) */
1556 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1557 IXGBE_EEPROM_OPCODE_BITS);
1558 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1559 hw->eeprom.address_bits);
1560
1561 /* Read the data. */
1562 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1563 data[i] = (word_in >> 8) | (word_in << 8);
1564 }
1565
1566 /* End this read operation */
1567 ixgbe_release_eeprom(hw);
1568 }
1569
1570 return status;
1571 }
1572
1573 /**
1574 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1575 * @hw: pointer to hardware structure
1576 * @offset: offset within the EEPROM to be read
1577 * @data: read 16 bit value from EEPROM
1578 *
1579 * Reads 16 bit value from EEPROM through bit-bang method
1580 **/
1581 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1582 u16 *data)
1583 {
1584 s32 status;
1585
1586 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1587
1588 hw->eeprom.ops.init_params(hw);
1589
1590 if (offset >= hw->eeprom.word_size) {
1591 status = IXGBE_ERR_EEPROM;
1592 goto out;
1593 }
1594
1595 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1596
1597 out:
1598 return status;
1599 }
1600
1601 /**
1602 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1603 * @hw: pointer to hardware structure
1604 * @offset: offset of word in the EEPROM to read
1605 * @words: number of word(s)
1606 * @data: 16 bit word(s) from the EEPROM
1607 *
1608 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1609 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Compose the command: word address plus the START bit */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		/* Kick off the read, then poll for DONE */
		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == IXGBE_SUCCESS) {
			/* Data sits above the control bits in EERD */
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			DEBUGOUT("Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}
1651
1652 /**
1653 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1654 * @hw: pointer to hardware structure
1655 * @offset: offset within the EEPROM to be used as a scratch pad
1656 *
1657 * Discover EEPROM page size by writing marching data at given offset.
1658 * This function is called only when we are writing a new large buffer
1659 * at given offset so the data would be overwritten anyway.
1660 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* Marching pattern 0,1,2,... so the wrap point is identifiable */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Probe with the maximum page size, then restore the
	 * "unknown" marker so the probe write is single-word safe */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
					    IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	/* Read back the first word of the probe region */
	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page; the value left at
	 * the base offset therefore reveals the real page size.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}
1695
1696 /**
1697 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1698 * @hw: pointer to hardware structure
1699 * @offset: offset of word in the EEPROM to read
1700 * @data: word read from the EEPROM
1701 *
1702 * Reads a 16 bit word from the EEPROM using the EERD register.
1703 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	/* A single-word EERD read is a one-element buffer read */
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1708
1709 /**
1710 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1711 * @hw: pointer to hardware structure
1712 * @offset: offset of word in the EEPROM to write
1713 * @words: number of word(s)
1714 * @data: word(s) write to the EEPROM
1715 *
1716 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1717 **/
1718 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1719 u16 words, u16 *data)
1720 {
1721 u32 eewr;
1722 s32 status = IXGBE_SUCCESS;
1723 u16 i;
1724
1725 DEBUGFUNC("ixgbe_write_eewr_generic");
1726
1727 hw->eeprom.ops.init_params(hw);
1728
1729 if (words == 0) {
1730 status = IXGBE_ERR_INVALID_ARGUMENT;
1731 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1732 goto out;
1733 }
1734
1735 if (offset >= hw->eeprom.word_size) {
1736 status = IXGBE_ERR_EEPROM;
1737 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1738 goto out;
1739 }
1740
1741 for (i = 0; i < words; i++) {
1742 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1743 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1744 IXGBE_EEPROM_RW_REG_START;
1745
1746 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1747 if (status != IXGBE_SUCCESS) {
1748 DEBUGOUT("Eeprom write EEWR timed out\n");
1749 goto out;
1750 }
1751
1752 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1753
1754 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1755 if (status != IXGBE_SUCCESS) {
1756 DEBUGOUT("Eeprom write EEWR timed out\n");
1757 goto out;
1758 }
1759 }
1760
1761 out:
1762 return status;
1763 }
1764
1765 /**
1766 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1767 * @hw: pointer to hardware structure
1768 * @offset: offset of word in the EEPROM to write
1769 * @data: word write to the EEPROM
1770 *
1771 * Write a 16 bit word to the EEPROM using the EEWR register.
1772 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	/* A single-word EEWR write is a one-element buffer write */
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1777
1778 /**
1779 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1780 * @hw: pointer to hardware structure
1781 * @ee_reg: EEPROM flag for polling
1782 *
1783 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1784 * read or write is done respectively.
1785 **/
1786 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1787 {
1788 u32 i;
1789 u32 reg;
1790 s32 status = IXGBE_ERR_EEPROM;
1791
1792 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1793
1794 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1795 if (ee_reg == IXGBE_NVM_POLL_READ)
1796 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1797 else
1798 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1799
1800 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1801 status = IXGBE_SUCCESS;
1802 break;
1803 }
1804 usec_delay(5);
1805 }
1806
1807 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1808 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1809 "EEPROM read/write done polling timed out");
1810
1811 return status;
1812 }
1813
1814 /**
1815 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1816 * @hw: pointer to hardware structure
1817 *
1818 * Prepares EEPROM for access using bit-bang method. This function should
1819 * be called before issuing a command to the EEPROM.
1820 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* First take the SW/FW semaphore guarding EEPROM access */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* Poll until hardware asserts the grant bit */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			/* Also drop the semaphore taken above */
			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK so the device is deselected and
			 * the clock line starts low */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1868
1869 /**
1870 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1871 * @hw: pointer to hardware structure
1872 *
1873 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1874 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.  (Firmware can clear it to refuse.)
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1959
1960 /**
1961 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1962 * @hw: pointer to hardware structure
1963 *
1964 * This function clears hardware semaphore bits.
1965 **/
1966 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1967 {
1968 u32 swsm;
1969
1970 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1971
1972 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1973
1974 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1975 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1976 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1977 IXGBE_WRITE_FLUSH(hw);
1978 }
1979
1980 /**
1981 * ixgbe_ready_eeprom - Polls for EEPROM ready
1982 * @hw: pointer to hardware structure
1983 **/
1984 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1985 {
1986 s32 status = IXGBE_SUCCESS;
1987 u16 i;
1988 u8 spi_stat_reg;
1989
1990 DEBUGFUNC("ixgbe_ready_eeprom");
1991
1992 /*
1993 * Read "Status Register" repeatedly until the LSB is cleared. The
1994 * EEPROM will signal that the command has been completed by clearing
1995 * bit 0 of the internal status register. If it's not cleared within
1996 * 5 milliseconds, then error out.
1997 */
1998 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1999 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
2000 IXGBE_EEPROM_OPCODE_BITS);
2001 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
2002 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
2003 break;
2004
2005 usec_delay(5);
2006 ixgbe_standby_eeprom(hw);
2007 }
2008
2009 /*
2010 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
2011 * devices (and only 0-5mSec on 5V devices)
2012 */
2013 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2014 DEBUGOUT("SPI EEPROM Status error\n");
2015 status = IXGBE_ERR_EEPROM;
2016 }
2017
2018 return status;
2019 }
2020
2021 /**
2022 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2023 * @hw: pointer to hardware structure
2024 **/
2025 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2026 {
2027 u32 eec;
2028
2029 DEBUGFUNC("ixgbe_standby_eeprom");
2030
2031 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2032
2033 /* Toggle CS to flush commands */
2034 eec |= IXGBE_EEC_CS;
2035 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2036 IXGBE_WRITE_FLUSH(hw);
2037 usec_delay(1);
2038 eec &= ~IXGBE_EEC_CS;
2039 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2040 IXGBE_WRITE_FLUSH(hw);
2041 usec_delay(1);
2042 }
2043
/**
 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 *
 * Bits are clocked out MSB first; only the low "count" bits of "data"
 * are sent.  DI is left low on return.
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time. Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM). A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		usec_delay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}
2100
/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in
 *
 * Returns the bits read, MSB first, in the low "count" bits of the result.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit. During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Keep DI clear; sample DO into the LSB just shifted open */
		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
2139
2140 /**
2141 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2142 * @hw: pointer to hardware structure
2143 * @eec: EEC register's current value
2144 **/
2145 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2146 {
2147 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2148
2149 /*
2150 * Raise the clock input to the EEPROM
2151 * (setting the SK bit), then delay
2152 */
2153 *eec = *eec | IXGBE_EEC_SK;
2154 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2155 IXGBE_WRITE_FLUSH(hw);
2156 usec_delay(1);
2157 }
2158
/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}
2177
/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deselects the EEPROM (CS high, SK low), drops the EEC request bit,
 * releases the SW/FW semaphore, then delays so firmware can get a turn.
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}
2207
/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums the words before the checksum word, plus all words in the sections
 * reached through the pointer words (excluding the FW pointer), and returns
 * IXGBE_EEPROM_SUM minus that sum.
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		/* First word of the section holds its length in words */
		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				DEBUGOUT("EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	/* Checksum word is chosen so that the total sums to IXGBE_EEPROM_SUM */
	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	/* Non-negative, so distinguishable from the error codes above */
	return (s32)checksum;
}
2266
/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 *
 * Returns IXGBE_ERR_EEPROM_CHECKSUM on mismatch, a negative error code on
 * read/calc failure, or IXGBE_SUCCESS.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns the checksum in the low 16 bits on success */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = IXGBE_ERR_EEPROM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}
2318
/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recalculates the checksum and writes it to the IXGBE_EEPROM_CHECKSUM word.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns the checksum in the low 16 bits on success */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}
2350
2351 /**
2352 * ixgbe_validate_mac_addr - Validate MAC address
2353 * @mac_addr: pointer to MAC address.
2354 *
2355 * Tests a MAC address to ensure it is a valid Individual Address.
2356 **/
2357 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2358 {
2359 s32 status = IXGBE_SUCCESS;
2360
2361 DEBUGFUNC("ixgbe_validate_mac_addr");
2362
2363 /* Make sure it is not a multicast address */
2364 if (IXGBE_IS_MULTICAST(mac_addr)) {
2365 status = IXGBE_ERR_INVALID_MAC_ADDR;
2366 /* Not a broadcast address */
2367 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2368 status = IXGBE_ERR_INVALID_MAC_ADDR;
2369 /* Reject the zero address */
2370 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2371 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2372 status = IXGBE_ERR_INVALID_MAC_ADDR;
2373 }
2374 return status;
2375 }
2376
/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 * Returns IXGBE_ERR_INVALID_ARGUMENT if index is out of range.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}
2430
/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register and clears
 * its VMDq pool selection.
 * Returns IXGBE_ERR_INVALID_ARGUMENT if index is out of range.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}
2468
/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] holds the station address; secondaries start at 1 */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
2537
/**
 * ixgbe_add_uc_addr - Adds a secondary unicast address.
 * @hw: pointer to hardware structure
 * @addr: new address
 * @vmdq: VMDq "set" or "pool" index for the address
 *
 * Adds it to unused receive address register or goes into promiscuous mode.
 **/
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 rar;

	DEBUGFUNC("ixgbe_add_uc_addr");

	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Place this address in the RAR if there is room,
	 * else put the controller into promiscuous mode
	 */
	if (hw->addr_ctrl.rar_used_count < rar_entries) {
		rar = hw->addr_ctrl.rar_used_count;
		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
		hw->addr_ctrl.rar_used_count++;
	} else {
		/* overflow_promisc counts addresses that did not fit */
		hw->addr_ctrl.overflow_promisc++;
	}

	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
}
2570
2571 /**
2572 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2573 * @hw: pointer to hardware structure
2574 * @addr_list: the list of new addresses
2575 * @addr_count: number of addresses
2576 * @next: iterator function to walk the address list
2577 *
2578 * The given list replaces any existing list. Clears the secondary addrs from
2579 * receive address registers. Uses unused receive address registers for the
2580 * first secondary addresses, and falls back to promiscuous mode as needed.
2581 *
2582 * Drivers using secondary unicast addresses must set user_set_promisc when
2583 * manually putting the device into promiscuous mode.
2584 **/
2585 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2586 u32 addr_count, ixgbe_mc_addr_itr next)
2587 {
2588 u8 *addr;
2589 u32 i;
2590 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2591 u32 uc_addr_in_use;
2592 u32 fctrl;
2593 u32 vmdq;
2594
2595 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2596
2597 /*
2598 * Clear accounting of old secondary address list,
2599 * don't count RAR[0]
2600 */
2601 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2602 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2603 hw->addr_ctrl.overflow_promisc = 0;
2604
2605 /* Zero out the other receive addresses */
2606 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2607 for (i = 0; i < uc_addr_in_use; i++) {
2608 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2609 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2610 }
2611
2612 /* Add the new addresses */
2613 for (i = 0; i < addr_count; i++) {
2614 DEBUGOUT(" Adding the secondary addresses:\n");
2615 addr = next(hw, &addr_list, &vmdq);
2616 ixgbe_add_uc_addr(hw, addr, vmdq);
2617 }
2618
2619 if (hw->addr_ctrl.overflow_promisc) {
2620 /* enable promisc if not already in overflow or set by user */
2621 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2622 DEBUGOUT(" Entering address overflow promisc mode\n");
2623 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2624 fctrl |= IXGBE_FCTRL_UPE;
2625 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2626 }
2627 } else {
2628 /* only disable if set by overflow, not by user */
2629 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2630 DEBUGOUT(" Leaving address overflow promisc mode\n");
2631 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2632 fctrl &= ~IXGBE_FCTRL_UPE;
2633 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2634 }
2635 }
2636
2637 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2638 return IXGBE_SUCCESS;
2639 }
2640
/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	DEBUGFUNC("ixgbe_mta_vector");

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		DEBUGOUT("MC filter type param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
2682
/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address to hash into the table
 *
 * Sets the bit-vector in the software-shadowed multicast table
 * (hw->mac.mta_shadow); the shadow is written to hardware by the caller.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
2716
/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 * @clear: flag, when set clears the table beforehand
 *
 * When the clear flag is set, the given list replaces any existing list.
 * Hashes the given addresses into the multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Push the full shadow table out to the hardware MTA registers */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	/* Enable the multicast filter only if any table bits are in use */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2768
2769 /**
2770 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2771 * @hw: pointer to hardware structure
2772 *
2773 * Enables multicast address in RAR and the use of the multicast hash table.
2774 **/
2775 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2776 {
2777 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2778
2779 DEBUGFUNC("ixgbe_enable_mc_generic");
2780
2781 if (a->mta_in_use > 0)
2782 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2783 hw->mac.mc_filter_type);
2784
2785 return IXGBE_SUCCESS;
2786 }
2787
2788 /**
2789 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2790 * @hw: pointer to hardware structure
2791 *
2792 * Disables multicast address in RAR and the use of the multicast hash table.
2793 **/
2794 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2795 {
2796 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2797
2798 DEBUGFUNC("ixgbe_disable_mc_generic");
2799
2800 if (a->mta_in_use > 0)
2801 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2802
2803 return IXGBE_SUCCESS;
2804 }
2805
/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings: validates the
 * water marks, autonegotiates the mode, then programs MFLCN/FCCFG and the
 * per-TC FCRTL/FCRTH thresholds and pause timers.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			      "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* water marks are in KB units; registers take bytes */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2938
/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings and store the result in hw->fc.current_mode.
 * The resolution follows the IEEE 802.3 Clause 37 pause resolution table.
 **/
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	/* An all-zero advertisement word means nothing was negotiated */
	if ((!(adv_reg)) ||  (!(lp_reg))) {
		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
			     "Local or link partner's advertised flow control "
			     "settings are NULL. Local: %x, link partner: %x\n",
			     adv_reg, lp_reg);
		return IXGBE_ERR_FC_NOT_NEGOTIATED;
	}

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames.  In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			DEBUGOUT("Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
		DEBUGOUT("Flow Control = NONE.\n");
	}
	return IXGBE_SUCCESS;
}
2992
/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control according on 1 gig fiber by resolving the PCS
 * advertisement against the link partner's ability word.
 **/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On multispeed fiber at 1g, bail out if
	 * - link is up but AN did not complete, or if
	 * - link is up and AN completed but timed out
	 */

	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
		goto out;
	}

	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

	/* Same SYM/ASM bit positions apply to both local and LP words */
	ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
				     pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE,
				     IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE);

out:
	return ret_val;
}
3029
/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37 by resolving the
 * 10G AN advertisement (AUTOC) against the link partner ability (ANLP1).
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
		DEBUGOUT("Auto-Negotiation did not complete\n");
		goto out;
	}

	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
			DEBUGOUT("Link partner is not AN enabled\n");
			goto out;
		}
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}
3073
/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37 by resolving the PHY's
 * auto-neg advertisement against the link partner's.
 *
 * NOTE(review): the read_reg return values are ignored; on a failed read
 * the ability words stay 0 and ixgbe_negotiate_fc reports
 * IXGBE_ERR_FC_NOT_NEGOTIATED, which appears to be the intended fallback.
 **/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
				  (u32)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}
3097
3098 /**
3099 * ixgbe_fc_autoneg - Configure flow control
3100 * @hw: pointer to hardware structure
3101 *
3102 * Compares our advertised flow control capabilities to those advertised by
3103 * our link partner, and determines the proper flow control mode to use.
3104 **/
3105 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3106 {
3107 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3108 ixgbe_link_speed speed;
3109 bool link_up;
3110
3111 DEBUGFUNC("ixgbe_fc_autoneg");
3112
3113 /*
3114 * AN should have completed when the cable was plugged in.
3115 * Look for reasons to bail out. Bail out if:
3116 * - FC autoneg is disabled, or if
3117 * - link is not up.
3118 */
3119 if (hw->fc.disable_fc_autoneg) {
3120 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3121 "Flow control autoneg is disabled");
3122 goto out;
3123 }
3124
3125 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3126 if (!link_up) {
3127 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3128 goto out;
3129 }
3130
3131 switch (hw->phy.media_type) {
3132 /* Autoneg flow control on fiber adapters */
3133 case ixgbe_media_type_fiber_fixed:
3134 case ixgbe_media_type_fiber_qsfp:
3135 case ixgbe_media_type_fiber:
3136 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3137 ret_val = ixgbe_fc_autoneg_fiber(hw);
3138 break;
3139
3140 /* Autoneg flow control on backplane adapters */
3141 case ixgbe_media_type_backplane:
3142 ret_val = ixgbe_fc_autoneg_backplane(hw);
3143 break;
3144
3145 /* Autoneg flow control on copper adapters */
3146 case ixgbe_media_type_copper:
3147 if (ixgbe_device_supports_autoneg_fc(hw))
3148 ret_val = ixgbe_fc_autoneg_copper(hw);
3149 break;
3150
3151 default:
3152 break;
3153 }
3154
3155 out:
3156 if (ret_val == IXGBE_SUCCESS) {
3157 hw->fc.fc_was_autonegged = TRUE;
3158 } else {
3159 hw->fc.fc_was_autonegged = FALSE;
3160 hw->fc.current_mode = hw->fc.requested_mode;
3161 }
3162 }
3163
3164 /*
3165 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3166 * @hw: pointer to hardware structure
3167 *
3168 * System-wide timeout range is encoded in PCIe Device Control2 register.
3169 *
3170 * Add 10% to specified maximum and return the number of times to poll for
3171 * completion timeout, in units of 100 microsec. Never return less than
3172 * 800 = 80 millisec.
3173 */
3174 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3175 {
3176 s16 devctl2;
3177 u32 pollcnt;
3178
3179 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3180 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3181
3182 switch (devctl2) {
3183 case IXGBE_PCIDEVCTRL2_65_130ms:
3184 pollcnt = 1300; /* 130 millisec */
3185 break;
3186 case IXGBE_PCIDEVCTRL2_260_520ms:
3187 pollcnt = 5200; /* 520 millisec */
3188 break;
3189 case IXGBE_PCIDEVCTRL2_1_2s:
3190 pollcnt = 20000; /* 2 sec */
3191 break;
3192 case IXGBE_PCIDEVCTRL2_4_8s:
3193 pollcnt = 80000; /* 8 sec */
3194 break;
3195 case IXGBE_PCIDEVCTRL2_17_34s:
3196 pollcnt = 34000; /* 34 sec */
3197 break;
3198 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3199 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3200 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3201 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3202 default:
3203 pollcnt = 800; /* 80 millisec minimum */
3204 break;
3205 }
3206
3207 /* add 10% to spec maximum */
3208 return (pollcnt * 11) / 10;
3209 }
3210
3211 /**
3212 * ixgbe_disable_pcie_master - Disable PCI-express master access
3213 * @hw: pointer to hardware structure
3214 *
3215 * Disables PCI-Express master access and verifies there are no pending
3216 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3217 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3218 * is returned signifying master requests disabled.
3219 **/
3220 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3221 {
3222 s32 status = IXGBE_SUCCESS;
3223 u32 i, poll;
3224 u16 value;
3225
3226 DEBUGFUNC("ixgbe_disable_pcie_master");
3227
3228 /* Always set this bit to ensure any future transactions are blocked */
3229 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3230
3231 /* Exit if master requests are blocked */
3232 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3233 IXGBE_REMOVED(hw->hw_addr))
3234 goto out;
3235
3236 /* Poll for master request bit to clear */
3237 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3238 usec_delay(100);
3239 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3240 goto out;
3241 }
3242
3243 /*
3244 * Two consecutive resets are required via CTRL.RST per datasheet
3245 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
3246 * of this need. The first reset prevents new master requests from
3247 * being issued by our device. We then must wait 1usec or more for any
3248 * remaining completions from the PCIe bus to trickle in, and then reset
3249 * again to clear out any effects they may have had on our device.
3250 */
3251 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3252 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3253
3254 if (hw->mac.type >= ixgbe_mac_X550)
3255 goto out;
3256
3257 /*
3258 * Before proceeding, make sure that the PCIe block does not have
3259 * transactions pending.
3260 */
3261 poll = ixgbe_pcie_timeout_poll(hw);
3262 for (i = 0; i < poll; i++) {
3263 usec_delay(100);
3264 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3265 if (IXGBE_REMOVED(hw->hw_addr))
3266 goto out;
3267 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3268 goto out;
3269 }
3270
3271 ERROR_REPORT1(IXGBE_ERROR_POLLING,
3272 "PCIe transaction pending bit also did not clear.\n");
3273 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3274
3275 out:
3276 return status;
3277 }
3278
3279 /**
3280 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3281 * @hw: pointer to hardware structure
3282 * @mask: Mask to specify which semaphore to acquire
3283 *
3284 * Acquires the SWFW semaphore through the GSSR register for the specified
3285 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3286 **/
3287 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3288 {
3289 u32 gssr = 0;
3290 u32 swmask = mask;
3291 u32 fwmask = mask << 5;
3292 u32 timeout = 200;
3293 u32 i;
3294
3295 DEBUGFUNC("ixgbe_acquire_swfw_sync");
3296
3297 for (i = 0; i < timeout; i++) {
3298 /*
3299 * SW NVM semaphore bit is used for access to all
3300 * SW_FW_SYNC bits (not just NVM)
3301 */
3302 if (ixgbe_get_eeprom_semaphore(hw))
3303 return IXGBE_ERR_SWFW_SYNC;
3304
3305 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3306 if (!(gssr & (fwmask | swmask))) {
3307 gssr |= swmask;
3308 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3309 ixgbe_release_eeprom_semaphore(hw);
3310 return IXGBE_SUCCESS;
3311 } else {
3312 /* Resource is currently in use by FW or SW */
3313 ixgbe_release_eeprom_semaphore(hw);
3314 msec_delay(5);
3315 }
3316 }
3317
3318 /* If time expired clear the bits holding the lock and retry */
3319 if (gssr & (fwmask | swmask))
3320 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3321
3322 msec_delay(5);
3323 return IXGBE_ERR_SWFW_SYNC;
3324 }
3325
3326 /**
3327 * ixgbe_release_swfw_sync - Release SWFW semaphore
3328 * @hw: pointer to hardware structure
3329 * @mask: Mask to specify which semaphore to release
3330 *
3331 * Releases the SWFW semaphore through the GSSR register for the specified
3332 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3333 **/
3334 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3335 {
3336 u32 gssr;
3337 u32 swmask = mask;
3338
3339 DEBUGFUNC("ixgbe_release_swfw_sync");
3340
3341 ixgbe_get_eeprom_semaphore(hw);
3342
3343 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3344 gssr &= ~swmask;
3345 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3346
3347 ixgbe_release_eeprom_semaphore(hw);
3348 }
3349
3350 /**
3351 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3352 * @hw: pointer to hardware structure
3353 *
3354 * Stops the receive data path and waits for the HW to internally empty
3355 * the Rx security block
3356 **/
3357 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3358 {
3359 #define IXGBE_MAX_SECRX_POLL 40
3360
3361 int i;
3362 int secrxreg;
3363
3364 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3365
3366
3367 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3368 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3369 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3370 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3371 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3372 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3373 break;
3374 else
3375 /* Use interrupt-safe sleep just in case */
3376 usec_delay(1000);
3377 }
3378
3379 /* For informational purposes only */
3380 if (i >= IXGBE_MAX_SECRX_POLL)
3381 DEBUGOUT("Rx unit being enabled before security "
3382 "path fully disabled. Continuing with init.\n");
3383
3384 return IXGBE_SUCCESS;
3385 }
3386
3387 /**
3388 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3389 * @hw: pointer to hardware structure
3390 * @reg_val: Value we read from AUTOC
3391 *
3392 * The default case requires no protection so just to the register read.
3393 */
3394 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3395 {
3396 *locked = FALSE;
3397 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3398 return IXGBE_SUCCESS;
3399 }
3400
3401 /**
3402 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3403 * @hw: pointer to hardware structure
3404 * @reg_val: value to write to AUTOC
3405 * @locked: bool to indicate whether the SW/FW lock was already taken by
3406 * previous read.
3407 *
3408 * The default case requires no protection so just to the register write.
3409 */
3410 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3411 {
3412 UNREFERENCED_1PARAMETER(locked);
3413
3414 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3415 return IXGBE_SUCCESS;
3416 }
3417
3418 /**
3419 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3420 * @hw: pointer to hardware structure
3421 *
3422 * Enables the receive data path.
3423 **/
3424 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3425 {
3426 u32 secrxreg;
3427
3428 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3429
3430 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3431 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3432 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3433 IXGBE_WRITE_FLUSH(hw);
3434
3435 return IXGBE_SUCCESS;
3436 }
3437
3438 /**
3439 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3440 * @hw: pointer to hardware structure
3441 * @regval: register value to write to RXCTRL
3442 *
3443 * Enables the Rx DMA unit
3444 **/
3445 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3446 {
3447 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3448
3449 if (regval & IXGBE_RXCTRL_RXEN)
3450 ixgbe_enable_rx(hw);
3451 else
3452 ixgbe_disable_rx(hw);
3453
3454 return IXGBE_SUCCESS;
3455 }
3456
3457 /**
3458 * ixgbe_blink_led_start_generic - Blink LED based on index.
3459 * @hw: pointer to hardware structure
3460 * @index: led number to blink
3461 **/
3462 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3463 {
3464 ixgbe_link_speed speed = 0;
3465 bool link_up = 0;
3466 u32 autoc_reg = 0;
3467 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3468 s32 ret_val = IXGBE_SUCCESS;
3469 bool locked = FALSE;
3470
3471 DEBUGFUNC("ixgbe_blink_led_start_generic");
3472
3473 if (index > 3)
3474 return IXGBE_ERR_PARAM;
3475
3476 /*
3477 * Link must be up to auto-blink the LEDs;
3478 * Force it if link is down.
3479 */
3480 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3481
3482 if (!link_up) {
3483 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3484 if (ret_val != IXGBE_SUCCESS)
3485 goto out;
3486
3487 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3488 autoc_reg |= IXGBE_AUTOC_FLU;
3489
3490 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3491 if (ret_val != IXGBE_SUCCESS)
3492 goto out;
3493
3494 IXGBE_WRITE_FLUSH(hw);
3495 msec_delay(10);
3496 }
3497
3498 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3499 led_reg |= IXGBE_LED_BLINK(index);
3500 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3501 IXGBE_WRITE_FLUSH(hw);
3502
3503 out:
3504 return ret_val;
3505 }
3506
3507 /**
3508 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3509 * @hw: pointer to hardware structure
3510 * @index: led number to stop blinking
3511 **/
3512 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3513 {
3514 u32 autoc_reg = 0;
3515 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3516 s32 ret_val = IXGBE_SUCCESS;
3517 bool locked = FALSE;
3518
3519 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3520
3521 if (index > 3)
3522 return IXGBE_ERR_PARAM;
3523
3524
3525 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3526 if (ret_val != IXGBE_SUCCESS)
3527 goto out;
3528
3529 autoc_reg &= ~IXGBE_AUTOC_FLU;
3530 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3531
3532 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3533 if (ret_val != IXGBE_SUCCESS)
3534 goto out;
3535
3536 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3537 led_reg &= ~IXGBE_LED_BLINK(index);
3538 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3539 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3540 IXGBE_WRITE_FLUSH(hw);
3541
3542 out:
3543 return ret_val;
3544 }
3545
3546 /**
3547 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3548 * @hw: pointer to hardware structure
3549 * @san_mac_offset: SAN MAC address offset
3550 *
3551 * This function will read the EEPROM location for the SAN MAC address
3552 * pointer, and returns the value at that location. This is used in both
3553 * get and set mac_addr routines.
3554 **/
3555 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3556 u16 *san_mac_offset)
3557 {
3558 s32 ret_val;
3559
3560 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3561
3562 /*
3563 * First read the EEPROM pointer to see if the MAC addresses are
3564 * available.
3565 */
3566 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3567 san_mac_offset);
3568 if (ret_val) {
3569 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3570 "eeprom at offset %d failed",
3571 IXGBE_SAN_MAC_ADDR_PTR);
3572 }
3573
3574 return ret_val;
3575 }
3576
3577 /**
3578 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3579 * @hw: pointer to hardware structure
3580 * @san_mac_addr: SAN MAC address
3581 *
3582 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3583 * per-port, so set_lan_id() must be called before reading the addresses.
3584 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3585 * upon for non-SFP connections, so we must call it here.
3586 **/
3587 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3588 {
3589 u16 san_mac_data, san_mac_offset;
3590 u8 i;
3591 s32 ret_val;
3592
3593 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3594
3595 /*
3596 * First read the EEPROM pointer to see if the MAC addresses are
3597 * available. If they're not, no point in calling set_lan_id() here.
3598 */
3599 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3600 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3601 goto san_mac_addr_out;
3602
3603 /* make sure we know which port we need to program */
3604 hw->mac.ops.set_lan_id(hw);
3605 /* apply the port offset to the address offset */
3606 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3607 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3608 for (i = 0; i < 3; i++) {
3609 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3610 &san_mac_data);
3611 if (ret_val) {
3612 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3613 "eeprom read at offset %d failed",
3614 san_mac_offset);
3615 goto san_mac_addr_out;
3616 }
3617 san_mac_addr[i * 2] = (u8)(san_mac_data);
3618 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3619 san_mac_offset++;
3620 }
3621 return IXGBE_SUCCESS;
3622
3623 san_mac_addr_out:
3624 /*
3625 * No addresses available in this EEPROM. It's not an
3626 * error though, so just wipe the local address and return.
3627 */
3628 for (i = 0; i < 6; i++)
3629 san_mac_addr[i] = 0xFF;
3630 return IXGBE_SUCCESS;
3631 }
3632
3633 /**
3634 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3635 * @hw: pointer to hardware structure
3636 * @san_mac_addr: SAN MAC address
3637 *
3638 * Write a SAN MAC address to the EEPROM.
3639 **/
3640 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3641 {
3642 s32 ret_val;
3643 u16 san_mac_data, san_mac_offset;
3644 u8 i;
3645
3646 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3647
3648 /* Look for SAN mac address pointer. If not defined, return */
3649 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3650 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3651 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3652
3653 /* Make sure we know which port we need to write */
3654 hw->mac.ops.set_lan_id(hw);
3655 /* Apply the port offset to the address offset */
3656 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3657 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3658
3659 for (i = 0; i < 3; i++) {
3660 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3661 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3662 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3663 san_mac_offset++;
3664 }
3665
3666 return IXGBE_SUCCESS;
3667 }
3668
3669 /**
3670 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3671 * @hw: pointer to hardware structure
3672 *
3673 * Read PCIe configuration space, and get the MSI-X vector count from
3674 * the capabilities table.
3675 **/
3676 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3677 {
3678 u16 msix_count = 1;
3679 u16 max_msix_count;
3680 u16 pcie_offset;
3681
3682 switch (hw->mac.type) {
3683 case ixgbe_mac_82598EB:
3684 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3685 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3686 break;
3687 case ixgbe_mac_82599EB:
3688 case ixgbe_mac_X540:
3689 case ixgbe_mac_X550:
3690 case ixgbe_mac_X550EM_x:
3691 case ixgbe_mac_X550EM_a:
3692 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3693 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3694 break;
3695 default:
3696 return msix_count;
3697 }
3698
3699 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3700 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3701 if (IXGBE_REMOVED(hw->hw_addr))
3702 msix_count = 0;
3703 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3704
3705 /* MSI-X count is zero-based in HW */
3706 msix_count++;
3707
3708 if (msix_count > max_msix_count)
3709 msix_count = max_msix_count;
3710
3711 return msix_count;
3712 }
3713
3714 /**
3715 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3716 * @hw: pointer to hardware structure
3717 * @addr: Address to put into receive address register
3718 * @vmdq: VMDq pool to assign
3719 *
3720 * Puts an ethernet address into a receive address register, or
3721 * finds the rar that it is already in; adds to the pool list
3722 **/
3723 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3724 {
3725 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3726 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3727 u32 rar;
3728 u32 rar_low, rar_high;
3729 u32 addr_low, addr_high;
3730
3731 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3732
3733 /* swap bytes for HW little endian */
3734 addr_low = addr[0] | (addr[1] << 8)
3735 | (addr[2] << 16)
3736 | (addr[3] << 24);
3737 addr_high = addr[4] | (addr[5] << 8);
3738
3739 /*
3740 * Either find the mac_id in rar or find the first empty space.
3741 * rar_highwater points to just after the highest currently used
3742 * rar in order to shorten the search. It grows when we add a new
3743 * rar to the top.
3744 */
3745 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3746 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3747
3748 if (((IXGBE_RAH_AV & rar_high) == 0)
3749 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3750 first_empty_rar = rar;
3751 } else if ((rar_high & 0xFFFF) == addr_high) {
3752 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3753 if (rar_low == addr_low)
3754 break; /* found it already in the rars */
3755 }
3756 }
3757
3758 if (rar < hw->mac.rar_highwater) {
3759 /* already there so just add to the pool bits */
3760 ixgbe_set_vmdq(hw, rar, vmdq);
3761 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3762 /* stick it into first empty RAR slot we found */
3763 rar = first_empty_rar;
3764 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3765 } else if (rar == hw->mac.rar_highwater) {
3766 /* add it to the top of the list and inc the highwater mark */
3767 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3768 hw->mac.rar_highwater++;
3769 } else if (rar >= hw->mac.num_rar_entries) {
3770 return IXGBE_ERR_INVALID_MAC_ADDR;
3771 }
3772
3773 /*
3774 * If we found rar[0], make sure the default pool bit (we use pool 0)
3775 * remains cleared to be sure default pool packets will get delivered
3776 */
3777 if (rar == 0)
3778 ixgbe_clear_vmdq(hw, rar, 0);
3779
3780 return rar;
3781 }
3782
3783 /**
3784 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3785 * @hw: pointer to hardware struct
3786 * @rar: receive address register index to disassociate
3787 * @vmdq: VMDq pool index to remove from the rar
3788 **/
3789 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3790 {
3791 u32 mpsar_lo, mpsar_hi;
3792 u32 rar_entries = hw->mac.num_rar_entries;
3793
3794 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3795
3796 /* Make sure we are using a valid rar index range */
3797 if (rar >= rar_entries) {
3798 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3799 "RAR index %d is out of range.\n", rar);
3800 return IXGBE_ERR_INVALID_ARGUMENT;
3801 }
3802
3803 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3804 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3805
3806 if (IXGBE_REMOVED(hw->hw_addr))
3807 goto done;
3808
3809 if (!mpsar_lo && !mpsar_hi)
3810 goto done;
3811
3812 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3813 if (mpsar_lo) {
3814 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3815 mpsar_lo = 0;
3816 }
3817 if (mpsar_hi) {
3818 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3819 mpsar_hi = 0;
3820 }
3821 } else if (vmdq < 32) {
3822 mpsar_lo &= ~(1 << vmdq);
3823 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3824 } else {
3825 mpsar_hi &= ~(1 << (vmdq - 32));
3826 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3827 }
3828
3829 /* was that the last pool using this rar? */
3830 if (mpsar_lo == 0 && mpsar_hi == 0 &&
3831 rar != 0 && rar != hw->mac.san_mac_rar_index)
3832 hw->mac.ops.clear_rar(hw, rar);
3833 done:
3834 return IXGBE_SUCCESS;
3835 }
3836
3837 /**
3838 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3839 * @hw: pointer to hardware struct
3840 * @rar: receive address register index to associate with a VMDq index
3841 * @vmdq: VMDq pool index
3842 **/
3843 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3844 {
3845 u32 mpsar;
3846 u32 rar_entries = hw->mac.num_rar_entries;
3847
3848 DEBUGFUNC("ixgbe_set_vmdq_generic");
3849
3850 /* Make sure we are using a valid rar index range */
3851 if (rar >= rar_entries) {
3852 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3853 "RAR index %d is out of range.\n", rar);
3854 return IXGBE_ERR_INVALID_ARGUMENT;
3855 }
3856
3857 if (vmdq < 32) {
3858 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3859 mpsar |= 1 << vmdq;
3860 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3861 } else {
3862 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3863 mpsar |= 1 << (vmdq - 32);
3864 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3865 }
3866 return IXGBE_SUCCESS;
3867 }
3868
3869 /**
3870 * This function should only be involved in the IOV mode.
3871 * In IOV mode, Default pool is next pool after the number of
3872 * VFs advertized and not 0.
3873 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3874 *
3875 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3876 * @hw: pointer to hardware struct
3877 * @vmdq: VMDq pool index
3878 **/
3879 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3880 {
3881 u32 rar = hw->mac.san_mac_rar_index;
3882
3883 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3884
3885 if (vmdq < 32) {
3886 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3887 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3888 } else {
3889 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3890 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3891 }
3892
3893 return IXGBE_SUCCESS;
3894 }
3895
3896 /**
3897 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3898 * @hw: pointer to hardware structure
3899 **/
3900 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3901 {
3902 int i;
3903
3904 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3905 DEBUGOUT(" Clearing UTA\n");
3906
3907 for (i = 0; i < 128; i++)
3908 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3909
3910 return IXGBE_SUCCESS;
3911 }
3912
3913 /**
3914 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3915 * @hw: pointer to hardware structure
3916 * @vlan: VLAN id to write to VLAN filter
3917 *
3918 * return the VLVF index where this VLAN id should be placed
3919 *
3920 **/
3921 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3922 {
3923 s32 regindex, first_empty_slot;
3924 u32 bits;
3925
3926 /* short cut the special case */
3927 if (vlan == 0)
3928 return 0;
3929
3930 /* if vlvf_bypass is set we don't want to use an empty slot, we
3931 * will simply bypass the VLVF if there are no entries present in the
3932 * VLVF that contain our VLAN
3933 */
3934 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3935
3936 /* add VLAN enable bit for comparison */
3937 vlan |= IXGBE_VLVF_VIEN;
3938
3939 /* Search for the vlan id in the VLVF entries. Save off the first empty
3940 * slot found along the way.
3941 *
3942 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3943 */
3944 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3945 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3946 if (bits == vlan)
3947 return regindex;
3948 if (!first_empty_slot && !bits)
3949 first_empty_slot = regindex;
3950 }
3951
3952 /* If we are here then we didn't find the VLAN. Return first empty
3953 * slot we found during our search, else error.
3954 */
3955 if (!first_empty_slot)
3956 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3957
3958 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3959 }
3960
3961 /**
3962 * ixgbe_set_vfta_generic - Set VLAN filter table
3963 * @hw: pointer to hardware structure
3964 * @vlan: VLAN id to write to VLAN filter
3965 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3966 * @vlan_on: boolean flag to turn on/off VLAN
3967 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3968 *
3969 * Turn on/off specified VLAN in the VLAN filter table.
3970 **/
3971 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3972 bool vlan_on, bool vlvf_bypass)
3973 {
3974 u32 regidx, vfta_delta, vfta;
3975 s32 ret_val;
3976
3977 DEBUGFUNC("ixgbe_set_vfta_generic");
3978
3979 if (vlan > 4095 || vind > 63)
3980 return IXGBE_ERR_PARAM;
3981
3982 /*
3983 * this is a 2 part operation - first the VFTA, then the
3984 * VLVF and VLVFB if VT Mode is set
3985 * We don't write the VFTA until we know the VLVF part succeeded.
3986 */
3987
3988 /* Part 1
3989 * The VFTA is a bitstring made up of 128 32-bit registers
3990 * that enable the particular VLAN id, much like the MTA:
3991 * bits[11-5]: which register
3992 * bits[4-0]: which bit in the register
3993 */
3994 regidx = vlan / 32;
3995 vfta_delta = 1 << (vlan % 32);
3996 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3997
3998 /*
3999 * vfta_delta represents the difference between the current value
4000 * of vfta and the value we want in the register. Since the diff
4001 * is an XOR mask we can just update the vfta using an XOR
4002 */
4003 vfta_delta &= vlan_on ? ~vfta : vfta;
4004 vfta ^= vfta_delta;
4005
4006 /* Part 2
4007 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
4008 */
4009 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
4010 vfta, vlvf_bypass);
4011 if (ret_val != IXGBE_SUCCESS) {
4012 if (vlvf_bypass)
4013 goto vfta_update;
4014 return ret_val;
4015 }
4016
4017 vfta_update:
4018 /* Update VFTA now that we are ready for traffic */
4019 if (vfta_delta)
4020 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
4021
4022 return IXGBE_SUCCESS;
4023 }
4024
4025 /**
4026 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
4027 * @hw: pointer to hardware structure
4028 * @vlan: VLAN id to write to VLAN filter
4029 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
4030 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
4031 * @vfta_delta: pointer to the difference between the current value of VFTA
4032 * and the desired value
4033 * @vfta: the desired value of the VFTA
4034 * @vlvf_bypass: boolean flag indicating updating default pool is okay
4035 *
4036 * Turn on/off specified bit in VLVF table.
4037 **/
4038 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4039 bool vlan_on, u32 *vfta_delta, u32 vfta,
4040 bool vlvf_bypass)
4041 {
4042 u32 bits;
4043 s32 vlvf_index;
4044
4045 DEBUGFUNC("ixgbe_set_vlvf_generic");
4046
4047 if (vlan > 4095 || vind > 63)
4048 return IXGBE_ERR_PARAM;
4049
4050 /* If VT Mode is set
4051 * Either vlan_on
4052 * make sure the vlan is in VLVF
4053 * set the vind bit in the matching VLVFB
4054 * Or !vlan_on
4055 * clear the pool bit and possibly the vind
4056 */
4057 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4058 return IXGBE_SUCCESS;
4059
4060 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
4061 if (vlvf_index < 0)
4062 return vlvf_index;
4063
4064 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4065
4066 /* set the pool bit */
4067 bits |= 1 << (vind % 32);
4068 if (vlan_on)
4069 goto vlvf_update;
4070
4071 /* clear the pool bit */
4072 bits ^= 1 << (vind % 32);
4073
4074 if (!bits &&
4075 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4076 /* Clear VFTA first, then disable VLVF. Otherwise
4077 * we run the risk of stray packets leaking into
4078 * the PF via the default pool
4079 */
4080 if (*vfta_delta)
4081 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4082
4083 /* disable VLVF and clear remaining bit from pool */
4084 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4085 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4086
4087 return IXGBE_SUCCESS;
4088 }
4089
4090 /* If there are still bits set in the VLVFB registers
4091 * for the VLAN ID indicated we need to see if the
4092 * caller is requesting that we clear the VFTA entry bit.
4093 * If the caller has requested that we clear the VFTA
4094 * entry bit but there are still pools/VFs using this VLAN
4095 * ID entry then ignore the request. We're not worried
4096 * about the case where we're turning the VFTA VLAN ID
4097 * entry bit on, only when requested to turn it off as
4098 * there may be multiple pools and/or VFs using the
4099 * VLAN ID entry. In that case we cannot clear the
4100 * VFTA bit until all pools/VFs using that VLAN ID have also
4101 * been cleared. This will be indicated by "bits" being
4102 * zero.
4103 */
4104 *vfta_delta = 0;
4105
4106 vlvf_update:
4107 /* record pool change and enable VLAN ID if not already enabled */
4108 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4109 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4110
4111 return IXGBE_SUCCESS;
4112 }
4113
4114 /**
4115 * ixgbe_clear_vfta_generic - Clear VLAN filter table
4116 * @hw: pointer to hardware structure
4117 *
4118 * Clears the VLAN filer table, and the VMDq index associated with the filter
4119 **/
4120 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4121 {
4122 u32 offset;
4123
4124 DEBUGFUNC("ixgbe_clear_vfta_generic");
4125
4126 for (offset = 0; offset < hw->mac.vft_size; offset++)
4127 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4128
4129 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4130 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4131 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4132 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4133 }
4134
4135 return IXGBE_SUCCESS;
4136 }
4137
4138 /**
4139 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4140 * @hw: pointer to hardware structure
4141 *
4142 * Contains the logic to identify if we need to verify link for the
4143 * crosstalk fix
4144 **/
4145 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4146 {
4147
4148 /* Does FW say we need the fix */
4149 if (!hw->need_crosstalk_fix)
4150 return FALSE;
4151
4152 /* Only consider SFP+ PHYs i.e. media type fiber */
4153 switch (hw->mac.ops.get_media_type(hw)) {
4154 case ixgbe_media_type_fiber:
4155 case ixgbe_media_type_fiber_qsfp:
4156 break;
4157 default:
4158 return FALSE;
4159 }
4160
4161 return TRUE;
4162 }
4163
/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed, set to the decoded LINKS speed field
 * @link_up: TRUE when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		u32 sfp_cage_full;

		/* The cage-present SDP pin differs per MAC generation */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP0;
			break;
		default:
			/* sanity check - No SFP+ devices here */
			sfp_cage_full = FALSE;
			break;
		}

		/* Empty cage: report link down without consulting LINKS */
		if (!sfp_cage_full) {
			*link_up = FALSE;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* Poll in 100 ms steps, up to max_link_up_time iterations,
		 * until the link-up bit is observed.
		 */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* Decode the speed field.  On X550-class MACs the NON_STD bit
	 * re-maps the 10G encoding to 2.5G and the 100M encoding to 5G.
	 */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type == ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* 10 Mb/s is only reported on the X550EM_A 1G copper parts */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
			*speed = IXGBE_LINK_SPEED_10_FULL;
		}
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
4269
/**
 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 * the EEPROM
 * @hw: pointer to hardware structure
 * @wwnn_prefix: the alternative WWNN prefix
 * @wwpn_prefix: the alternative WWPN prefix
 *
 * This function will read the EEPROM from the alternative SAN MAC address
 * block to check the support for the alternative WWNN/WWPN prefix support.
 * Always returns IXGBE_SUCCESS; on any failure the prefixes are left at
 * their 0xFFFF "not available" defaults and the error is only logged.
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	/* A 0 or 0xFFFF block pointer means the block is absent;
	 * that is not an error.
	 */
	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	/* NOTE(review): a failed WWNN read is only reported here; the code
	 * still falls through and attempts the WWPN read as well.
	 */
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	/* Log the failing offset but still report success; the 0xFFFF
	 * defaults written above signal "no prefix" to the caller.
	 */
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}
4327
4328 /**
4329 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4330 * @hw: pointer to hardware structure
4331 * @bs: the fcoe boot status
4332 *
4333 * This function will read the FCOE boot status from the iSCSI FCOE block
4334 **/
4335 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4336 {
4337 u16 offset, caps, flags;
4338 s32 status;
4339
4340 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4341
4342 /* clear output first */
4343 *bs = ixgbe_fcoe_bootstatus_unavailable;
4344
4345 /* check if FCOE IBA block is present */
4346 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4347 status = hw->eeprom.ops.read(hw, offset, &caps);
4348 if (status != IXGBE_SUCCESS)
4349 goto out;
4350
4351 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4352 goto out;
4353
4354 /* check if iSCSI FCOE block is populated */
4355 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4356 if (status != IXGBE_SUCCESS)
4357 goto out;
4358
4359 if ((offset == 0) || (offset == 0xFFFF))
4360 goto out;
4361
4362 /* read fcoe flags in iSCSI FCOE block */
4363 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4364 status = hw->eeprom.ops.read(hw, offset, &flags);
4365 if (status != IXGBE_SUCCESS)
4366 goto out;
4367
4368 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4369 *bs = ixgbe_fcoe_bootstatus_enabled;
4370 else
4371 *bs = ixgbe_fcoe_bootstatus_disabled;
4372
4373 out:
4374 return status;
4375 }
4376
4377 /**
4378 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4379 * @hw: pointer to hardware structure
4380 * @enable: enable or disable switch for MAC anti-spoofing
4381 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4382 *
4383 **/
4384 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4385 {
4386 int vf_target_reg = vf >> 3;
4387 int vf_target_shift = vf % 8;
4388 u32 pfvfspoof;
4389
4390 if (hw->mac.type == ixgbe_mac_82598EB)
4391 return;
4392
4393 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4394 if (enable)
4395 pfvfspoof |= (1 << vf_target_shift);
4396 else
4397 pfvfspoof &= ~(1 << vf_target_shift);
4398 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4399 }
4400
4401 /**
4402 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4403 * @hw: pointer to hardware structure
4404 * @enable: enable or disable switch for VLAN anti-spoofing
4405 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4406 *
4407 **/
4408 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4409 {
4410 int vf_target_reg = vf >> 3;
4411 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4412 u32 pfvfspoof;
4413
4414 if (hw->mac.type == ixgbe_mac_82598EB)
4415 return;
4416
4417 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4418 if (enable)
4419 pfvfspoof |= (1 << vf_target_shift);
4420 else
4421 pfvfspoof &= ~(1 << vf_target_shift);
4422 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4423 }
4424
/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	/* NOTE(review): the EEPROM read status is ignored; on a failed
	 * read *device_caps may be left untouched by the backend yet the
	 * function still reports IXGBE_SUCCESS - callers should be aware.
	 */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}
4441
4442 /**
4443 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4444 * @hw: pointer to hardware structure
4445 *
4446 **/
4447 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4448 {
4449 u32 regval;
4450 u32 i;
4451
4452 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4453
4454 /* Enable relaxed ordering */
4455 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4456 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4457 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4458 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4459 }
4460
4461 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4462 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4463 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4464 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4465 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4466 }
4467
4468 }
4469
4470 /**
4471 * ixgbe_calculate_checksum - Calculate checksum for buffer
4472 * @buffer: pointer to EEPROM
4473 * @length: size of EEPROM to calculate a checksum for
4474 * Calculates the checksum for some buffer on a specified length. The
4475 * checksum calculated is returned.
4476 **/
4477 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4478 {
4479 u32 i;
4480 u8 sum = 0;
4481
4482 DEBUGFUNC("ixgbe_calculate_checksum");
4483
4484 if (!buffer)
4485 return 0;
4486
4487 for (i = 0; i < length; i++)
4488 sum += buffer[i];
4489
4490 return (u8) (0 - sum);
4491 }
4492
/**
 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
 * @hw: pointer to the HW structure
 * @buffer: command to write and where the return status will be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 * by the caller.
 **/
s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	DEBUGFUNC("ixgbe_hic_unlocked");

	/* Reject empty or oversized commands up front */
	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(u32)) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll in 1 ms steps until FW clears the command bit or we time out */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command completion.  A zero timeout means "do not wait";
	 * in that case only the status-valid bit is checked.
	 */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	return IXGBE_SUCCESS;
}
4566
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u16 dword_len;
	u16 buf_len;
	s32 status;
	u32 bi;

	DEBUGFUNC("ixgbe_host_interface_command");

	/* Reject empty or oversized commands up front */
	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	/* Issue the command with the semaphore held */
	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (!buf_len)
		goto rel_out;

	/* The reply (header + payload) must fit in the caller's buffer */
	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off).
	 * NOTE(review): the "<=" bound reads one extra dword beyond
	 * dword_len; this matches the upstream driver - confirm against
	 * the FW CEM spec before changing it.
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

rel_out:
	/* Always release the management semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}
4647
/**
 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @minr: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 * @len: unused in the generic implementation
 * @driver_ver: unused in the generic implementation
 *
 * Sends driver version number to firmware through the manageability
 * block. On success return IXGBE_SUCCESS
 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 minr,
				 u8 build, u8 sub, u16 len,
				 const char *driver_ver)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
	UNREFERENCED_2PARAMETER(len, driver_ver);

	/* Build the DRIVER_INFO command block */
	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = (u8)hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = minr;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	/* Checksum is computed with the checksum field itself zeroed */
	fw_cmd.hdr.checksum = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
	/* NOTE(review): pad/pad2 are assigned after the checksum is taken;
	 * presumably they lie outside the checksummed range - confirm
	 * against the layout of struct ixgbe_hic_drv_info.
	 */
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;

	/* Retry the command up to FW_CEM_MAX_RETRIES times */
	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
						       sizeof(fw_cmd),
						       IXGBE_HI_COMMAND_TIMEOUT,
						       TRUE);
		if (ret_val != IXGBE_SUCCESS)
			continue;

		/* Command went through; map FW's response code to a status */
		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = IXGBE_SUCCESS;
		else
			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;

		break;
	}

	return ret_val;
}
4705
/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	/* Guard against division by zero below */
	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* fall through - configure remaining packet buffers */
	case PBA_STRATEGY_EQUAL:
		/* Split whatever is left evenly across the remaining PBs
		 * (i is 0 for EQUAL, num_pb/2 after the WEIGHTED case).
		 */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
4764
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs. This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.  Poll the PCI device status word in
	 * 100 us steps for up to the PCIe completion timeout.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Stop polling if the adapter was surprise-removed */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
4825
/**
 * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
 *
 * @hw: pointer to hardware structure
 * @cmd: Command we send to the FW
 * @status: The reply from the FW
 *
 * Bit-bangs the cmd to the by_pass FW status points to what is returned.
 * Drives a 3-wire (SCK/SDI/SDO) interface over the ESDP software-defined
 * pins; 32 bits are clocked out (cmd) and in (status), MSB first.
 **/
#define IXGBE_BYPASS_BB_WAIT 1
s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
{
	int i;
	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
	u32 esdp;

	if (!status)
		return IXGBE_ERR_PARAM;

	*status = 0;

	/* SDP vary by MAC type */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		sck = IXGBE_ESDP_SDP7;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP6;
		dir_sck = IXGBE_ESDP_SDP7_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP6_DIR;
		break;
	case ixgbe_mac_X540:
		sck = IXGBE_ESDP_SDP2;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP1;
		dir_sck = IXGBE_ESDP_SDP2_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP1_DIR;
		break;
	default:
		/* Bypass is only wired on 82599 and X540 class parts */
		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	/* Set SDP pins direction */
	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
	esdp |= dir_sck;	/* SCK as output */
	esdp |= dir_sdi;	/* SDI as output */
	esdp &= ~dir_sdo;	/* SDO as input */
	esdp |= sck;
	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Generate start condition: drop SDI while SCK is still high */
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp &= ~sck;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Clock out the new control word and clock in the status, MSB
	 * first; SDO is sampled after SCK's falling edge each cycle.
	 */
	for (i = 0; i < 32; i++) {
		if ((cmd >> (31 - i)) & 0x01) {
			esdp |= sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		} else {
			esdp &= ~sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		}
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		esdp |= sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		esdp &= ~sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & sdo)
			*status = (*status << 1) | 0x01;
		else
			*status = (*status << 1) | 0x00;
		msec_delay(IXGBE_BYPASS_BB_WAIT);
	}

	/* stop condition */
	esdp |= sck;
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);

	/* set the page bits to match the cmd that the status it belongs to */
	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);

	return IXGBE_SUCCESS;
}
4937
4938 /**
4939 * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
4940 *
4941 * If we send a write we can't be sure it took until we can read back
4942 * that same register. It can be a problem as some of the feilds may
4943 * for valid reasons change inbetween the time wrote the register and
4944 * we read it again to verify. So this function check everything we
4945 * can check and then assumes it worked.
4946 *
4947 * @u32 in_reg - The register cmd for the bit-bang read.
4948 * @u32 out_reg - The register returned from a bit-bang read.
4949 **/
4950 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
4951 {
4952 u32 mask;
4953
4954 /* Page must match for all control pages */
4955 if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
4956 return FALSE;
4957
4958 switch (in_reg & BYPASS_PAGE_M) {
4959 case BYPASS_PAGE_CTL0:
4960 /* All the following can't change since the last write
4961 * - All the event actions
4962 * - The timeout value
4963 */
4964 mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
4965 BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
4966 BYPASS_WDTIMEOUT_M |
4967 BYPASS_WDT_VALUE_M;
4968 if ((out_reg & mask) != (in_reg & mask))
4969 return FALSE;
4970
4971 /* 0x0 is never a valid value for bypass status */
4972 if (!(out_reg & BYPASS_STATUS_OFF_M))
4973 return FALSE;
4974 break;
4975 case BYPASS_PAGE_CTL1:
4976 /* All the following can't change since the last write
4977 * - time valid bit
4978 * - time we last sent
4979 */
4980 mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
4981 if ((out_reg & mask) != (in_reg & mask))
4982 return FALSE;
4983 break;
4984 case BYPASS_PAGE_CTL2:
4985 /* All we can check in this page is control number
4986 * which is already done above.
4987 */
4988 break;
4989 }
4990
4991 /* We are as sure as we can be return TRUE */
4992 return TRUE;
4993 }
4994
/**
 * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
 *
 * @hw: pointer to hardware structure
 * @ctrl: The control word we are setting.
 * @event: The event we are setting in the FW. This also happens to
 *	   be the mask for the event we are setting (handy)
 * @action: The action we set the event to in the FW. This is in a
 *	   bit field that happens to be what we want to put in
 *	   the event spot (also handy)
 **/
s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
			     u32 action)
{
	u32 by_ctl = 0;
	u32 cmd, verify;
	u32 count = 0;

	/* Get current values */
	cmd = ctrl;	/* just reading only need control number */
	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* Set to new action: clear the event's field with its mask, then
	 * OR in the action bits along with the write-enable bit.
	 */
	cmd = (by_ctl & ~event) | BYPASS_WE | action;
	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* Page 0 force a FW eeprom write which is slow so verify */
	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
		verify = BYPASS_PAGE_CTL0;
		do {
			/* Give up after 5 read-back attempts */
			if (count++ > 5)
				return IXGBE_BYPASS_FW_WRITE_FAILURE;

			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
				return IXGBE_ERR_INVALID_ARGUMENT;
		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
	} else {
		/* Give the FW time for the write to stick */
		msec_delay(100);
	}

	return IXGBE_SUCCESS;
}
5040
5041 /**
5042 * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
5043 *
5044 * @hw: pointer to hardware structure
5045 * @addr: The bypass eeprom address to read.
5046 * @value: The 8b of data at the address above.
5047 **/
5048 s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
5049 {
5050 u32 cmd;
5051 u32 status;
5052
5053
5054 /* send the request */
5055 cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
5056 cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
5057 if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5058 return IXGBE_ERR_INVALID_ARGUMENT;
5059
5060 /* We have give the FW time for the write to stick */
5061 msec_delay(100);
5062
5063 /* now read the results */
5064 cmd &= ~BYPASS_WE;
5065 if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5066 return IXGBE_ERR_INVALID_ARGUMENT;
5067
5068 *value = status & BYPASS_CTL2_DATA_M;
5069
5070 return IXGBE_SUCCESS;
5071 }
5072
5073
5074 /**
5075 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
5076 * @hw: pointer to hardware structure
5077 * @map: pointer to u8 arr for returning map
5078 *
5079 * Read the rtrup2tc HW register and resolve its content into map
5080 **/
5081 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
5082 {
5083 u32 reg, i;
5084
5085 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
5086 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
5087 map[i] = IXGBE_RTRUP2TC_UP_MASK &
5088 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5089 return;
5090 }
5091
5092 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5093 {
5094 u32 pfdtxgswc;
5095 u32 rxctrl;
5096
5097 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5098 if (rxctrl & IXGBE_RXCTRL_RXEN) {
5099 if (hw->mac.type != ixgbe_mac_82598EB) {
5100 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5101 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5102 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5103 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5104 hw->mac.set_lben = TRUE;
5105 } else {
5106 hw->mac.set_lben = FALSE;
5107 }
5108 }
5109 rxctrl &= ~IXGBE_RXCTRL_RXEN;
5110 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5111 }
5112 }
5113
5114 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5115 {
5116 u32 pfdtxgswc;
5117 u32 rxctrl;
5118
5119 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5120 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5121
5122 if (hw->mac.type != ixgbe_mac_82598EB) {
5123 if (hw->mac.set_lben) {
5124 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5125 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5126 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5127 hw->mac.set_lben = FALSE;
5128 }
5129 }
5130 }
5131
5132 /**
5133 * ixgbe_mng_present - returns TRUE when management capability is present
5134 * @hw: pointer to hardware structure
5135 */
5136 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5137 {
5138 u32 fwsm;
5139
5140 if (hw->mac.type < ixgbe_mac_82599EB)
5141 return FALSE;
5142
5143 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5144 fwsm &= IXGBE_FWSM_MODE_MASK;
5145 return fwsm == IXGBE_FWSM_FW_MODE_PT;
5146 }
5147
5148 /**
5149 * ixgbe_mng_enabled - Is the manageability engine enabled?
5150 * @hw: pointer to hardware structure
5151 *
5152 * Returns TRUE if the manageability engine is enabled.
5153 **/
5154 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5155 {
5156 u32 fwsm, manc, factps;
5157
5158 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5159 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5160 return FALSE;
5161
5162 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5163 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5164 return FALSE;
5165
5166 if (hw->mac.type <= ixgbe_mac_X540) {
5167 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5168 if (factps & IXGBE_FACTPS_MNGCG)
5169 return FALSE;
5170 }
5171
5172 return TRUE;
5173 }
5174
5175 /**
5176 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
5177 * @hw: pointer to hardware structure
5178 * @speed: new link speed
5179 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
5180 *
5181 * Set the link speed in the MAC and/or PHY register and restarts link.
5182 **/
5183 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
5184 ixgbe_link_speed speed,
5185 bool autoneg_wait_to_complete)
5186 {
5187 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5188 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5189 s32 status = IXGBE_SUCCESS;
5190 u32 speedcnt = 0;
5191 u32 i = 0;
5192 bool autoneg, link_up = FALSE;
5193
5194 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
5195
5196 /* Mask off requested but non-supported speeds */
5197 status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
5198 if (status != IXGBE_SUCCESS)
5199 return status;
5200
5201 speed &= link_speed;
5202
5203 /* Try each speed one by one, highest priority first. We do this in
5204 * software because 10Gb fiber doesn't support speed autonegotiation.
5205 */
5206 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
5207 speedcnt++;
5208 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5209
5210 /* Set the module link speed */
5211 switch (hw->phy.media_type) {
5212 case ixgbe_media_type_fiber_fixed:
5213 case ixgbe_media_type_fiber:
5214 ixgbe_set_rate_select_speed(hw,
5215 IXGBE_LINK_SPEED_10GB_FULL);
5216 break;
5217 case ixgbe_media_type_fiber_qsfp:
5218 /* QSFP module automatically detects MAC link speed */
5219 break;
5220 default:
5221 DEBUGOUT("Unexpected media type.\n");
5222 break;
5223 }
5224
5225 /* Allow module to change analog characteristics (1G->10G) */
5226 msec_delay(40);
5227
5228 status = ixgbe_setup_mac_link(hw,
5229 IXGBE_LINK_SPEED_10GB_FULL,
5230 autoneg_wait_to_complete);
5231 if (status != IXGBE_SUCCESS)
5232 return status;
5233
5234 /* Flap the Tx laser if it has not already been done */
5235 ixgbe_flap_tx_laser(hw);
5236
5237 /* Wait for the controller to acquire link. Per IEEE 802.3ap,
5238 * Section 73.10.2, we may have to wait up to 500ms if KR is
5239 * attempted. 82599 uses the same timing for 10g SFI.
5240 */
5241 for (i = 0; i < 5; i++) {
5242 /* Wait for the link partner to also set speed */
5243 msec_delay(100);
5244
5245 /* If we have link, just jump out */
5246 status = ixgbe_check_link(hw, &link_speed,
5247 &link_up, FALSE);
5248 if (status != IXGBE_SUCCESS)
5249 return status;
5250
5251 if (link_up)
5252 goto out;
5253 }
5254 }
5255
5256 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
5257 speedcnt++;
5258 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
5259 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
5260
5261 /* Set the module link speed */
5262 switch (hw->phy.media_type) {
5263 case ixgbe_media_type_fiber_fixed:
5264 case ixgbe_media_type_fiber:
5265 ixgbe_set_rate_select_speed(hw,
5266 IXGBE_LINK_SPEED_1GB_FULL);
5267 break;
5268 case ixgbe_media_type_fiber_qsfp:
5269 /* QSFP module automatically detects link speed */
5270 break;
5271 default:
5272 DEBUGOUT("Unexpected media type.\n");
5273 break;
5274 }
5275
5276 /* Allow module to change analog characteristics (10G->1G) */
5277 msec_delay(40);
5278
5279 status = ixgbe_setup_mac_link(hw,
5280 IXGBE_LINK_SPEED_1GB_FULL,
5281 autoneg_wait_to_complete);
5282 if (status != IXGBE_SUCCESS)
5283 return status;
5284
5285 /* Flap the Tx laser if it has not already been done */
5286 ixgbe_flap_tx_laser(hw);
5287
5288 /* Wait for the link partner to also set speed */
5289 msec_delay(100);
5290
5291 /* If we have link, just jump out */
5292 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
5293 if (status != IXGBE_SUCCESS)
5294 return status;
5295
5296 if (link_up)
5297 goto out;
5298 }
5299
5300 /* We didn't get link. Configure back to the highest speed we tried,
5301 * (if there was more than one). We call ourselves back with just the
5302 * single highest speed that the user requested.
5303 */
5304 if (speedcnt > 1)
5305 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
5306 highest_link_speed,
5307 autoneg_wait_to_complete);
5308
5309 out:
5310 /* Set autoneg_advertised value based on input link speed */
5311 hw->phy.autoneg_advertised = 0;
5312
5313 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
5314 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
5315
5316 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
5317 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
5318
5319 return status;
5320 }
5321
5322 /**
5323 * ixgbe_set_soft_rate_select_speed - Set module link speed
5324 * @hw: pointer to hardware structure
5325 * @speed: link speed to set
5326 *
5327 * Set module link speed via the soft rate select.
5328 */
5329 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5330 ixgbe_link_speed speed)
5331 {
5332 s32 status;
5333 u8 rs, eeprom_data;
5334
5335 switch (speed) {
5336 case IXGBE_LINK_SPEED_10GB_FULL:
5337 /* one bit mask same as setting on */
5338 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5339 break;
5340 case IXGBE_LINK_SPEED_1GB_FULL:
5341 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5342 break;
5343 default:
5344 DEBUGOUT("Invalid fixed module speed\n");
5345 return;
5346 }
5347
5348 /* Set RS0 */
5349 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5350 IXGBE_I2C_EEPROM_DEV_ADDR2,
5351 &eeprom_data);
5352 if (status) {
5353 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5354 goto out;
5355 }
5356
5357 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5358
5359 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5360 IXGBE_I2C_EEPROM_DEV_ADDR2,
5361 eeprom_data);
5362 if (status) {
5363 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5364 goto out;
5365 }
5366
5367 /* Set RS1 */
5368 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5369 IXGBE_I2C_EEPROM_DEV_ADDR2,
5370 &eeprom_data);
5371 if (status) {
5372 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5373 goto out;
5374 }
5375
5376 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5377
5378 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5379 IXGBE_I2C_EEPROM_DEV_ADDR2,
5380 eeprom_data);
5381 if (status) {
5382 DEBUGOUT("Failed to write Rx Rate Select RS1\n");
5383 goto out;
5384 }
5385 out:
5386 return;
5387 }
5388