ixgbe_common.c revision 1.5 1 /******************************************************************************
2
3 Copyright (c) 2001-2013, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 238149 2012-07-05 20:51:44Z jfv $*/
34 /*$NetBSD: ixgbe_common.c,v 1.5 2015/04/24 07:00:51 msaitoh Exp $*/
35
36 #include "ixgbe_common.h"
37 #include "ixgbe_phy.h"
38 #include "ixgbe_api.h"
39
/* Low-level bit-bang EEPROM access primitives (private to this file) */
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);

/* Multicast hashing, SAN MAC lookup, and buffered EEPROM helpers */
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset);
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data);
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset);
61
62 /**
63 * ixgbe_init_ops_generic - Inits function ptrs
64 * @hw: pointer to the hardware structure
65 *
66 * Initialize the function pointers.
67 **/
68 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
69 {
70 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
71 struct ixgbe_mac_info *mac = &hw->mac;
72 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
73
74 DEBUGFUNC("ixgbe_init_ops_generic");
75
76 /* EEPROM */
77 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
78 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
79 if (eec & IXGBE_EEC_PRES) {
80 eeprom->ops.read = &ixgbe_read_eerd_generic;
81 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
82 } else {
83 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
84 eeprom->ops.read_buffer =
85 &ixgbe_read_eeprom_buffer_bit_bang_generic;
86 }
87 eeprom->ops.write = &ixgbe_write_eeprom_generic;
88 eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
89 eeprom->ops.validate_checksum =
90 &ixgbe_validate_eeprom_checksum_generic;
91 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
92 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
93
94 /* MAC */
95 mac->ops.init_hw = &ixgbe_init_hw_generic;
96 mac->ops.reset_hw = NULL;
97 mac->ops.start_hw = &ixgbe_start_hw_generic;
98 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
99 mac->ops.get_media_type = NULL;
100 mac->ops.get_supported_physical_layer = NULL;
101 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
102 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
103 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
104 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
105 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
106 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
107 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
108
109 /* LEDs */
110 mac->ops.led_on = &ixgbe_led_on_generic;
111 mac->ops.led_off = &ixgbe_led_off_generic;
112 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
113 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
114
115 /* RAR, Multicast, VLAN */
116 mac->ops.set_rar = &ixgbe_set_rar_generic;
117 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
118 mac->ops.insert_mac_addr = NULL;
119 mac->ops.set_vmdq = NULL;
120 mac->ops.clear_vmdq = NULL;
121 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
122 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
123 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
124 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
125 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
126 mac->ops.clear_vfta = NULL;
127 mac->ops.set_vfta = NULL;
128 mac->ops.set_vlvf = NULL;
129 mac->ops.init_uta_tables = NULL;
130
131 /* Flow Control */
132 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
133
134 /* Link */
135 mac->ops.get_link_capabilities = NULL;
136 mac->ops.setup_link = NULL;
137 mac->ops.check_link = NULL;
138
139 return IXGBE_SUCCESS;
140 }
141
142 /**
143 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
144 * control
145 * @hw: pointer to hardware structure
146 *
147 * There are several phys that do not support autoneg flow control. This
148 * function check the device id to see if the associated phy supports
149 * autoneg flow control.
150 **/
151 s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
152 {
153
154 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
155
156 switch (hw->device_id) {
157 case IXGBE_DEV_ID_82599_T3_LOM:
158 case IXGBE_DEV_ID_X540T:
159 return IXGBE_SUCCESS;
160 default:
161 return IXGBE_ERR_FC_NOT_SUPPORTED;
162 }
163 }
164
165 /**
166 * ixgbe_setup_fc - Set up flow control
167 * @hw: pointer to hardware structure
168 *
169 * Called at init time to set up flow control.
170 **/
171 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
172 {
173 s32 ret_val = IXGBE_SUCCESS;
174 u32 reg = 0, reg_bp = 0;
175 u16 reg_cu = 0;
176 bool got_lock = FALSE;
177
178 DEBUGFUNC("ixgbe_setup_fc");
179
180 /*
181 * Validate the requested mode. Strict IEEE mode does not allow
182 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
183 */
184 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
185 DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
186 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
187 goto out;
188 }
189
190 /*
191 * 10gig parts do not have a word in the EEPROM to determine the
192 * default flow control setting, so we explicitly set it to full.
193 */
194 if (hw->fc.requested_mode == ixgbe_fc_default)
195 hw->fc.requested_mode = ixgbe_fc_full;
196
197 /*
198 * Set up the 1G and 10G flow control advertisement registers so the
199 * HW will be able to do fc autoneg once the cable is plugged in. If
200 * we link at 10G, the 1G advertisement is harmless and vice versa.
201 */
202 switch (hw->phy.media_type) {
203 case ixgbe_media_type_fiber_fixed:
204 case ixgbe_media_type_fiber:
205 case ixgbe_media_type_backplane:
206 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
207 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
208 break;
209 case ixgbe_media_type_copper:
210 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
211 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
212 break;
213 default:
214 break;
215 }
216
217 /*
218 * The possible values of fc.requested_mode are:
219 * 0: Flow control is completely disabled
220 * 1: Rx flow control is enabled (we can receive pause frames,
221 * but not send pause frames).
222 * 2: Tx flow control is enabled (we can send pause frames but
223 * we do not support receiving pause frames).
224 * 3: Both Rx and Tx flow control (symmetric) are enabled.
225 * other: Invalid.
226 */
227 switch (hw->fc.requested_mode) {
228 case ixgbe_fc_none:
229 /* Flow control completely disabled by software override. */
230 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
231 if (hw->phy.media_type == ixgbe_media_type_backplane)
232 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
233 IXGBE_AUTOC_ASM_PAUSE);
234 else if (hw->phy.media_type == ixgbe_media_type_copper)
235 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
236 break;
237 case ixgbe_fc_tx_pause:
238 /*
239 * Tx Flow control is enabled, and Rx Flow control is
240 * disabled by software override.
241 */
242 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
243 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
244 if (hw->phy.media_type == ixgbe_media_type_backplane) {
245 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
246 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
247 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
248 reg_cu |= IXGBE_TAF_ASM_PAUSE;
249 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
250 }
251 break;
252 case ixgbe_fc_rx_pause:
253 /*
254 * Rx Flow control is enabled and Tx Flow control is
255 * disabled by software override. Since there really
256 * isn't a way to advertise that we are capable of RX
257 * Pause ONLY, we will advertise that we support both
258 * symmetric and asymmetric Rx PAUSE, as such we fall
259 * through to the fc_full statement. Later, we will
260 * disable the adapter's ability to send PAUSE frames.
261 */
262 case ixgbe_fc_full:
263 /* Flow control (both Rx and Tx) is enabled by SW override. */
264 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
265 if (hw->phy.media_type == ixgbe_media_type_backplane)
266 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
267 IXGBE_AUTOC_ASM_PAUSE;
268 else if (hw->phy.media_type == ixgbe_media_type_copper)
269 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
270 break;
271 default:
272 DEBUGOUT("Flow control param set incorrectly\n");
273 ret_val = IXGBE_ERR_CONFIG;
274 goto out;
275 break;
276 }
277
278 if (hw->mac.type != ixgbe_mac_X540) {
279 /*
280 * Enable auto-negotiation between the MAC & PHY;
281 * the MAC will advertise clause 37 flow control.
282 */
283 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
284 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
285
286 /* Disable AN timeout */
287 if (hw->fc.strict_ieee)
288 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
289
290 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
291 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
292 }
293
294 /*
295 * AUTOC restart handles negotiation of 1G and 10G on backplane
296 * and copper. There is no need to set the PCS1GCTL register.
297 *
298 */
299 if (hw->phy.media_type == ixgbe_media_type_backplane) {
300 reg_bp |= IXGBE_AUTOC_AN_RESTART;
301 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
302 * LESM is on, likewise reset_pipeline requries the lock as
303 * it also writes AUTOC.
304 */
305 if ((hw->mac.type == ixgbe_mac_82599EB) &&
306 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
307 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
308 IXGBE_GSSR_MAC_CSR_SM);
309 if (ret_val != IXGBE_SUCCESS) {
310 ret_val = IXGBE_ERR_SWFW_SYNC;
311 goto out;
312 }
313 got_lock = TRUE;
314 }
315
316 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
317 if (hw->mac.type == ixgbe_mac_82599EB)
318 ixgbe_reset_pipeline_82599(hw);
319
320 if (got_lock)
321 hw->mac.ops.release_swfw_sync(hw,
322 IXGBE_GSSR_MAC_CSR_SM);
323 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
324 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
325 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
326 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
327 }
328
329 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
330 out:
331 return ret_val;
332 }
333
/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized.
 *
 * NOTE(review): get_media_type, clear_vfta and clear_hw_cntrs must have been
 * populated by the device-specific init before this runs — they are NULL in
 * the generic op table.
 *
 * Returns IXGBE_SUCCESS, or the error from ixgbe_setup_fc().
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	/* Flush posted writes before continuing */
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

out:
	return ret_val;
}
378
379 /**
380 * ixgbe_start_hw_gen2 - Init sequence for common device family
381 * @hw: pointer to hw structure
382 *
383 * Performs the init sequence common to the second generation
384 * of 10 GbE devices.
385 * Devices in the second generation:
386 * 82599
387 * X540
388 **/
389 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
390 {
391 u32 i;
392 u32 regval;
393
394 /* Clear the rate limiters */
395 for (i = 0; i < hw->mac.max_tx_queues; i++) {
396 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
397 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
398 }
399 IXGBE_WRITE_FLUSH(hw);
400
401 /* Disable relaxed ordering */
402 for (i = 0; i < hw->mac.max_tx_queues; i++) {
403 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
404 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
405 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
406 }
407
408 for (i = 0; i < hw->mac.max_rx_queues; i++) {
409 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
410 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
411 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
412 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
413 }
414
415 return IXGBE_SUCCESS;
416 }
417
418 /**
419 * ixgbe_init_hw_generic - Generic hardware initialization
420 * @hw: pointer to hardware structure
421 *
422 * Initialize the hardware by resetting the hardware, filling the bus info
423 * structure and media type, clears all on chip counters, initializes receive
424 * address registers, multicast table, VLAN filter table, calls routine to set
425 * up link and flow control settings, and leaves transmit and receive units
426 * disabled and uninitialized
427 **/
428 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
429 {
430 s32 status;
431
432 DEBUGFUNC("ixgbe_init_hw_generic");
433
434 /* Reset the hardware */
435 status = hw->mac.ops.reset_hw(hw);
436
437 if (status == IXGBE_SUCCESS) {
438 /* Start the HW */
439 status = hw->mac.ops.start_hw(hw);
440 }
441
442 return status;
443 }
444
/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.  The read values are deliberately
 * discarded.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level flow control counters; 82599+ use the *CNT variants */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (per-TC) flow control counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Packet-size histogram and good/broadcast/multicast counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* RNBC only exists on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters; 82599+ split byte counters into L/H halves */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540 keeps some error counters in the PHY; read (and discard)
	 * them via MDIO, reusing 'i' as a scratch output word. */
	if (hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
558
559 /**
560 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
561 * @hw: pointer to hardware structure
562 * @pba_num: stores the part number string from the EEPROM
563 * @pba_num_size: part number string buffer length
564 *
565 * Reads the part number string from the EEPROM.
566 **/
567 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
568 u32 pba_num_size)
569 {
570 s32 ret_val;
571 u16 data;
572 u16 pba_ptr;
573 u16 offset;
574 u16 length;
575
576 DEBUGFUNC("ixgbe_read_pba_string_generic");
577
578 if (pba_num == NULL) {
579 DEBUGOUT("PBA string buffer was null\n");
580 return IXGBE_ERR_INVALID_ARGUMENT;
581 }
582
583 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
584 if (ret_val) {
585 DEBUGOUT("NVM Read Error\n");
586 return ret_val;
587 }
588
589 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
590 if (ret_val) {
591 DEBUGOUT("NVM Read Error\n");
592 return ret_val;
593 }
594
595 /*
596 * if data is not ptr guard the PBA must be in legacy format which
597 * means pba_ptr is actually our second data word for the PBA number
598 * and we can decode it into an ascii string
599 */
600 if (data != IXGBE_PBANUM_PTR_GUARD) {
601 DEBUGOUT("NVM PBA number is not stored as string\n");
602
603 /* we will need 11 characters to store the PBA */
604 if (pba_num_size < 11) {
605 DEBUGOUT("PBA string buffer too small\n");
606 return IXGBE_ERR_NO_SPACE;
607 }
608
609 /* extract hex string from data and pba_ptr */
610 pba_num[0] = (data >> 12) & 0xF;
611 pba_num[1] = (data >> 8) & 0xF;
612 pba_num[2] = (data >> 4) & 0xF;
613 pba_num[3] = data & 0xF;
614 pba_num[4] = (pba_ptr >> 12) & 0xF;
615 pba_num[5] = (pba_ptr >> 8) & 0xF;
616 pba_num[6] = '-';
617 pba_num[7] = 0;
618 pba_num[8] = (pba_ptr >> 4) & 0xF;
619 pba_num[9] = pba_ptr & 0xF;
620
621 /* put a null character on the end of our string */
622 pba_num[10] = '\0';
623
624 /* switch all the data but the '-' to hex char */
625 for (offset = 0; offset < 10; offset++) {
626 if (pba_num[offset] < 0xA)
627 pba_num[offset] += '0';
628 else if (pba_num[offset] < 0x10)
629 pba_num[offset] += 'A' - 0xA;
630 }
631
632 return IXGBE_SUCCESS;
633 }
634
635 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
636 if (ret_val) {
637 DEBUGOUT("NVM Read Error\n");
638 return ret_val;
639 }
640
641 if (length == 0xFFFF || length == 0) {
642 DEBUGOUT("NVM PBA number section invalid length\n");
643 return IXGBE_ERR_PBA_SECTION;
644 }
645
646 /* check if pba_num buffer is big enough */
647 if (pba_num_size < (((u32)length * 2) - 1)) {
648 DEBUGOUT("PBA string buffer too small\n");
649 return IXGBE_ERR_NO_SPACE;
650 }
651
652 /* trim pba length from start of string */
653 pba_ptr++;
654 length--;
655
656 for (offset = 0; offset < length; offset++) {
657 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
658 if (ret_val) {
659 DEBUGOUT("NVM Read Error\n");
660 return ret_val;
661 }
662 pba_num[offset * 2] = (u8)(data >> 8);
663 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
664 }
665 pba_num[offset * 2] = '\0';
666
667 return IXGBE_SUCCESS;
668 }
669
670 /**
671 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
672 * @hw: pointer to hardware structure
673 * @pba_num: stores the part number from the EEPROM
674 *
675 * Reads the part number from the EEPROM.
676 **/
677 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
678 {
679 s32 ret_val;
680 u16 data;
681
682 DEBUGFUNC("ixgbe_read_pba_num_generic");
683
684 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
685 if (ret_val) {
686 DEBUGOUT("NVM Read Error\n");
687 return ret_val;
688 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
689 DEBUGOUT("NVM Not supported\n");
690 return IXGBE_NOT_IMPLEMENTED;
691 }
692 *pba_num = (u32)(data << 16);
693
694 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
695 if (ret_val) {
696 DEBUGOUT("NVM Read Error\n");
697 return ret_val;
698 }
699 *pba_num |= data;
700
701 return IXGBE_SUCCESS;
702 }
703
704 /**
705 * ixgbe_read_pba_raw
706 * @hw: pointer to the HW structure
707 * @eeprom_buf: optional pointer to EEPROM image
708 * @eeprom_buf_size: size of EEPROM image in words
709 * @max_pba_block_size: PBA block size limit
710 * @pba: pointer to output PBA structure
711 *
712 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
713 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
714 *
715 **/
716 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
717 u32 eeprom_buf_size, u16 max_pba_block_size,
718 struct ixgbe_pba *pba)
719 {
720 s32 ret_val;
721 u16 pba_block_size;
722
723 if (pba == NULL)
724 return IXGBE_ERR_PARAM;
725
726 if (eeprom_buf == NULL) {
727 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
728 &pba->word[0]);
729 if (ret_val)
730 return ret_val;
731 } else {
732 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
733 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
734 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
735 } else {
736 return IXGBE_ERR_PARAM;
737 }
738 }
739
740 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
741 if (pba->pba_block == NULL)
742 return IXGBE_ERR_PARAM;
743
744 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
745 eeprom_buf_size,
746 &pba_block_size);
747 if (ret_val)
748 return ret_val;
749
750 if (pba_block_size > max_pba_block_size)
751 return IXGBE_ERR_PARAM;
752
753 if (eeprom_buf == NULL) {
754 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
755 pba_block_size,
756 pba->pba_block);
757 if (ret_val)
758 return ret_val;
759 } else {
760 if (eeprom_buf_size > (u32)(pba->word[1] +
761 pba->pba_block[0])) {
762 memcpy(pba->pba_block,
763 &eeprom_buf[pba->word[1]],
764 pba_block_size * sizeof(u16));
765 } else {
766 return IXGBE_ERR_PARAM;
767 }
768 }
769 }
770
771 return IXGBE_SUCCESS;
772 }
773
774 /**
775 * ixgbe_write_pba_raw
776 * @hw: pointer to the HW structure
777 * @eeprom_buf: optional pointer to EEPROM image
778 * @eeprom_buf_size: size of EEPROM image in words
779 * @pba: pointer to PBA structure
780 *
781 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
782 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
783 *
784 **/
785 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
786 u32 eeprom_buf_size, struct ixgbe_pba *pba)
787 {
788 s32 ret_val;
789
790 if (pba == NULL)
791 return IXGBE_ERR_PARAM;
792
793 if (eeprom_buf == NULL) {
794 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
795 &pba->word[0]);
796 if (ret_val)
797 return ret_val;
798 } else {
799 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
800 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
801 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
802 } else {
803 return IXGBE_ERR_PARAM;
804 }
805 }
806
807 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
808 if (pba->pba_block == NULL)
809 return IXGBE_ERR_PARAM;
810
811 if (eeprom_buf == NULL) {
812 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
813 pba->pba_block[0],
814 pba->pba_block);
815 if (ret_val)
816 return ret_val;
817 } else {
818 if (eeprom_buf_size > (u32)(pba->word[1] +
819 pba->pba_block[0])) {
820 memcpy(&eeprom_buf[pba->word[1]],
821 pba->pba_block,
822 pba->pba_block[0] * sizeof(u16));
823 } else {
824 return IXGBE_ERR_PARAM;
825 }
826 }
827 }
828
829 return IXGBE_SUCCESS;
830 }
831
832 /**
833 * ixgbe_get_pba_block_size
834 * @hw: pointer to the HW structure
835 * @eeprom_buf: optional pointer to EEPROM image
836 * @eeprom_buf_size: size of EEPROM image in words
837 * @pba_data_size: pointer to output variable
838 *
839 * Returns the size of the PBA block in words. Function operates on EEPROM
840 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
841 * EEPROM device.
842 *
843 **/
844 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
845 u32 eeprom_buf_size, u16 *pba_block_size)
846 {
847 s32 ret_val;
848 u16 pba_word[2];
849 u16 length;
850
851 DEBUGFUNC("ixgbe_get_pba_block_size");
852
853 if (eeprom_buf == NULL) {
854 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
855 &pba_word[0]);
856 if (ret_val)
857 return ret_val;
858 } else {
859 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
860 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
861 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
862 } else {
863 return IXGBE_ERR_PARAM;
864 }
865 }
866
867 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
868 if (eeprom_buf == NULL) {
869 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
870 &length);
871 if (ret_val)
872 return ret_val;
873 } else {
874 if (eeprom_buf_size > pba_word[1])
875 length = eeprom_buf[pba_word[1] + 0];
876 else
877 return IXGBE_ERR_PARAM;
878 }
879
880 if (length == 0xFFFF || length == 0)
881 return IXGBE_ERR_PBA_SECTION;
882 } else {
883 /* PBA number in legacy format, there is no PBA Block. */
884 length = 0;
885 }
886
887 if (pba_block_size != NULL)
888 *pba_block_size = length;
889
890 return IXGBE_SUCCESS;
891 }
892
893 /**
894 * ixgbe_get_mac_addr_generic - Generic get MAC address
895 * @hw: pointer to hardware structure
896 * @mac_addr: Adapter MAC address
897 *
898 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
899 * A reset of the adapter must be performed prior to calling this function
900 * in order for the MAC address to have been loaded from the EEPROM into RAR0
901 **/
902 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
903 {
904 u32 rar_high;
905 u32 rar_low;
906 u16 i;
907
908 DEBUGFUNC("ixgbe_get_mac_addr_generic");
909
910 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
911 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
912
913 for (i = 0; i < 4; i++)
914 mac_addr[i] = (u8)(rar_low >> (i*8));
915
916 for (i = 0; i < 2; i++)
917 mac_addr[i+4] = (u8)(rar_high >> (i*8));
918
919 return IXGBE_SUCCESS;
920 }
921
922 /**
923 * ixgbe_get_bus_info_generic - Generic set PCI bus info
924 * @hw: pointer to hardware structure
925 *
926 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
927 **/
928 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
929 {
930 struct ixgbe_mac_info *mac = &hw->mac;
931 u16 link_status;
932
933 DEBUGFUNC("ixgbe_get_bus_info_generic");
934
935 hw->bus.type = ixgbe_bus_type_pci_express;
936
937 /* Get the negotiated link width and speed from PCI config space */
938 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
939
940 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
941 case IXGBE_PCI_LINK_WIDTH_1:
942 hw->bus.width = ixgbe_bus_width_pcie_x1;
943 break;
944 case IXGBE_PCI_LINK_WIDTH_2:
945 hw->bus.width = ixgbe_bus_width_pcie_x2;
946 break;
947 case IXGBE_PCI_LINK_WIDTH_4:
948 hw->bus.width = ixgbe_bus_width_pcie_x4;
949 break;
950 case IXGBE_PCI_LINK_WIDTH_8:
951 hw->bus.width = ixgbe_bus_width_pcie_x8;
952 break;
953 default:
954 hw->bus.width = ixgbe_bus_width_unknown;
955 break;
956 }
957
958 switch (link_status & IXGBE_PCI_LINK_SPEED) {
959 case IXGBE_PCI_LINK_SPEED_2500:
960 hw->bus.speed = ixgbe_bus_speed_2500;
961 break;
962 case IXGBE_PCI_LINK_SPEED_5000:
963 hw->bus.speed = ixgbe_bus_speed_5000;
964 break;
965 case IXGBE_PCI_LINK_SPEED_8000:
966 hw->bus.speed = ixgbe_bus_speed_8000;
967 break;
968 default:
969 hw->bus.speed = ixgbe_bus_speed_unknown;
970 break;
971 }
972
973 mac->ops.set_lan_id(hw);
974
975 return IXGBE_SUCCESS;
976 }
977
978 /**
979 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
980 * @hw: pointer to the HW structure
981 *
982 * Determines the LAN function id by reading memory-mapped registers
983 * and swaps the port value if requested.
984 **/
985 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
986 {
987 struct ixgbe_bus_info *bus = &hw->bus;
988 u32 reg;
989
990 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
991
992 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
993 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
994 bus->lan_id = bus->func;
995
996 /* check for a port swap */
997 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
998 if (reg & IXGBE_FACTPS_LFS)
999 bus->func ^= 0x1;
1000 }
1001
/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct.  Clears interrupts,
 * disables transmit and receive units.  The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 *
 * Returns the result of ixgbe_disable_pcie_master() — an error indicates
 * PCIe master requests could not be quiesced.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables, then give hardware time to drain */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
1055
1056 /**
1057 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1058 * @hw: pointer to hardware structure
1059 * @index: led number to turn on
1060 **/
1061 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1062 {
1063 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1064
1065 DEBUGFUNC("ixgbe_led_on_generic");
1066
1067 /* To turn on the LED, set mode to ON. */
1068 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1069 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1070 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1071 IXGBE_WRITE_FLUSH(hw);
1072
1073 return IXGBE_SUCCESS;
1074 }
1075
1076 /**
1077 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1078 * @hw: pointer to hardware structure
1079 * @index: led number to turn off
1080 **/
1081 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1082 {
1083 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1084
1085 DEBUGFUNC("ixgbe_led_off_generic");
1086
1087 /* To turn off the LED, set mode to OFF. */
1088 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1089 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1090 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1091 IXGBE_WRITE_FLUSH(hw);
1092
1093 return IXGBE_SUCCESS;
1094 }
1095
1096 /**
1097 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1098 * @hw: pointer to hardware structure
1099 *
1100 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1101 * ixgbe_hw struct in order to set up EEPROM access.
1102 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	/* Run only once; later calls see type != uninitialized and return */
	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here. This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			/* word_size is a power of two derived from EEC size */
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* 8-bit parts carry the 9th address bit in the SPI opcode */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}
1148
1149 /**
1150 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1151 * @hw: pointer to hardware structure
1152 * @offset: offset within the EEPROM to write
1153 * @words: number of word(s)
1154 * @data: 16 bit word(s) to write to EEPROM
1155 *
 * Writes 16 bit word(s) to EEPROM through bit-bang method
1157 **/
1158 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1159 u16 words, u16 *data)
1160 {
1161 s32 status = IXGBE_SUCCESS;
1162 u16 i, count;
1163
1164 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1165
1166 hw->eeprom.ops.init_params(hw);
1167
1168 if (words == 0) {
1169 status = IXGBE_ERR_INVALID_ARGUMENT;
1170 goto out;
1171 }
1172
1173 if (offset + words > hw->eeprom.word_size) {
1174 status = IXGBE_ERR_EEPROM;
1175 goto out;
1176 }
1177
1178 /*
1179 * The EEPROM page size cannot be queried from the chip. We do lazy
1180 * initialization. It is worth to do that when we write large buffer.
1181 */
1182 if ((hw->eeprom.word_page_size == 0) &&
1183 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1184 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1185
1186 /*
1187 * We cannot hold synchronization semaphores for too long
1188 * to avoid other entity starvation. However it is more efficient
1189 * to read in bursts than synchronizing access for each word.
1190 */
1191 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1192 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1193 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1194 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1195 count, &data[i]);
1196
1197 if (status != IXGBE_SUCCESS)
1198 break;
1199 }
1200
1201 out:
1202 return status;
1203 }
1204
1205 /**
1206 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1207 * @hw: pointer to hardware structure
1208 * @offset: offset within the EEPROM to be written to
1209 * @words: number of word(s)
1210 * @data: 16 bit word(s) to be written to the EEPROM
1211 *
1212 * If ixgbe_eeprom_update_checksum is not called after this function, the
1213 * EEPROM will most likely contain an invalid checksum.
1214 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing (SWFW semaphore + access grant) */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * NOTE: the inner do/while below also advances i, so each
		 * outer iteration issues one page-sized write command, not
		 * one word.
		 */
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* The device takes a byte address, hence the * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				word = data[i];
				/* Swap to wire byte order before shifting */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* Unknown page size: one word per command */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* Give the part time to finish its write cycle */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1287
1288 /**
1289 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1290 * @hw: pointer to hardware structure
1291 * @offset: offset within the EEPROM to be written to
1292 * @data: 16 bit word to be written to the EEPROM
1293 *
1294 * If ixgbe_eeprom_update_checksum is not called after this function, the
1295 * EEPROM will most likely contain an invalid checksum.
1296 **/
1297 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1298 {
1299 s32 status;
1300
1301 DEBUGFUNC("ixgbe_write_eeprom_generic");
1302
1303 hw->eeprom.ops.init_params(hw);
1304
1305 if (offset >= hw->eeprom.word_size) {
1306 status = IXGBE_ERR_EEPROM;
1307 goto out;
1308 }
1309
1310 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1311
1312 out:
1313 return status;
1314 }
1315
1316 /**
1317 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1318 * @hw: pointer to hardware structure
1319 * @offset: offset within the EEPROM to be read
1320 * @data: read 16 bit words(s) from EEPROM
1321 * @words: number of word(s)
1322 *
1323 * Reads 16 bit word(s) from EEPROM through bit-bang method
1324 **/
1325 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1326 u16 words, u16 *data)
1327 {
1328 s32 status = IXGBE_SUCCESS;
1329 u16 i, count;
1330
1331 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1332
1333 hw->eeprom.ops.init_params(hw);
1334
1335 if (words == 0) {
1336 status = IXGBE_ERR_INVALID_ARGUMENT;
1337 goto out;
1338 }
1339
1340 if (offset + words > hw->eeprom.word_size) {
1341 status = IXGBE_ERR_EEPROM;
1342 goto out;
1343 }
1344
1345 /*
1346 * We cannot hold synchronization semaphores for too long
1347 * to avoid other entity starvation. However it is more efficient
1348 * to read in bursts than synchronizing access for each word.
1349 */
1350 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1351 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1352 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1353
1354 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1355 count, &data[i]);
1356
1357 if (status != IXGBE_SUCCESS)
1358 break;
1359 }
1360
1361 out:
1362 return status;
1363 }
1364
1365 /**
1366 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1367 * @hw: pointer to hardware structure
1368 * @offset: offset within the EEPROM to be read
1369 * @words: number of word(s)
1370 * @data: read 16 bit word(s) from EEPROM
1371 *
1372 * Reads 16 bit word(s) from EEPROM through bit-bang method
1373 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading (SWFW semaphore + access grant) */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* The device takes a byte address, hence the * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data, swapping from wire byte order. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1422
1423 /**
1424 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1425 * @hw: pointer to hardware structure
1426 * @offset: offset within the EEPROM to be read
1427 * @data: read 16 bit value from EEPROM
1428 *
1429 * Reads 16 bit value from EEPROM through bit-bang method
1430 **/
1431 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1432 u16 *data)
1433 {
1434 s32 status;
1435
1436 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1437
1438 hw->eeprom.ops.init_params(hw);
1439
1440 if (offset >= hw->eeprom.word_size) {
1441 status = IXGBE_ERR_EEPROM;
1442 goto out;
1443 }
1444
1445 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1446
1447 out:
1448 return status;
1449 }
1450
1451 /**
1452 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1453 * @hw: pointer to hardware structure
1454 * @offset: offset of word in the EEPROM to read
1455 * @words: number of word(s)
1456 * @data: 16 bit word(s) from the EEPROM
1457 *
1458 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1459 **/
1460 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1461 u16 words, u16 *data)
1462 {
1463 u32 eerd;
1464 s32 status = IXGBE_SUCCESS;
1465 u32 i;
1466
1467 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1468
1469 hw->eeprom.ops.init_params(hw);
1470
1471 if (words == 0) {
1472 status = IXGBE_ERR_INVALID_ARGUMENT;
1473 goto out;
1474 }
1475
1476 if (offset >= hw->eeprom.word_size) {
1477 status = IXGBE_ERR_EEPROM;
1478 goto out;
1479 }
1480
1481 for (i = 0; i < words; i++) {
1482 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1483 IXGBE_EEPROM_RW_REG_START;
1484
1485 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1486 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1487
1488 if (status == IXGBE_SUCCESS) {
1489 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1490 IXGBE_EEPROM_RW_REG_DATA);
1491 } else {
1492 DEBUGOUT("Eeprom read timed out\n");
1493 goto out;
1494 }
1495 }
1496 out:
1497 return status;
1498 }
1499
1500 /**
1501 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1502 * @hw: pointer to hardware structure
1503 * @offset: offset within the EEPROM to be used as a scratch pad
1504 *
1505 * Discover EEPROM page size by writing marching data at given offset.
1506 * This function is called only when we are writing a new large buffer
1507 * at given offset so the data would be overwritten anyway.
1508 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* Marching pattern 0,1,2,... so word 0 reveals the wrap point */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Temporarily claim the max page size so the write bursts fully,
	 * then reset to 0 (unknown) until the real size is computed below */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.  The marching value
	 * that landed back at word 0 therefore encodes the page size.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}
1543
1544 /**
1545 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1546 * @hw: pointer to hardware structure
1547 * @offset: offset of word in the EEPROM to read
1548 * @data: word read from the EEPROM
1549 *
1550 * Reads a 16 bit word from the EEPROM using the EERD register.
1551 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	/* Single-word convenience wrapper around the buffered EERD read */
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1556
1557 /**
1558 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1559 * @hw: pointer to hardware structure
1560 * @offset: offset of word in the EEPROM to write
1561 * @words: number of word(s)
1562 * @data: word(s) write to the EEPROM
1563 *
1564 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1565 **/
1566 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1567 u16 words, u16 *data)
1568 {
1569 u32 eewr;
1570 s32 status = IXGBE_SUCCESS;
1571 u16 i;
1572
1573 DEBUGFUNC("ixgbe_write_eewr_generic");
1574
1575 hw->eeprom.ops.init_params(hw);
1576
1577 if (words == 0) {
1578 status = IXGBE_ERR_INVALID_ARGUMENT;
1579 goto out;
1580 }
1581
1582 if (offset >= hw->eeprom.word_size) {
1583 status = IXGBE_ERR_EEPROM;
1584 goto out;
1585 }
1586
1587 for (i = 0; i < words; i++) {
1588 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1589 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1590 IXGBE_EEPROM_RW_REG_START;
1591
1592 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1593 if (status != IXGBE_SUCCESS) {
1594 DEBUGOUT("Eeprom write EEWR timed out\n");
1595 goto out;
1596 }
1597
1598 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1599
1600 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1601 if (status != IXGBE_SUCCESS) {
1602 DEBUGOUT("Eeprom write EEWR timed out\n");
1603 goto out;
1604 }
1605 }
1606
1607 out:
1608 return status;
1609 }
1610
1611 /**
1612 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1613 * @hw: pointer to hardware structure
1614 * @offset: offset of word in the EEPROM to write
1615 * @data: word write to the EEPROM
1616 *
1617 * Write a 16 bit word to the EEPROM using the EEWR register.
1618 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	/* Single-word convenience wrapper around the buffered EEWR write */
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1623
1624 /**
1625 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1626 * @hw: pointer to hardware structure
1627 * @ee_reg: EEPROM flag for polling
1628 *
1629 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1630 * read or write is done respectively.
1631 **/
1632 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1633 {
1634 u32 i;
1635 u32 reg;
1636 s32 status = IXGBE_ERR_EEPROM;
1637
1638 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1639
1640 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1641 if (ee_reg == IXGBE_NVM_POLL_READ)
1642 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1643 else
1644 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1645
1646 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1647 status = IXGBE_SUCCESS;
1648 break;
1649 }
1650 usec_delay(5);
1651 }
1652 return status;
1653 }
1654
1655 /**
1656 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1657 * @hw: pointer to hardware structure
1658 *
1659 * Prepares EEPROM for access using bit-bang method. This function should
1660 * be called before issuing a command to the EEPROM.
1661 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* First take the software/firmware EEPROM semaphore */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		/* Poll for the hardware grant bit */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			/* Also drop the SWFW semaphore taken above */
			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK so the SPI bus starts out idle */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1709
1710 /**
1711 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1712 * @hw: pointer to hardware structure
1713 *
1714 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1715 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.  (Firmware clears it if FW owns it.)
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			DEBUGOUT("SWESMBI Software EEPROM semaphore "
				 "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		DEBUGOUT("Software semaphore SMBI between device drivers "
			 "not granted.\n");
	}

	return status;
}
1799
1800 /**
1801 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1802 * @hw: pointer to hardware structure
1803 *
1804 * This function clears hardware semaphore bits.
1805 **/
1806 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1807 {
1808 u32 swsm;
1809
1810 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1811
1812 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1813
1814 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1815 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1816 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1817 IXGBE_WRITE_FLUSH(hw);
1818 }
1819
1820 /**
1821 * ixgbe_ready_eeprom - Polls for EEPROM ready
1822 * @hw: pointer to hardware structure
1823 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 i;
	u8 spi_stat_reg;

	DEBUGFUNC("ixgbe_ready_eeprom");

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.  (i advances by 5 because each
	 * retry includes a 5 usec delay.)
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		usec_delay(5);
		ixgbe_standby_eeprom(hw);
	};

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		DEBUGOUT("SPI EEPROM Status error\n");
		status = IXGBE_ERR_EEPROM;
	}

	return status;
}
1860
1861 /**
1862 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1863 * @hw: pointer to hardware structure
1864 **/
1865 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1866 {
1867 u32 eec;
1868
1869 DEBUGFUNC("ixgbe_standby_eeprom");
1870
1871 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1872
1873 /* Toggle CS to flush commands */
1874 eec |= IXGBE_EEC_CS;
1875 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1876 IXGBE_WRITE_FLUSH(hw);
1877 usec_delay(1);
1878 eec &= ~IXGBE_EEC_CS;
1879 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1880 IXGBE_WRITE_FLUSH(hw);
1881 usec_delay(1);
1882 }
1883
1884 /**
1885 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1886 * @hw: pointer to hardware structure
1887 * @data: data to send to the EEPROM
1888 * @count: number of bits to shift out
1889 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
		IXGBE_WRITE_FLUSH(hw);

		/* Hold DI stable before toggling the clock */
		usec_delay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	};

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
}
1940
1941 /**
1942 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1943 * @hw: pointer to hardware structure
1944 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM.  Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	/* MSB arrives first; shift it up as each new bit comes in */
	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Keep DI clear in our cached copy for the next clock pulse */
		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
1979
1980 /**
1981 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1982 * @hw: pointer to hardware structure
1983 * @eec: EEC register's current value
1984 **/
1985 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1986 {
1987 DEBUGFUNC("ixgbe_raise_eeprom_clk");
1988
1989 /*
1990 * Raise the clock input to the EEPROM
1991 * (setting the SK bit), then delay
1992 */
1993 *eec = *eec | IXGBE_EEC_SK;
1994 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1995 IXGBE_WRITE_FLUSH(hw);
1996 usec_delay(1);
1997 }
1998
1999 /**
2000 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2001 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
2003 **/
2004 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2005 {
2006 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2007
2008 /*
2009 * Lower the clock input to the EEPROM (clearing the SK bit), then
2010 * delay
2011 */
2012 *eec = *eec & ~IXGBE_EEC_SK;
2013 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2014 IXGBE_WRITE_FLUSH(hw);
2015 usec_delay(1);
2016 }
2017
2018 /**
2019 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2020 * @hw: pointer to hardware structure
2021 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Return the SPI bus to its idle state first */
	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	/* Finally drop the SWFW semaphore taken by ixgbe_acquire_eeprom */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}
2047
2048 /**
2049 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2050 * @hw: pointer to hardware structure
2051 **/
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
			DEBUGOUT("EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		/* NOTE(review): read failures in this loop are ignored;
		 * 'pointer'/'word' then keep their previous values and are
		 * still folded into the checksum.  TODO confirm intended. */
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid (not blank/erased) */
		if (pointer != 0xFFFF && pointer != 0) {
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				/* Sum the 'length' words after the header */
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	/* Stored checksum makes the total of all words equal the BASE sum */
	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}
2093
2094 /**
2095 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2096 * @hw: pointer to hardware structure
2097 * @checksum_val: calculated checksum
2098 *
2099 * Performs checksum calculation and validates the EEPROM checksum. If the
2100 * caller does not need checksum_val, the value can be NULL.
2101 **/
2102 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2103 u16 *checksum_val)
2104 {
2105 s32 status;
2106 u16 checksum;
2107 u16 read_checksum = 0;
2108
2109 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2110
2111 /*
2112 * Read the first word from the EEPROM. If this times out or fails, do
2113 * not continue or we could be in for a very long wait while every
2114 * EEPROM read fails
2115 */
2116 status = hw->eeprom.ops.read(hw, 0, &checksum);
2117
2118 if (status == IXGBE_SUCCESS) {
2119 checksum = hw->eeprom.ops.calc_checksum(hw);
2120
2121 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2122
2123 /*
2124 * Verify read checksum from EEPROM is the same as
2125 * calculated checksum
2126 */
2127 if (read_checksum != checksum)
2128 status = IXGBE_ERR_EEPROM_CHECKSUM;
2129
2130 /* If the user cares, return the calculated checksum */
2131 if (checksum_val)
2132 *checksum_val = checksum;
2133 } else {
2134 DEBUGOUT("EEPROM read failed\n");
2135 }
2136
2137 return status;
2138 }
2139
2140 /**
2141 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2142 * @hw: pointer to hardware structure
2143 **/
2144 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2145 {
2146 s32 status;
2147 u16 checksum;
2148
2149 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2150
2151 /*
2152 * Read the first word from the EEPROM. If this times out or fails, do
2153 * not continue or we could be in for a very long wait while every
2154 * EEPROM read fails
2155 */
2156 status = hw->eeprom.ops.read(hw, 0, &checksum);
2157
2158 if (status == IXGBE_SUCCESS) {
2159 checksum = hw->eeprom.ops.calc_checksum(hw);
2160 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
2161 checksum);
2162 } else {
2163 DEBUGOUT("EEPROM read failed\n");
2164 }
2165
2166 return status;
2167 }
2168
2169 /**
2170 * ixgbe_validate_mac_addr - Validate MAC address
2171 * @mac_addr: pointer to MAC address.
2172 *
2173 * Tests a MAC address to ensure it is a valid Individual Address
2174 **/
2175 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2176 {
2177 s32 status = IXGBE_SUCCESS;
2178
2179 DEBUGFUNC("ixgbe_validate_mac_addr");
2180
2181 /* Make sure it is not a multicast address */
2182 if (IXGBE_IS_MULTICAST(mac_addr)) {
2183 DEBUGOUT("MAC address is multicast\n");
2184 status = IXGBE_ERR_INVALID_MAC_ADDR;
2185 /* Not a broadcast address */
2186 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2187 DEBUGOUT("MAC address is broadcast\n");
2188 status = IXGBE_ERR_INVALID_MAC_ADDR;
2189 /* Reject the zero address */
2190 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2191 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2192 DEBUGOUT("MAC address is all zeros\n");
2193 status = IXGBE_ERR_INVALID_MAC_ADDR;
2194 }
2195 return status;
2196 }
2197
/**
 *  ixgbe_set_rar_generic - Set Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: VMDq "set" or "pool" index
 *  @enable_addr: set flag that address is active (non-zero sets RAH.AV)
 *
 *  Puts an ethernet address into a receive address register.
 *  Returns IXGBE_SUCCESS, or IXGBE_ERR_INVALID_ARGUMENT for a bad index.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	/* Upper two address bytes live in the low 16 bits of RAH */
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	/* Write RAL first; RAH is written last since it carries the
	 * address-valid bit that arms the filter. */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}
2250
2251 /**
2252 * ixgbe_clear_rar_generic - Remove Rx address register
2253 * @hw: pointer to hardware structure
2254 * @index: Receive address register to write
2255 *
2256 * Clears an ethernet address from a receive address register.
2257 **/
2258 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2259 {
2260 u32 rar_high;
2261 u32 rar_entries = hw->mac.num_rar_entries;
2262
2263 DEBUGFUNC("ixgbe_clear_rar_generic");
2264
2265 /* Make sure we are using a valid rar index range */
2266 if (index >= rar_entries) {
2267 DEBUGOUT1("RAR index %d is out of range.\n", index);
2268 return IXGBE_ERR_INVALID_ARGUMENT;
2269 }
2270
2271 /*
2272 * Some parts put the VMDq setting in the extra RAH bits,
2273 * so save everything except the lower 16 bits that hold part
2274 * of the address and the address valid bit.
2275 */
2276 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2277 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2278
2279 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2280 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2281
2282 /* clear VMDq pool/queue selection for this RAR */
2283 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2284
2285 return IXGBE_SUCCESS;
2286 }
2287
/**
 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 *  @hw: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] always holds the primary MAC address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA; program the filter type (MCSTCTRL.MO) now so the
	 * hash offset matches what ixgbe_mta_vector() computes later. */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
2355
2356 /**
2357 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2358 * @hw: pointer to hardware structure
2359 * @addr: new address
2360 *
2361 * Adds it to unused receive address register or goes into promiscuous mode.
2362 **/
2363 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2364 {
2365 u32 rar_entries = hw->mac.num_rar_entries;
2366 u32 rar;
2367
2368 DEBUGFUNC("ixgbe_add_uc_addr");
2369
2370 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2371 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2372
2373 /*
2374 * Place this address in the RAR if there is room,
2375 * else put the controller into promiscuous mode
2376 */
2377 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2378 rar = hw->addr_ctrl.rar_used_count;
2379 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2380 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2381 hw->addr_ctrl.rar_used_count++;
2382 } else {
2383 hw->addr_ctrl.overflow_promisc++;
2384 }
2385
2386 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2387 }
2388
2389 /**
2390 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2391 * @hw: pointer to hardware structure
2392 * @addr_list: the list of new addresses
2393 * @addr_count: number of addresses
2394 * @next: iterator function to walk the address list
2395 *
2396 * The given list replaces any existing list. Clears the secondary addrs from
2397 * receive address registers. Uses unused receive address registers for the
2398 * first secondary addresses, and falls back to promiscuous mode as needed.
2399 *
2400 * Drivers using secondary unicast addresses must set user_set_promisc when
2401 * manually putting the device into promiscuous mode.
2402 **/
2403 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2404 u32 addr_count, ixgbe_mc_addr_itr next)
2405 {
2406 u8 *addr;
2407 u32 i;
2408 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2409 u32 uc_addr_in_use;
2410 u32 fctrl;
2411 u32 vmdq;
2412
2413 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2414
2415 /*
2416 * Clear accounting of old secondary address list,
2417 * don't count RAR[0]
2418 */
2419 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2420 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2421 hw->addr_ctrl.overflow_promisc = 0;
2422
2423 /* Zero out the other receive addresses */
2424 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2425 for (i = 0; i < uc_addr_in_use; i++) {
2426 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2427 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2428 }
2429
2430 /* Add the new addresses */
2431 for (i = 0; i < addr_count; i++) {
2432 DEBUGOUT(" Adding the secondary addresses:\n");
2433 addr = next(hw, &addr_list, &vmdq);
2434 ixgbe_add_uc_addr(hw, addr, vmdq);
2435 }
2436
2437 if (hw->addr_ctrl.overflow_promisc) {
2438 /* enable promisc if not already in overflow or set by user */
2439 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2440 DEBUGOUT(" Entering address overflow promisc mode\n");
2441 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2442 fctrl |= IXGBE_FCTRL_UPE;
2443 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2444 }
2445 } else {
2446 /* only disable if set by overflow, not by user */
2447 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2448 DEBUGOUT(" Leaving address overflow promisc mode\n");
2449 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2450 fctrl &= ~IXGBE_FCTRL_UPE;
2451 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2452 }
2453 }
2454
2455 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2456 return IXGBE_SUCCESS;
2457 }
2458
/**
 *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
 *  @hw: pointer to hardware structure
 *  @mc_addr: the multicast address
 *
 *  Extracts the 12 bits, from a multicast address, to determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 *  incoming rx multicast addresses, to determine the bit-vector to check in
 *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 *  to mc_filter_type.
 *
 *  Returns the 12-bit hash vector (0..0xFFF).
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	DEBUGFUNC("ixgbe_mta_vector");

	/*
	 * Each case combines the top bits of mc_addr[4] with mc_addr[5]
	 * (the last two bytes of the address) at a different offset.
	 */
	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		DEBUGOUT("MC filter type param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
2500
/**
 *  ixgbe_set_mta - Set bit-vector in multicast table
 *  @hw: pointer to hardware structure
 *  @mc_addr: multicast address to hash into the table
 *
 *  Sets the bit-vector in the multicast table shadow (hw->mac.mta_shadow);
 *  the caller is responsible for flushing the shadow to the hardware MTA.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	/* Track the number of active entries for MCSTCTRL.MFE decisions */
	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
2534
/**
 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 *  @hw: pointer to hardware structure
 *  @mc_addr_list: the list of new multicast addresses
 *  @mc_addr_count: number of addresses
 *  @next: iterator function to walk the multicast address list
 *  @clear: flag, when set clears the table beforehand
 *
 *  When the clear flag is set, the given list replaces any existing list.
 *  Hashes the given addresses into the multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	/* Reset; ixgbe_set_mta() re-counts entries as they are added */
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: flush the whole shadow table to the hardware MTA */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	/* Only enable the hash filter when at least one entry is in use */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2586
2587 /**
2588 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2589 * @hw: pointer to hardware structure
2590 *
2591 * Enables multicast address in RAR and the use of the multicast hash table.
2592 **/
2593 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2594 {
2595 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2596
2597 DEBUGFUNC("ixgbe_enable_mc_generic");
2598
2599 if (a->mta_in_use > 0)
2600 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2601 hw->mac.mc_filter_type);
2602
2603 return IXGBE_SUCCESS;
2604 }
2605
2606 /**
2607 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2608 * @hw: pointer to hardware structure
2609 *
2610 * Disables multicast address in RAR and the use of the multicast hash table.
2611 **/
2612 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2613 {
2614 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2615
2616 DEBUGFUNC("ixgbe_disable_mc_generic");
2617
2618 if (a->mta_in_use > 0)
2619 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2620
2621 return IXGBE_SUCCESS;
2622 }
2623
/**
 *  ixgbe_fc_enable_generic - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS for a bad
 *  water-mark/pause-time configuration, or IXGBE_ERR_CONFIG for an
 *  invalid fc.current_mode.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* Water marks are stored in KB units; shift to bytes */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the maximum FCRTH value.  This allows the Tx
			 * switch to function even under heavy Rx workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2754
2755 /**
2756 * ixgbe_negotiate_fc - Negotiate flow control
2757 * @hw: pointer to hardware structure
2758 * @adv_reg: flow control advertised settings
2759 * @lp_reg: link partner's flow control settings
2760 * @adv_sym: symmetric pause bit in advertisement
2761 * @adv_asm: asymmetric pause bit in advertisement
2762 * @lp_sym: symmetric pause bit in link partner advertisement
2763 * @lp_asm: asymmetric pause bit in link partner advertisement
2764 *
2765 * Find the intersection between advertised settings and link partner's
2766 * advertised settings
2767 **/
2768 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2769 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2770 {
2771 if ((!(adv_reg)) || (!(lp_reg)))
2772 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2773
2774 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2775 /*
2776 * Now we need to check if the user selected Rx ONLY
2777 * of pause frames. In this case, we had to advertise
2778 * FULL flow control because we could not advertise RX
2779 * ONLY. Hence, we must now check to see if we need to
2780 * turn OFF the TRANSMISSION of PAUSE frames.
2781 */
2782 if (hw->fc.requested_mode == ixgbe_fc_full) {
2783 hw->fc.current_mode = ixgbe_fc_full;
2784 DEBUGOUT("Flow Control = FULL.\n");
2785 } else {
2786 hw->fc.current_mode = ixgbe_fc_rx_pause;
2787 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2788 }
2789 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2790 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2791 hw->fc.current_mode = ixgbe_fc_tx_pause;
2792 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2793 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2794 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2795 hw->fc.current_mode = ixgbe_fc_rx_pause;
2796 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2797 } else {
2798 hw->fc.current_mode = ixgbe_fc_none;
2799 DEBUGOUT("Flow Control = NONE.\n");
2800 }
2801 return IXGBE_SUCCESS;
2802 }
2803
2804 /**
2805 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2806 * @hw: pointer to hardware structure
2807 *
2808 * Enable flow control according on 1 gig fiber.
2809 **/
2810 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2811 {
2812 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2813 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2814
2815 /*
2816 * On multispeed fiber at 1g, bail out if
2817 * - link is up but AN did not complete, or if
2818 * - link is up and AN completed but timed out
2819 */
2820
2821 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2822 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2823 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2824 goto out;
2825
2826 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2827 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2828
2829 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2830 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2831 IXGBE_PCS1GANA_ASM_PAUSE,
2832 IXGBE_PCS1GANA_SYM_PAUSE,
2833 IXGBE_PCS1GANA_ASM_PAUSE);
2834
2835 out:
2836 return ret_val;
2837 }
2838
2839 /**
2840 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2841 * @hw: pointer to hardware structure
2842 *
2843 * Enable flow control according to IEEE clause 37.
2844 **/
2845 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2846 {
2847 u32 links2, anlp1_reg, autoc_reg, links;
2848 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2849
2850 /*
2851 * On backplane, bail out if
2852 * - backplane autoneg was not completed, or if
2853 * - we are 82599 and link partner is not AN enabled
2854 */
2855 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2856 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2857 goto out;
2858
2859 if (hw->mac.type == ixgbe_mac_82599EB) {
2860 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2861 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2862 goto out;
2863 }
2864 /*
2865 * Read the 10g AN autoc and LP ability registers and resolve
2866 * local flow control settings accordingly
2867 */
2868 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2869 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2870
2871 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2872 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2873 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2874
2875 out:
2876 return ret_val;
2877 }
2878
2879 /**
2880 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2881 * @hw: pointer to hardware structure
2882 *
2883 * Enable flow control according to IEEE clause 37.
2884 **/
2885 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2886 {
2887 u16 technology_ability_reg = 0;
2888 u16 lp_technology_ability_reg = 0;
2889
2890 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2891 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2892 &technology_ability_reg);
2893 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2894 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2895 &lp_technology_ability_reg);
2896
2897 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2898 (u32)lp_technology_ability_reg,
2899 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2900 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2901 }
2902
/**
 *  ixgbe_fc_autoneg - Configure flow control
 *  @hw: pointer to hardware structure
 *
 *  Compares our advertised flow control capabilities to those advertised by
 *  our link partner, and determines the proper flow control mode to use.
 *  On any bail-out, fc.current_mode falls back to fc.requested_mode.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	/* Default to "not negotiated" so any early exit takes the
	 * requested-mode fallback at the bottom. */
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg)
		goto out;

	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (!link_up)
		goto out;

	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber:
		/* Clause-37-style FC autoneg only applies at 1G on fiber */
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = TRUE;
	} else {
		/* Negotiation failed or skipped: honor what was requested */
		hw->fc.fc_was_autonegged = FALSE;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}
2962
/**
 *  ixgbe_disable_pcie_master - Disable PCI-express master access
 *  @hw: pointer to hardware structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 *  is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.  This poll reads PCI config space, not a
	 * CSR, so it works even while the device-side status is stuck.
	 */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
		      IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
3021
/**
 *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SWFW semaphore through the GSSR register for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash).
 *  Returns IXGBE_SUCCESS, or IXGBE_ERR_SWFW_SYNC on timeout or if the
 *  underlying EEPROM semaphore cannot be taken.
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;
	/* FW-owned bits sit 5 positions above the SW bits in GSSR */
	u32 fwmask = mask << 5;
	s32 timeout = 200;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	while (timeout) {
		/*
		 * SW EEPROM semaphore bit is used for access to all
		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		/* Resource free if neither FW nor another SW thread holds it */
		if (!(gssr & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask) or other software
		 * thread currently using resource (swmask)
		 */
		ixgbe_release_eeprom_semaphore(hw);
		msec_delay(5);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		return IXGBE_ERR_SWFW_SYNC;
	}

	/* Claim the resource: gssr still holds the value read while the
	 * EEPROM semaphore was held, so this read-modify-write is safe. */
	gssr |= swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
	return IXGBE_SUCCESS;
}
3071
/**
 *  ixgbe_release_swfw_sync - Release SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to release
 *
 *  Releases the SWFW semaphore through the GSSR register for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;

	DEBUGFUNC("ixgbe_release_swfw_sync");

	/* NOTE(review): the return value of ixgbe_get_eeprom_semaphore() is
	 * ignored here, unlike in ixgbe_acquire_swfw_sync(); presumably a
	 * release must proceed regardless — confirm against the datasheet. */
	ixgbe_get_eeprom_semaphore(hw);

	/* Clear only our SW bit(s) in GSSR, leaving all others intact */
	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	gssr &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
}
3095
3096 /**
3097 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3098 * @hw: pointer to hardware structure
3099 *
3100 * Stops the receive data path and waits for the HW to internally empty
3101 * the Rx security block
3102 **/
3103 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3104 {
3105 #define IXGBE_MAX_SECRX_POLL 40
3106
3107 int i;
3108 int secrxreg;
3109
3110 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3111
3112
3113 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3114 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3115 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3116 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3117 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3118 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3119 break;
3120 else
3121 /* Use interrupt-safe sleep just in case */
3122 usec_delay(1000);
3123 }
3124
3125 /* For informational purposes only */
3126 if (i >= IXGBE_MAX_SECRX_POLL)
3127 DEBUGOUT("Rx unit being enabled before security "
3128 "path fully disabled. Continuing with init.\n");
3129
3130 return IXGBE_SUCCESS;
3131 }
3132
3133 /**
3134 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3135 * @hw: pointer to hardware structure
3136 *
3137 * Enables the receive data path.
3138 **/
3139 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3140 {
3141 int secrxreg;
3142
3143 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3144
3145 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3146 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3147 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3148 IXGBE_WRITE_FLUSH(hw);
3149
3150 return IXGBE_SUCCESS;
3151 }
3152
3153 /**
3154 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3155 * @hw: pointer to hardware structure
3156 * @regval: register value to write to RXCTRL
3157 *
3158 * Enables the Rx DMA unit
3159 **/
3160 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3161 {
3162 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3163
3164 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
3165
3166 return IXGBE_SUCCESS;
3167 }
3168
/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_SWFW_SYNC when the SW/FW semaphore
 * needed for an AUTOC write on a LESM-enabled 82599 cannot be taken.
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

	if (!link_up) {
		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
		 * LESM is on.
		 */
		bool got_lock = FALSE;
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (ret_val != IXGBE_SUCCESS) {
				ret_val = IXGBE_ERR_SWFW_SYNC;
				goto out;
			}
			got_lock = TRUE;
		}

		/* Force link up (FLU) and restart autonegotiation so the
		 * LED logic has an active link to blink against. */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
		IXGBE_WRITE_FLUSH(hw);

		if (got_lock)
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
		/* give the forced link time to take effect */
		msec_delay(10);
	}

	/* Put this LED into blink mode in LEDCTL. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3225
/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_SWFW_SYNC when the SW/FW semaphore
 * needed for the AUTOC write cannot be acquired.
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_blink_led_stop_generic");
	/* Need the SW/FW semaphore around AUTOC writes if 82599 and
	 * LESM is on.
	 */
	if ((hw->mac.type == ixgbe_mac_82599EB) &&
	    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
						IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto out;
		}
		got_lock = TRUE;
	}

	/* Undo the forced-link-up (FLU) that the blink-start path may have
	 * set, and restart autonegotiation. */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Restore the LED to normal link/activity mode. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3273
3274 /**
3275 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3276 * @hw: pointer to hardware structure
3277 * @san_mac_offset: SAN MAC address offset
3278 *
3279 * This function will read the EEPROM location for the SAN MAC address
3280 * pointer, and returns the value at that location. This is used in both
3281 * get and set mac_addr routines.
3282 **/
3283 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3284 u16 *san_mac_offset)
3285 {
3286 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3287
3288 /*
3289 * First read the EEPROM pointer to see if the MAC addresses are
3290 * available.
3291 */
3292 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
3293
3294 return IXGBE_SUCCESS;
3295 }
3296
3297 /**
3298 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3299 * @hw: pointer to hardware structure
3300 * @san_mac_addr: SAN MAC address
3301 *
3302 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3303 * per-port, so set_lan_id() must be called before reading the addresses.
3304 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3305 * upon for non-SFP connections, so we must call it here.
3306 **/
3307 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3308 {
3309 u16 san_mac_data, san_mac_offset;
3310 u8 i;
3311
3312 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3313
3314 /*
3315 * First read the EEPROM pointer to see if the MAC addresses are
3316 * available. If they're not, no point in calling set_lan_id() here.
3317 */
3318 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3319
3320 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3321 /*
3322 * No addresses available in this EEPROM. It's not an
3323 * error though, so just wipe the local address and return.
3324 */
3325 for (i = 0; i < 6; i++)
3326 san_mac_addr[i] = 0xFF;
3327
3328 goto san_mac_addr_out;
3329 }
3330
3331 /* make sure we know which port we need to program */
3332 hw->mac.ops.set_lan_id(hw);
3333 /* apply the port offset to the address offset */
3334 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3335 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3336 for (i = 0; i < 3; i++) {
3337 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
3338 san_mac_addr[i * 2] = (u8)(san_mac_data);
3339 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3340 san_mac_offset++;
3341 }
3342
3343 san_mac_addr_out:
3344 return IXGBE_SUCCESS;
3345 }
3346
3347 /**
3348 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3349 * @hw: pointer to hardware structure
3350 * @san_mac_addr: SAN MAC address
3351 *
3352 * Write a SAN MAC address to the EEPROM.
3353 **/
3354 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3355 {
3356 s32 status = IXGBE_SUCCESS;
3357 u16 san_mac_data, san_mac_offset;
3358 u8 i;
3359
3360 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3361
3362 /* Look for SAN mac address pointer. If not defined, return */
3363 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3364
3365 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3366 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3367 goto san_mac_addr_out;
3368 }
3369
3370 /* Make sure we know which port we need to write */
3371 hw->mac.ops.set_lan_id(hw);
3372 /* Apply the port offset to the address offset */
3373 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3374 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3375
3376 for (i = 0; i < 3; i++) {
3377 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3378 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3379 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3380 san_mac_offset++;
3381 }
3382
3383 san_mac_addr_out:
3384 return status;
3385 }
3386
3387 /**
3388 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3389 * @hw: pointer to hardware structure
3390 *
3391 * Read PCIe configuration space, and get the MSI-X vector count from
3392 * the capabilities table.
3393 **/
3394 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3395 {
3396 u16 msix_count = 1;
3397 u16 max_msix_count;
3398 u16 pcie_offset;
3399
3400 switch (hw->mac.type) {
3401 case ixgbe_mac_82598EB:
3402 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3403 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3404 break;
3405 case ixgbe_mac_82599EB:
3406 case ixgbe_mac_X540:
3407 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3408 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3409 break;
3410 default:
3411 return msix_count;
3412 }
3413
3414 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3415 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3416 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3417
3418 /* MSI-X count is zero-based in HW */
3419 msix_count++;
3420
3421 if (msix_count > max_msix_count)
3422 msix_count = max_msix_count;
3423
3424 return msix_count;
3425 }
3426
/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is aleady in; adds to the pool list.
 * Returns the RAR index used, or IXGBE_ERR_INVALID_MAC_ADDR when the
 * RAR table is exhausted.
 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low = addr[0] | (addr[1] << 8)
		 | (addr[2] << 16)
		 | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* remember the first disabled (AV clear) slot we pass */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* high halves match; confirm with the low word */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;    /* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		/* table is completely full */
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	return rar;
}
3495
/**
 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar, or IXGBE_CLEAR_VMDQ_ALL
 *
 * Clears the requested pool bit(s) in the MPSAR pair backing @rar.  When
 * the last pool reference goes away the RAR itself is cleared, except for
 * RAR 0 which always stays programmed.
 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_vmdq_generic");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* Each RAR owns two pool bitmaps: LO = pools 0-31, HI = 32-63. */
	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* no pool references this RAR; nothing to clear */
	if (!mpsar_lo && !mpsar_hi)
		goto done;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		/* only write the halves that actually have bits set */
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		mpsar_lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		mpsar_hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return IXGBE_SUCCESS;
}
3544
3545 /**
3546 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3547 * @hw: pointer to hardware struct
3548 * @rar: receive address register index to associate with a VMDq index
3549 * @vmdq: VMDq pool index
3550 **/
3551 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3552 {
3553 u32 mpsar;
3554 u32 rar_entries = hw->mac.num_rar_entries;
3555
3556 DEBUGFUNC("ixgbe_set_vmdq_generic");
3557
3558 /* Make sure we are using a valid rar index range */
3559 if (rar >= rar_entries) {
3560 DEBUGOUT1("RAR index %d is out of range.\n", rar);
3561 return IXGBE_ERR_INVALID_ARGUMENT;
3562 }
3563
3564 if (vmdq < 32) {
3565 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3566 mpsar |= 1 << vmdq;
3567 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3568 } else {
3569 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3570 mpsar |= 1 << (vmdq - 32);
3571 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3572 }
3573 return IXGBE_SUCCESS;
3574 }
3575
3576 /**
3577 * This function should only be involved in the IOV mode.
3578 * In IOV mode, Default pool is next pool after the number of
3579 * VFs advertized and not 0.
3580 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3581 *
3582 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3583 * @hw: pointer to hardware struct
3584 * @vmdq: VMDq pool index
3585 **/
3586 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3587 {
3588 u32 rar = hw->mac.san_mac_rar_index;
3589
3590 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3591
3592 if (vmdq < 32) {
3593 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3594 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3595 } else {
3596 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3597 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3598 }
3599
3600 return IXGBE_SUCCESS;
3601 }
3602
3603 /**
3604 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3605 * @hw: pointer to hardware structure
3606 **/
3607 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3608 {
3609 int i;
3610
3611 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3612 DEBUGOUT(" Clearing UTA\n");
3613
3614 for (i = 0; i < 128; i++)
3615 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3616
3617 return IXGBE_SUCCESS;
3618 }
3619
3620 /**
3621 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3622 * @hw: pointer to hardware structure
3623 * @vlan: VLAN id to write to VLAN filter
3624 *
3625 * return the VLVF index where this VLAN id should be placed
3626 *
3627 **/
3628 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3629 {
3630 u32 bits = 0;
3631 u32 first_empty_slot = 0;
3632 s32 regindex;
3633
3634 /* short cut the special case */
3635 if (vlan == 0)
3636 return 0;
3637
3638 /*
3639 * Search for the vlan id in the VLVF entries. Save off the first empty
3640 * slot found along the way
3641 */
3642 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3643 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3644 if (!bits && !(first_empty_slot))
3645 first_empty_slot = regindex;
3646 else if ((bits & 0x0FFF) == vlan)
3647 break;
3648 }
3649
3650 /*
3651 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3652 * in the VLVF. Else use the first empty VLVF register for this
3653 * vlan id.
3654 */
3655 if (regindex >= IXGBE_VLVF_ENTRIES) {
3656 if (first_empty_slot)
3657 regindex = first_empty_slot;
3658 else {
3659 DEBUGOUT("No space in VLVF.\n");
3660 regindex = IXGBE_ERR_NO_SPACE;
3661 }
3662 }
3663
3664 return regindex;
3665 }
3666
/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 * Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for vlan > 4095, or an error
 * propagated from ixgbe_set_vlvf_generic.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on)
{
	s32 regindex;
	u32 bitindex;
	u32 vfta;
	u32 targetbit;
	s32 ret_val = IXGBE_SUCCESS;
	bool vfta_changed = FALSE;	/* only write VFTA if the bit flips */

	DEBUGFUNC("ixgbe_set_vfta_generic");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = TRUE;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = TRUE;
		}
	}

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF.  It may veto
	 * the VFTA clear via vfta_changed when other pools still use the
	 * VLAN id.
	 */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
					 &vfta_changed);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return IXGBE_SUCCESS;
}
3733
/**
 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
 *		  should be changed; cleared by this routine when other
 *		  pools/VFs still reference the VLAN id
 *
 * Turn on/off specified bit in VLVF table.  A no-op unless VT mode is
 * enabled in VT_CTL.  Each VLVF entry owns a pair of VLVFB registers
 * forming a 64-bit pool bitmap (even index = pools 0-31, odd = 32-63).
 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool *vfta_changed)
{
	u32 vt;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;
		u32 bits;

		/* find (or allocate) the VLVF slot for this vlan id */
		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;	/* IXGBE_ERR_NO_SPACE */

		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits |= (1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
			}
		} else {
			/* clear the pool bit; afterwards OR in the other
			 * half of the bitmap so "bits" reflects every pool
			 * that still references this vlan id */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
				bits |= IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits &= ~(1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request.  We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry.  In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared.  This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if ((!vlan_on) && (vfta_changed != NULL)) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				*vfta_changed = FALSE;
			}
		} else
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	return IXGBE_SUCCESS;
}
3841
3842 /**
3843 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3844 * @hw: pointer to hardware structure
3845 *
3846 * Clears the VLAN filer table, and the VMDq index associated with the filter
3847 **/
3848 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3849 {
3850 u32 offset;
3851
3852 DEBUGFUNC("ixgbe_clear_vfta_generic");
3853
3854 for (offset = 0; offset < hw->mac.vft_size; offset++)
3855 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3856
3857 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3858 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3859 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3860 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3861 }
3862
3863 return IXGBE_SUCCESS;
3864 }
3865
3866 /**
3867 * ixgbe_check_mac_link_generic - Determine link and speed status
3868 * @hw: pointer to hardware structure
3869 * @speed: pointer to link speed
3870 * @link_up: TRUE when link is up
3871 * @link_up_wait_to_complete: bool used to wait for link up or not
3872 *
3873 * Reads the links register to determine if link is up and the current speed
3874 **/
3875 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3876 bool *link_up, bool link_up_wait_to_complete)
3877 {
3878 u32 links_reg, links_orig;
3879 u32 i;
3880
3881 DEBUGFUNC("ixgbe_check_mac_link_generic");
3882
3883 /* clear the old state */
3884 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3885
3886 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3887
3888 if (links_orig != links_reg) {
3889 DEBUGOUT2("LINKS changed from %08X to %08X\n",
3890 links_orig, links_reg);
3891 }
3892
3893 if (link_up_wait_to_complete) {
3894 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3895 if (links_reg & IXGBE_LINKS_UP) {
3896 *link_up = TRUE;
3897 break;
3898 } else {
3899 *link_up = FALSE;
3900 }
3901 msec_delay(100);
3902 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3903 }
3904 } else {
3905 if (links_reg & IXGBE_LINKS_UP)
3906 *link_up = TRUE;
3907 else
3908 *link_up = FALSE;
3909 }
3910
3911 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3912 IXGBE_LINKS_SPEED_10G_82599)
3913 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3914 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3915 IXGBE_LINKS_SPEED_1G_82599)
3916 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3917 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3918 IXGBE_LINKS_SPEED_100_82599)
3919 *speed = IXGBE_LINK_SPEED_100_FULL;
3920 else
3921 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3922
3923 return IXGBE_SUCCESS;
3924 }
3925
3926 /**
3927 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3928 * the EEPROM
3929 * @hw: pointer to hardware structure
3930 * @wwnn_prefix: the alternative WWNN prefix
3931 * @wwpn_prefix: the alternative WWPN prefix
3932 *
3933 * This function will read the EEPROM from the alternative SAN MAC address
3934 * block to check the support for the alternative WWNN/WWPN prefix support.
3935 **/
3936 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3937 u16 *wwpn_prefix)
3938 {
3939 u16 offset, caps;
3940 u16 alt_san_mac_blk_offset;
3941
3942 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3943
3944 /* clear output first */
3945 *wwnn_prefix = 0xFFFF;
3946 *wwpn_prefix = 0xFFFF;
3947
3948 /* check if alternative SAN MAC is supported */
3949 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
3950 &alt_san_mac_blk_offset);
3951
3952 if ((alt_san_mac_blk_offset == 0) ||
3953 (alt_san_mac_blk_offset == 0xFFFF))
3954 goto wwn_prefix_out;
3955
3956 /* check capability in alternative san mac address block */
3957 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3958 hw->eeprom.ops.read(hw, offset, &caps);
3959 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3960 goto wwn_prefix_out;
3961
3962 /* get the corresponding prefix for WWNN/WWPN */
3963 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3964 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
3965
3966 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3967 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
3968
3969 wwn_prefix_out:
3970 return IXGBE_SUCCESS;
3971 }
3972
3973 /**
3974 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3975 * @hw: pointer to hardware structure
3976 * @bs: the fcoe boot status
3977 *
3978 * This function will read the FCOE boot status from the iSCSI FCOE block
3979 **/
3980 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3981 {
3982 u16 offset, caps, flags;
3983 s32 status;
3984
3985 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3986
3987 /* clear output first */
3988 *bs = ixgbe_fcoe_bootstatus_unavailable;
3989
3990 /* check if FCOE IBA block is present */
3991 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3992 status = hw->eeprom.ops.read(hw, offset, &caps);
3993 if (status != IXGBE_SUCCESS)
3994 goto out;
3995
3996 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3997 goto out;
3998
3999 /* check if iSCSI FCOE block is populated */
4000 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4001 if (status != IXGBE_SUCCESS)
4002 goto out;
4003
4004 if ((offset == 0) || (offset == 0xFFFF))
4005 goto out;
4006
4007 /* read fcoe flags in iSCSI FCOE block */
4008 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4009 status = hw->eeprom.ops.read(hw, offset, &flags);
4010 if (status != IXGBE_SUCCESS)
4011 goto out;
4012
4013 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4014 *bs = ixgbe_fcoe_bootstatus_enabled;
4015 else
4016 *bs = ixgbe_fcoe_bootstatus_disabled;
4017
4018 out:
4019 return status;
4020 }
4021
/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for anti-spoofing
 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
 *
 * Programs the whole PFVFSPOOF register array: VF pools below the PF get
 * the requested setting, while the PF's own pool and everything above it
 * are left with MAC anti-spoofing disabled.  No-op on 82598, which has
 * no anti-spoofing hardware.
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	int pf_target_reg = pf >> 3;	/* PFVFSPOOF index holding the PF bit */
	int pf_target_shift = pf % 8;	/* PF's bit position in that register */
	u32 pfvfspoof = 0;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs. Do not set the bits assigned to the PF
	 */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * Remaining pools belong to the PF so they do not need to have
	 * anti-spoofing enabled.
	 */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
4063
4064 /**
4065 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4066 * @hw: pointer to hardware structure
4067 * @enable: enable or disable switch for VLAN anti-spoofing
4068 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4069 *
4070 **/
4071 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4072 {
4073 int vf_target_reg = vf >> 3;
4074 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4075 u32 pfvfspoof;
4076
4077 if (hw->mac.type == ixgbe_mac_82598EB)
4078 return;
4079
4080 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4081 if (enable)
4082 pfvfspoof |= (1 << vf_target_shift);
4083 else
4084 pfvfspoof &= ~(1 << vf_target_shift);
4085 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4086 }
4087
4088 /**
4089 * ixgbe_get_device_caps_generic - Get additional device capabilities
4090 * @hw: pointer to hardware structure
4091 * @device_caps: the EEPROM word with the extra device capabilities
4092 *
4093 * This function will read the EEPROM location for the device capabilities,
4094 * and return the word through device_caps.
4095 **/
4096 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4097 {
4098 DEBUGFUNC("ixgbe_get_device_caps_generic");
4099
4100 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4101
4102 return IXGBE_SUCCESS;
4103 }
4104
4105 /**
4106 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4107 * @hw: pointer to hardware structure
4108 *
4109 **/
4110 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4111 {
4112 u32 regval;
4113 u32 i;
4114
4115 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4116
4117 /* Enable relaxed ordering */
4118 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4119 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4120 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4121 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4122 }
4123
4124 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4125 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4126 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4127 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4128 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4129 }
4130
4131 }
4132
4133 /**
4134 * ixgbe_calculate_checksum - Calculate checksum for buffer
4135 * @buffer: pointer to EEPROM
4136 * @length: size of EEPROM to calculate a checksum for
4137 * Calculates the checksum for some buffer on a specified length. The
4138 * checksum calculated is returned.
4139 **/
4140 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4141 {
4142 u32 i;
4143 u8 sum = 0;
4144
4145 DEBUGFUNC("ixgbe_calculate_checksum");
4146
4147 if (!buffer)
4148 return 0;
4149
4150 for (i = 0; i < length; i++)
4151 sum += buffer[i];
4152
4153 return (u8) (0 - sum);
4154 }
4155
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
 *
 * Sequence: copy the command into the FLEX_MNG shared RAM, set HICR_C to
 * hand it to the management controller (ARC), poll for HICR_C to clear,
 * verify the status-valid bit, then copy the response header and any
 * response data back into @buffer.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length)
{
	u32 hicr, i, bi;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u8 buf_len, dword_len;

	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_host_interface_command");

	/* Reject zero-length, non-dword-aligned, or oversized buffers. */
	if (length == 0 || length & 0x3 ||
	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT("Buffer length failure.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = length >> 2;

	/*
	 * The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll for the ARC to clear HICR_C (command consumed), waiting
	 * 1 ms per iteration up to IXGBE_HI_COMMAND_TIMEOUT iterations. */
	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command successful completion: either we timed out or the
	 * firmware did not set the status-valid (SV) bit. */
	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
		DEBUGOUT("Command has failed with no status valid.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto out;

	/* Make sure the caller's buffer can hold header + response data. */
	if (length < (buf_len + hdr_size)) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off)*/
	/* NOTE(review): the '<=' bound assumes the header occupies exactly
	 * one dword, so response data sits at indices 1..dword_len —
	 * confirm against struct ixgbe_hic_hdr. */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

out:
	return ret_val;
}
4253
4254 /**
4255 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4256 * @hw: pointer to the HW structure
4257 * @maj: driver version major number
4258 * @min: driver version minor number
4259 * @build: driver version build number
4260 * @sub: driver version sub build number
4261 *
4262 * Sends driver version number to firmware through the manageability
4263 * block. On success return IXGBE_SUCCESS
4264 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4265 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4266 **/
4267 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4268 u8 build, u8 sub)
4269 {
4270 struct ixgbe_hic_drv_info fw_cmd;
4271 int i;
4272 s32 ret_val = IXGBE_SUCCESS;
4273
4274 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4275
4276 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4277 != IXGBE_SUCCESS) {
4278 ret_val = IXGBE_ERR_SWFW_SYNC;
4279 goto out;
4280 }
4281
4282 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4283 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4284 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4285 fw_cmd.port_num = (u8)hw->bus.func;
4286 fw_cmd.ver_maj = maj;
4287 fw_cmd.ver_min = min;
4288 fw_cmd.ver_build = build;
4289 fw_cmd.ver_sub = sub;
4290 fw_cmd.hdr.checksum = 0;
4291 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4292 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4293 fw_cmd.pad = 0;
4294 fw_cmd.pad2 = 0;
4295
4296 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4297 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4298 sizeof(fw_cmd));
4299 if (ret_val != IXGBE_SUCCESS)
4300 continue;
4301
4302 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4303 FW_CEM_RESP_STATUS_SUCCESS)
4304 ret_val = IXGBE_SUCCESS;
4305 else
4306 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4307
4308 break;
4309 }
4310
4311 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4312 out:
4313 return ret_val;
4314 }
4315
4316 /**
4317 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4318 * @hw: pointer to hardware structure
4319 * @num_pb: number of packet buffers to allocate
4320 * @headroom: reserve n KB of headroom
4321 * @strategy: packet buffer allocation strategy
4322 **/
4323 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4324 int strategy)
4325 {
4326 u32 pbsize = hw->mac.rx_pb_size;
4327 int i = 0;
4328 u32 rxpktsize, txpktsize, txpbthresh;
4329
4330 /* Reserve headroom */
4331 pbsize -= headroom;
4332
4333 if (!num_pb)
4334 num_pb = 1;
4335
4336 /* Divide remaining packet buffer space amongst the number of packet
4337 * buffers requested using supplied strategy.
4338 */
4339 switch (strategy) {
4340 case PBA_STRATEGY_WEIGHTED:
4341 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4342 * buffer with 5/8 of the packet buffer space.
4343 */
4344 rxpktsize = (pbsize * 5) / (num_pb * 4);
4345 pbsize -= rxpktsize * (num_pb / 2);
4346 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4347 for (; i < (num_pb / 2); i++)
4348 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4349 /* Fall through to configure remaining packet buffers */
4350 case PBA_STRATEGY_EQUAL:
4351 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4352 for (; i < num_pb; i++)
4353 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4354 break;
4355 default:
4356 break;
4357 }
4358
4359 /* Only support an equally distributed Tx packet buffer strategy. */
4360 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4361 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4362 for (i = 0; i < num_pb; i++) {
4363 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4364 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4365 }
4366
4367 /* Clear unused TCs, if any, to zero buffer size*/
4368 for (; i < IXGBE_MAX_PB; i++) {
4369 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4370 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4371 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4372 }
4373 }
4374
4375 /**
4376 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4377 * @hw: pointer to the hardware structure
4378 *
4379 * The 82599 and x540 MACs can experience issues if TX work is still pending
4380 * when a reset occurs. This function prevents this by flushing the PCIe
4381 * buffers on the system.
4382 **/
4383 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4384 {
4385 u32 gcr_ext, hlreg0;
4386
4387 /*
4388 * If double reset is not requested then all transactions should
4389 * already be clear and as such there is no work to do
4390 */
4391 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4392 return;
4393
4394 /*
4395 * Set loopback enable to prevent any transmits from being sent
4396 * should the link come up. This assumes that the RXCTRL.RXEN bit
4397 * has already been cleared.
4398 */
4399 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4400 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4401
4402 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4403 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4404 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4405 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4406
4407 /* Flush all writes and allow 20usec for all transactions to clear */
4408 IXGBE_WRITE_FLUSH(hw);
4409 usec_delay(20);
4410
4411 /* restore previous register values */
4412 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4413 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4414 }
4415
4416