ixgbe_common.c revision 1.4 1 /******************************************************************************
2
3 Copyright (c) 2001-2012, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 238149 2012-07-05 20:51:44Z jfv $*/
34 /*$NetBSD: ixgbe_common.c,v 1.4 2015/04/02 09:26:55 msaitoh Exp $*/
35
36 #include "ixgbe_common.h"
37 #include "ixgbe_phy.h"
38 #include "ixgbe_api.h"
39
40 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
41 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
42 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
43 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
44 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
45 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
46 u16 count);
47 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
48 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
51
52 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
54 u16 *san_mac_offset);
55 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 u16 words, u16 *data);
59 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
60 u16 offset);
61
62 /**
63 * ixgbe_init_ops_generic - Inits function ptrs
64 * @hw: pointer to the hardware structure
65 *
66 * Initialize the function pointers.
67 **/
68 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
69 {
70 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
71 struct ixgbe_mac_info *mac = &hw->mac;
72 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
73
74 DEBUGFUNC("ixgbe_init_ops_generic");
75
76 /* EEPROM */
77 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
78 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
79 if (eec & IXGBE_EEC_PRES) {
80 eeprom->ops.read = &ixgbe_read_eerd_generic;
81 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
82 } else {
83 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
84 eeprom->ops.read_buffer =
85 &ixgbe_read_eeprom_buffer_bit_bang_generic;
86 }
87 eeprom->ops.write = &ixgbe_write_eeprom_generic;
88 eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
89 eeprom->ops.validate_checksum =
90 &ixgbe_validate_eeprom_checksum_generic;
91 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
92 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
93
94 /* MAC */
95 mac->ops.init_hw = &ixgbe_init_hw_generic;
96 mac->ops.reset_hw = NULL;
97 mac->ops.start_hw = &ixgbe_start_hw_generic;
98 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
99 mac->ops.get_media_type = NULL;
100 mac->ops.get_supported_physical_layer = NULL;
101 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
102 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
103 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
104 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
105 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
106 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
107 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
108
109 /* LEDs */
110 mac->ops.led_on = &ixgbe_led_on_generic;
111 mac->ops.led_off = &ixgbe_led_off_generic;
112 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
113 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
114
115 /* RAR, Multicast, VLAN */
116 mac->ops.set_rar = &ixgbe_set_rar_generic;
117 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
118 mac->ops.insert_mac_addr = NULL;
119 mac->ops.set_vmdq = NULL;
120 mac->ops.clear_vmdq = NULL;
121 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
122 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
123 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
124 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
125 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
126 mac->ops.clear_vfta = NULL;
127 mac->ops.set_vfta = NULL;
128 mac->ops.set_vlvf = NULL;
129 mac->ops.init_uta_tables = NULL;
130
131 /* Flow Control */
132 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
133
134 /* Link */
135 mac->ops.get_link_capabilities = NULL;
136 mac->ops.setup_link = NULL;
137 mac->ops.check_link = NULL;
138
139 return IXGBE_SUCCESS;
140 }
141
142 /**
143 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
144 * control
145 * @hw: pointer to hardware structure
146 *
147 * There are several phys that do not support autoneg flow control. This
148 * function check the device id to see if the associated phy supports
149 * autoneg flow control.
150 **/
151 static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
152 {
153
154 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
155
156 switch (hw->device_id) {
157 case IXGBE_DEV_ID_X540T:
158 case IXGBE_DEV_ID_X540T1:
159 return IXGBE_SUCCESS;
160 case IXGBE_DEV_ID_82599_T3_LOM:
161 return IXGBE_SUCCESS;
162 default:
163 return IXGBE_ERR_FC_NOT_SUPPORTED;
164 }
165 }
166
167 /**
168 * ixgbe_setup_fc - Set up flow control
169 * @hw: pointer to hardware structure
170 *
171 * Called at init time to set up flow control.
172 **/
173 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
174 {
175 s32 ret_val = IXGBE_SUCCESS;
176 u32 reg = 0, reg_bp = 0;
177 u16 reg_cu = 0;
178
179 DEBUGFUNC("ixgbe_setup_fc");
180
181 /*
182 * Validate the requested mode. Strict IEEE mode does not allow
183 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
184 */
185 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
186 DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
187 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
188 goto out;
189 }
190
191 /*
192 * 10gig parts do not have a word in the EEPROM to determine the
193 * default flow control setting, so we explicitly set it to full.
194 */
195 if (hw->fc.requested_mode == ixgbe_fc_default)
196 hw->fc.requested_mode = ixgbe_fc_full;
197
198 /*
199 * Set up the 1G and 10G flow control advertisement registers so the
200 * HW will be able to do fc autoneg once the cable is plugged in. If
201 * we link at 10G, the 1G advertisement is harmless and vice versa.
202 */
203 switch (hw->phy.media_type) {
204 case ixgbe_media_type_fiber:
205 case ixgbe_media_type_backplane:
206 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
207 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
208 break;
209 case ixgbe_media_type_copper:
210 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
211 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
212 break;
213 default:
214 break;
215 }
216
217 /*
218 * The possible values of fc.requested_mode are:
219 * 0: Flow control is completely disabled
220 * 1: Rx flow control is enabled (we can receive pause frames,
221 * but not send pause frames).
222 * 2: Tx flow control is enabled (we can send pause frames but
223 * we do not support receiving pause frames).
224 * 3: Both Rx and Tx flow control (symmetric) are enabled.
225 * other: Invalid.
226 */
227 switch (hw->fc.requested_mode) {
228 case ixgbe_fc_none:
229 /* Flow control completely disabled by software override. */
230 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
231 if (hw->phy.media_type == ixgbe_media_type_backplane)
232 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
233 IXGBE_AUTOC_ASM_PAUSE);
234 else if (hw->phy.media_type == ixgbe_media_type_copper)
235 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
236 break;
237 case ixgbe_fc_tx_pause:
238 /*
239 * Tx Flow control is enabled, and Rx Flow control is
240 * disabled by software override.
241 */
242 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
243 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
244 if (hw->phy.media_type == ixgbe_media_type_backplane) {
245 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
246 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
247 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
248 reg_cu |= IXGBE_TAF_ASM_PAUSE;
249 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
250 }
251 break;
252 case ixgbe_fc_rx_pause:
253 /*
254 * Rx Flow control is enabled and Tx Flow control is
255 * disabled by software override. Since there really
256 * isn't a way to advertise that we are capable of RX
257 * Pause ONLY, we will advertise that we support both
258 * symmetric and asymmetric Rx PAUSE, as such we fall
259 * through to the fc_full statement. Later, we will
260 * disable the adapter's ability to send PAUSE frames.
261 */
262 case ixgbe_fc_full:
263 /* Flow control (both Rx and Tx) is enabled by SW override. */
264 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
265 if (hw->phy.media_type == ixgbe_media_type_backplane)
266 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
267 IXGBE_AUTOC_ASM_PAUSE;
268 else if (hw->phy.media_type == ixgbe_media_type_copper)
269 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
270 break;
271 default:
272 DEBUGOUT("Flow control param set incorrectly\n");
273 ret_val = IXGBE_ERR_CONFIG;
274 goto out;
275 break;
276 }
277
278 if (hw->mac.type != ixgbe_mac_X540) {
279 /*
280 * Enable auto-negotiation between the MAC & PHY;
281 * the MAC will advertise clause 37 flow control.
282 */
283 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
284 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
285
286 /* Disable AN timeout */
287 if (hw->fc.strict_ieee)
288 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
289
290 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
291 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
292 }
293
294 /*
295 * AUTOC restart handles negotiation of 1G and 10G on backplane
296 * and copper. There is no need to set the PCS1GCTL register.
297 *
298 */
299 if (hw->phy.media_type == ixgbe_media_type_backplane) {
300 reg_bp |= IXGBE_AUTOC_AN_RESTART;
301 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
302 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
303 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
304 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
305 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
306 }
307
308 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
309 out:
310 return ret_val;
311 }
312
313 /**
314 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
315 * @hw: pointer to hardware structure
316 *
317 * Starts the hardware by filling the bus info structure and media type, clears
318 * all on chip counters, initializes receive address registers, multicast
319 * table, VLAN filter table, calls routine to set up link and flow control
320 * settings, and leaves transmit and receive units disabled and uninitialized
321 **/
322 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
323 {
324 s32 ret_val;
325 u32 ctrl_ext;
326
327 DEBUGFUNC("ixgbe_start_hw_generic");
328
329 /* Set the media type */
330 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
331
332 /* PHY ops initialization must be done in reset_hw() */
333
334 /* Clear the VLAN filter table */
335 hw->mac.ops.clear_vfta(hw);
336
337 /* Clear statistics registers */
338 hw->mac.ops.clear_hw_cntrs(hw);
339
340 /* Set No Snoop Disable */
341 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
342 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
343 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
344 IXGBE_WRITE_FLUSH(hw);
345
346 /* Setup flow control */
347 ret_val = ixgbe_setup_fc(hw);
348 if (ret_val != IXGBE_SUCCESS)
349 goto out;
350
351 /* Clear adapter stopped flag */
352 hw->adapter_stopped = FALSE;
353
354 out:
355 return ret_val;
356 }
357
358 /**
359 * ixgbe_start_hw_gen2 - Init sequence for common device family
360 * @hw: pointer to hw structure
361 *
362 * Performs the init sequence common to the second generation
363 * of 10 GbE devices.
364 * Devices in the second generation:
365 * 82599
366 * X540
367 **/
368 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
369 {
370 u32 i;
371 u32 regval;
372
373 /* Clear the rate limiters */
374 for (i = 0; i < hw->mac.max_tx_queues; i++) {
375 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
376 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
377 }
378 IXGBE_WRITE_FLUSH(hw);
379
380 /* Disable relaxed ordering */
381 for (i = 0; i < hw->mac.max_tx_queues; i++) {
382 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
383 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
384 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
385 }
386
387 for (i = 0; i < hw->mac.max_rx_queues; i++) {
388 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
389 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
390 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
391 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
392 }
393
394 return IXGBE_SUCCESS;
395 }
396
397 /**
398 * ixgbe_init_hw_generic - Generic hardware initialization
399 * @hw: pointer to hardware structure
400 *
401 * Initialize the hardware by resetting the hardware, filling the bus info
402 * structure and media type, clears all on chip counters, initializes receive
403 * address registers, multicast table, VLAN filter table, calls routine to set
404 * up link and flow control settings, and leaves transmit and receive units
405 * disabled and uninitialized
406 **/
407 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
408 {
409 s32 status;
410
411 DEBUGFUNC("ixgbe_init_hw_generic");
412
413 /* Reset the hardware */
414 status = hw->mac.ops.reset_hw(hw);
415
416 if (status == IXGBE_SUCCESS) {
417 /* Start the HW */
418 status = hw->mac.ops.start_hw(hw);
419 }
420
421 return status;
422 }
423
424 /**
425 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
426 * @hw: pointer to hardware structure
427 *
428 * Clears all hardware statistics counters by reading them from the hardware
429 * Statistics counters are clear on read.
430 **/
431 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
432 {
433 u16 i = 0;
434
435 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
436
437 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
438 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
439 IXGBE_READ_REG(hw, IXGBE_ERRBC);
440 IXGBE_READ_REG(hw, IXGBE_MSPDC);
441 for (i = 0; i < 8; i++)
442 IXGBE_READ_REG(hw, IXGBE_MPC(i));
443
444 IXGBE_READ_REG(hw, IXGBE_MLFC);
445 IXGBE_READ_REG(hw, IXGBE_MRFC);
446 IXGBE_READ_REG(hw, IXGBE_RLEC);
447 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
448 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
449 if (hw->mac.type >= ixgbe_mac_82599EB) {
450 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
451 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
452 } else {
453 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
454 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
455 }
456
457 for (i = 0; i < 8; i++) {
458 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
459 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
460 if (hw->mac.type >= ixgbe_mac_82599EB) {
461 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
462 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
463 } else {
464 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
465 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
466 }
467 }
468 if (hw->mac.type >= ixgbe_mac_82599EB)
469 for (i = 0; i < 8; i++)
470 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
471 IXGBE_READ_REG(hw, IXGBE_PRC64);
472 IXGBE_READ_REG(hw, IXGBE_PRC127);
473 IXGBE_READ_REG(hw, IXGBE_PRC255);
474 IXGBE_READ_REG(hw, IXGBE_PRC511);
475 IXGBE_READ_REG(hw, IXGBE_PRC1023);
476 IXGBE_READ_REG(hw, IXGBE_PRC1522);
477 IXGBE_READ_REG(hw, IXGBE_GPRC);
478 IXGBE_READ_REG(hw, IXGBE_BPRC);
479 IXGBE_READ_REG(hw, IXGBE_MPRC);
480 IXGBE_READ_REG(hw, IXGBE_GPTC);
481 IXGBE_READ_REG(hw, IXGBE_GORCL);
482 IXGBE_READ_REG(hw, IXGBE_GORCH);
483 IXGBE_READ_REG(hw, IXGBE_GOTCL);
484 IXGBE_READ_REG(hw, IXGBE_GOTCH);
485 if (hw->mac.type == ixgbe_mac_82598EB)
486 for (i = 0; i < 8; i++)
487 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
488 IXGBE_READ_REG(hw, IXGBE_RUC);
489 IXGBE_READ_REG(hw, IXGBE_RFC);
490 IXGBE_READ_REG(hw, IXGBE_ROC);
491 IXGBE_READ_REG(hw, IXGBE_RJC);
492 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
493 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
494 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
495 IXGBE_READ_REG(hw, IXGBE_TORL);
496 IXGBE_READ_REG(hw, IXGBE_TORH);
497 IXGBE_READ_REG(hw, IXGBE_TPR);
498 IXGBE_READ_REG(hw, IXGBE_TPT);
499 IXGBE_READ_REG(hw, IXGBE_PTC64);
500 IXGBE_READ_REG(hw, IXGBE_PTC127);
501 IXGBE_READ_REG(hw, IXGBE_PTC255);
502 IXGBE_READ_REG(hw, IXGBE_PTC511);
503 IXGBE_READ_REG(hw, IXGBE_PTC1023);
504 IXGBE_READ_REG(hw, IXGBE_PTC1522);
505 IXGBE_READ_REG(hw, IXGBE_MPTC);
506 IXGBE_READ_REG(hw, IXGBE_BPTC);
507 for (i = 0; i < 16; i++) {
508 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
509 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
510 if (hw->mac.type >= ixgbe_mac_82599EB) {
511 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
512 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
513 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
514 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
515 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
516 } else {
517 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
518 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
519 }
520 }
521
522 if (hw->mac.type == ixgbe_mac_X540) {
523 if (hw->phy.id == 0)
524 ixgbe_identify_phy(hw);
525 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
526 IXGBE_MDIO_PCS_DEV_TYPE, &i);
527 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
528 IXGBE_MDIO_PCS_DEV_TYPE, &i);
529 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
530 IXGBE_MDIO_PCS_DEV_TYPE, &i);
531 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
532 IXGBE_MDIO_PCS_DEV_TYPE, &i);
533 }
534
535 return IXGBE_SUCCESS;
536 }
537
538 /**
539 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
540 * @hw: pointer to hardware structure
541 * @pba_num: stores the part number string from the EEPROM
542 * @pba_num_size: part number string buffer length
543 *
544 * Reads the part number string from the EEPROM.
545 **/
546 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
547 u32 pba_num_size)
548 {
549 s32 ret_val;
550 u16 data;
551 u16 pba_ptr;
552 u16 offset;
553 u16 length;
554
555 DEBUGFUNC("ixgbe_read_pba_string_generic");
556
557 if (pba_num == NULL) {
558 DEBUGOUT("PBA string buffer was null\n");
559 return IXGBE_ERR_INVALID_ARGUMENT;
560 }
561
562 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
563 if (ret_val) {
564 DEBUGOUT("NVM Read Error\n");
565 return ret_val;
566 }
567
568 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
569 if (ret_val) {
570 DEBUGOUT("NVM Read Error\n");
571 return ret_val;
572 }
573
574 /*
575 * if data is not ptr guard the PBA must be in legacy format which
576 * means pba_ptr is actually our second data word for the PBA number
577 * and we can decode it into an ascii string
578 */
579 if (data != IXGBE_PBANUM_PTR_GUARD) {
580 DEBUGOUT("NVM PBA number is not stored as string\n");
581
582 /* we will need 11 characters to store the PBA */
583 if (pba_num_size < 11) {
584 DEBUGOUT("PBA string buffer too small\n");
585 return IXGBE_ERR_NO_SPACE;
586 }
587
588 /* extract hex string from data and pba_ptr */
589 pba_num[0] = (data >> 12) & 0xF;
590 pba_num[1] = (data >> 8) & 0xF;
591 pba_num[2] = (data >> 4) & 0xF;
592 pba_num[3] = data & 0xF;
593 pba_num[4] = (pba_ptr >> 12) & 0xF;
594 pba_num[5] = (pba_ptr >> 8) & 0xF;
595 pba_num[6] = '-';
596 pba_num[7] = 0;
597 pba_num[8] = (pba_ptr >> 4) & 0xF;
598 pba_num[9] = pba_ptr & 0xF;
599
600 /* put a null character on the end of our string */
601 pba_num[10] = '\0';
602
603 /* switch all the data but the '-' to hex char */
604 for (offset = 0; offset < 10; offset++) {
605 if (pba_num[offset] < 0xA)
606 pba_num[offset] += '0';
607 else if (pba_num[offset] < 0x10)
608 pba_num[offset] += 'A' - 0xA;
609 }
610
611 return IXGBE_SUCCESS;
612 }
613
614 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
615 if (ret_val) {
616 DEBUGOUT("NVM Read Error\n");
617 return ret_val;
618 }
619
620 if (length == 0xFFFF || length == 0) {
621 DEBUGOUT("NVM PBA number section invalid length\n");
622 return IXGBE_ERR_PBA_SECTION;
623 }
624
625 /* check if pba_num buffer is big enough */
626 if (pba_num_size < (((u32)length * 2) - 1)) {
627 DEBUGOUT("PBA string buffer too small\n");
628 return IXGBE_ERR_NO_SPACE;
629 }
630
631 /* trim pba length from start of string */
632 pba_ptr++;
633 length--;
634
635 for (offset = 0; offset < length; offset++) {
636 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
637 if (ret_val) {
638 DEBUGOUT("NVM Read Error\n");
639 return ret_val;
640 }
641 pba_num[offset * 2] = (u8)(data >> 8);
642 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
643 }
644 pba_num[offset * 2] = '\0';
645
646 return IXGBE_SUCCESS;
647 }
648
649 /**
650 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
651 * @hw: pointer to hardware structure
652 * @pba_num: stores the part number from the EEPROM
653 *
654 * Reads the part number from the EEPROM.
655 **/
656 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
657 {
658 s32 ret_val;
659 u16 data;
660
661 DEBUGFUNC("ixgbe_read_pba_num_generic");
662
663 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
664 if (ret_val) {
665 DEBUGOUT("NVM Read Error\n");
666 return ret_val;
667 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
668 DEBUGOUT("NVM Not supported\n");
669 return IXGBE_NOT_IMPLEMENTED;
670 }
671 *pba_num = (u32)(data << 16);
672
673 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
674 if (ret_val) {
675 DEBUGOUT("NVM Read Error\n");
676 return ret_val;
677 }
678 *pba_num |= data;
679
680 return IXGBE_SUCCESS;
681 }
682
683 /**
684 * ixgbe_get_mac_addr_generic - Generic get MAC address
685 * @hw: pointer to hardware structure
686 * @mac_addr: Adapter MAC address
687 *
688 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
689 * A reset of the adapter must be performed prior to calling this function
690 * in order for the MAC address to have been loaded from the EEPROM into RAR0
691 **/
692 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
693 {
694 u32 rar_high;
695 u32 rar_low;
696 u16 i;
697
698 DEBUGFUNC("ixgbe_get_mac_addr_generic");
699
700 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
701 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
702
703 for (i = 0; i < 4; i++)
704 mac_addr[i] = (u8)(rar_low >> (i*8));
705
706 for (i = 0; i < 2; i++)
707 mac_addr[i+4] = (u8)(rar_high >> (i*8));
708
709 return IXGBE_SUCCESS;
710 }
711
712 /**
713 * ixgbe_get_bus_info_generic - Generic set PCI bus info
714 * @hw: pointer to hardware structure
715 *
716 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
717 **/
718 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
719 {
720 struct ixgbe_mac_info *mac = &hw->mac;
721 u16 link_status;
722
723 DEBUGFUNC("ixgbe_get_bus_info_generic");
724
725 hw->bus.type = ixgbe_bus_type_pci_express;
726
727 /* Get the negotiated link width and speed from PCI config space */
728 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
729
730 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
731 case IXGBE_PCI_LINK_WIDTH_1:
732 hw->bus.width = ixgbe_bus_width_pcie_x1;
733 break;
734 case IXGBE_PCI_LINK_WIDTH_2:
735 hw->bus.width = ixgbe_bus_width_pcie_x2;
736 break;
737 case IXGBE_PCI_LINK_WIDTH_4:
738 hw->bus.width = ixgbe_bus_width_pcie_x4;
739 break;
740 case IXGBE_PCI_LINK_WIDTH_8:
741 hw->bus.width = ixgbe_bus_width_pcie_x8;
742 break;
743 default:
744 hw->bus.width = ixgbe_bus_width_unknown;
745 break;
746 }
747
748 switch (link_status & IXGBE_PCI_LINK_SPEED) {
749 case IXGBE_PCI_LINK_SPEED_2500:
750 hw->bus.speed = ixgbe_bus_speed_2500;
751 break;
752 case IXGBE_PCI_LINK_SPEED_5000:
753 hw->bus.speed = ixgbe_bus_speed_5000;
754 break;
755 case IXGBE_PCI_LINK_SPEED_8000:
756 hw->bus.speed = ixgbe_bus_speed_8000;
757 break;
758 default:
759 hw->bus.speed = ixgbe_bus_speed_unknown;
760 break;
761 }
762
763 mac->ops.set_lan_id(hw);
764
765 return IXGBE_SUCCESS;
766 }
767
768 /**
769 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
770 * @hw: pointer to the HW structure
771 *
772 * Determines the LAN function id by reading memory-mapped registers
773 * and swaps the port value if requested.
774 **/
775 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
776 {
777 struct ixgbe_bus_info *bus = &hw->bus;
778 u32 reg;
779
780 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
781
782 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
783 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
784 bus->lan_id = bus->func;
785
786 /* check for a port swap */
787 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
788 if (reg & IXGBE_FACTPS_LFS)
789 bus->func ^= 0x1;
790 }
791
792 /**
793 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
794 * @hw: pointer to hardware structure
795 *
796 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
797 * disables transmit and receive units. The adapter_stopped flag is used by
798 * the shared code and drivers to determine if the adapter is in a stopped
799 * state and should not touch the hardware.
800 **/
801 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
802 {
803 u32 reg_val;
804 u16 i;
805
806 DEBUGFUNC("ixgbe_stop_adapter_generic");
807
808 /*
809 * Set the adapter_stopped flag so other driver functions stop touching
810 * the hardware
811 */
812 hw->adapter_stopped = TRUE;
813
814 /* Disable the receive unit */
815 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
816
817 /* Clear interrupt mask to stop interrupts from being generated */
818 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
819
820 /* Clear any pending interrupts, flush previous writes */
821 IXGBE_READ_REG(hw, IXGBE_EICR);
822
823 /* Disable the transmit unit. Each queue must be disabled. */
824 for (i = 0; i < hw->mac.max_tx_queues; i++)
825 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
826
827 /* Disable the receive unit by stopping each queue */
828 for (i = 0; i < hw->mac.max_rx_queues; i++) {
829 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
830 reg_val &= ~IXGBE_RXDCTL_ENABLE;
831 reg_val |= IXGBE_RXDCTL_SWFLSH;
832 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
833 }
834
835 /* flush all queues disables */
836 IXGBE_WRITE_FLUSH(hw);
837 msec_delay(2);
838
839 /*
840 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
841 * access and verify no pending requests
842 */
843 return ixgbe_disable_pcie_master(hw);
844 }
845
846 /**
847 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
848 * @hw: pointer to hardware structure
849 * @index: led number to turn on
850 **/
851 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
852 {
853 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
854
855 DEBUGFUNC("ixgbe_led_on_generic");
856
857 /* To turn on the LED, set mode to ON. */
858 led_reg &= ~IXGBE_LED_MODE_MASK(index);
859 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
860 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
861 IXGBE_WRITE_FLUSH(hw);
862
863 return IXGBE_SUCCESS;
864 }
865
866 /**
867 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
868 * @hw: pointer to hardware structure
869 * @index: led number to turn off
870 **/
871 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
872 {
873 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
874
875 DEBUGFUNC("ixgbe_led_off_generic");
876
877 /* To turn off the LED, set mode to OFF. */
878 led_reg &= ~IXGBE_LED_MODE_MASK(index);
879 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
880 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
881 IXGBE_WRITE_FLUSH(hw);
882
883 return IXGBE_SUCCESS;
884 }
885
886 /**
887 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
888 * @hw: pointer to hardware structure
889 *
890 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
891 * ixgbe_hw struct in order to set up EEPROM access.
892 **/
893 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
894 {
895 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
896 u32 eec;
897 u16 eeprom_size;
898
899 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
900
901 if (eeprom->type == ixgbe_eeprom_uninitialized) {
902 eeprom->type = ixgbe_eeprom_none;
903 /* Set default semaphore delay to 10ms which is a well
904 * tested value */
905 eeprom->semaphore_delay = 10;
906 /* Clear EEPROM page size, it will be initialized as needed */
907 eeprom->word_page_size = 0;
908
909 /*
910 * Check for EEPROM present first.
911 * If not present leave as none
912 */
913 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
914 if (eec & IXGBE_EEC_PRES) {
915 eeprom->type = ixgbe_eeprom_spi;
916
917 /*
918 * SPI EEPROM is assumed here. This code would need to
919 * change if a future EEPROM is not SPI.
920 */
921 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
922 IXGBE_EEC_SIZE_SHIFT);
923 eeprom->word_size = 1 << (eeprom_size +
924 IXGBE_EEPROM_WORD_SIZE_SHIFT);
925 }
926
927 if (eec & IXGBE_EEC_ADDR_SIZE)
928 eeprom->address_bits = 16;
929 else
930 eeprom->address_bits = 8;
931 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
932 "%d\n", eeprom->type, eeprom->word_size,
933 eeprom->address_bits);
934 }
935
936 return IXGBE_SUCCESS;
937 }
938
939 /**
940 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
941 * @hw: pointer to hardware structure
942 * @offset: offset within the EEPROM to write
943 * @words: number of word(s)
944 * @data: 16 bit word(s) to write to EEPROM
945 *
946 * Reads 16 bit word(s) from EEPROM through bit-bang method
947 **/
948 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
949 u16 words, u16 *data)
950 {
951 s32 status = IXGBE_SUCCESS;
952 u16 i, count;
953
954 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
955
956 hw->eeprom.ops.init_params(hw);
957
958 if (words == 0) {
959 status = IXGBE_ERR_INVALID_ARGUMENT;
960 goto out;
961 }
962
963 if (offset + words > hw->eeprom.word_size) {
964 status = IXGBE_ERR_EEPROM;
965 goto out;
966 }
967
968 /*
969 * The EEPROM page size cannot be queried from the chip. We do lazy
970 * initialization. It is worth to do that when we write large buffer.
971 */
972 if ((hw->eeprom.word_page_size == 0) &&
973 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
974 ixgbe_detect_eeprom_page_size_generic(hw, offset);
975
976 /*
977 * We cannot hold synchronization semaphores for too long
978 * to avoid other entity starvation. However it is more efficient
979 * to read in bursts than synchronizing access for each word.
980 */
981 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
982 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
983 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
984 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
985 count, &data[i]);
986
987 if (status != IXGBE_SUCCESS)
988 break;
989 }
990
991 out:
992 return status;
993 }
994
995 /**
996 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
997 * @hw: pointer to hardware structure
998 * @offset: offset within the EEPROM to be written to
999 * @words: number of word(s)
1000 * @data: 16 bit word(s) to be written to the EEPROM
1001 *
1002 * If ixgbe_eeprom_update_checksum is not called after this function, the
1003 * EEPROM will most likely contain an invalid checksum.
1004 **/
1005 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1006 u16 words, u16 *data)
1007 {
1008 s32 status;
1009 u16 word;
1010 u16 page_size;
1011 u16 i;
1012 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1013
1014 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1015
1016 /* Prepare the EEPROM for writing */
1017 status = ixgbe_acquire_eeprom(hw);
1018
1019 if (status == IXGBE_SUCCESS) {
1020 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1021 ixgbe_release_eeprom(hw);
1022 status = IXGBE_ERR_EEPROM;
1023 }
1024 }
1025
1026 if (status == IXGBE_SUCCESS) {
1027 for (i = 0; i < words; i++) {
1028 ixgbe_standby_eeprom(hw);
1029
1030 /* Send the WRITE ENABLE command (8 bit opcode ) */
1031 ixgbe_shift_out_eeprom_bits(hw,
1032 IXGBE_EEPROM_WREN_OPCODE_SPI,
1033 IXGBE_EEPROM_OPCODE_BITS);
1034
1035 ixgbe_standby_eeprom(hw);
1036
1037 /*
1038 * Some SPI eeproms use the 8th address bit embedded
1039 * in the opcode
1040 */
1041 if ((hw->eeprom.address_bits == 8) &&
1042 ((offset + i) >= 128))
1043 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1044
1045 /* Send the Write command (8-bit opcode + addr) */
1046 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1047 IXGBE_EEPROM_OPCODE_BITS);
1048 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1049 hw->eeprom.address_bits);
1050
1051 page_size = hw->eeprom.word_page_size;
1052
1053 /* Send the data in burst via SPI*/
1054 do {
1055 word = data[i];
1056 word = (word >> 8) | (word << 8);
1057 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1058
1059 if (page_size == 0)
1060 break;
1061
1062 /* do not wrap around page */
1063 if (((offset + i) & (page_size - 1)) ==
1064 (page_size - 1))
1065 break;
1066 } while (++i < words);
1067
1068 ixgbe_standby_eeprom(hw);
1069 msec_delay(10);
1070 }
1071 /* Done with writing - release the EEPROM */
1072 ixgbe_release_eeprom(hw);
1073 }
1074
1075 return status;
1076 }
1077
1078 /**
1079 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1080 * @hw: pointer to hardware structure
1081 * @offset: offset within the EEPROM to be written to
1082 * @data: 16 bit word to be written to the EEPROM
1083 *
1084 * If ixgbe_eeprom_update_checksum is not called after this function, the
1085 * EEPROM will most likely contain an invalid checksum.
1086 **/
1087 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1088 {
1089 s32 status;
1090
1091 DEBUGFUNC("ixgbe_write_eeprom_generic");
1092
1093 hw->eeprom.ops.init_params(hw);
1094
1095 if (offset >= hw->eeprom.word_size) {
1096 status = IXGBE_ERR_EEPROM;
1097 goto out;
1098 }
1099
1100 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1101
1102 out:
1103 return status;
1104 }
1105
1106 /**
1107 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1108 * @hw: pointer to hardware structure
1109 * @offset: offset within the EEPROM to be read
1110 * @data: read 16 bit words(s) from EEPROM
1111 * @words: number of word(s)
1112 *
1113 * Reads 16 bit word(s) from EEPROM through bit-bang method
1114 **/
1115 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1116 u16 words, u16 *data)
1117 {
1118 s32 status = IXGBE_SUCCESS;
1119 u16 i, count;
1120
1121 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1122
1123 hw->eeprom.ops.init_params(hw);
1124
1125 if (words == 0) {
1126 status = IXGBE_ERR_INVALID_ARGUMENT;
1127 goto out;
1128 }
1129
1130 if (offset + words > hw->eeprom.word_size) {
1131 status = IXGBE_ERR_EEPROM;
1132 goto out;
1133 }
1134
1135 /*
1136 * We cannot hold synchronization semaphores for too long
1137 * to avoid other entity starvation. However it is more efficient
1138 * to read in bursts than synchronizing access for each word.
1139 */
1140 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1141 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1142 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1143
1144 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1145 count, &data[i]);
1146
1147 if (status != IXGBE_SUCCESS)
1148 break;
1149 }
1150
1151 out:
1152 return status;
1153 }
1154
1155 /**
1156 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1157 * @hw: pointer to hardware structure
1158 * @offset: offset within the EEPROM to be read
1159 * @words: number of word(s)
1160 * @data: read 16 bit word(s) from EEPROM
1161 *
1162 * Reads 16 bit word(s) from EEPROM through bit-bang method
1163 **/
1164 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1165 u16 words, u16 *data)
1166 {
1167 s32 status;
1168 u16 word_in;
1169 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1170 u16 i;
1171
1172 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1173
1174 /* Prepare the EEPROM for reading */
1175 status = ixgbe_acquire_eeprom(hw);
1176
1177 if (status == IXGBE_SUCCESS) {
1178 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1179 ixgbe_release_eeprom(hw);
1180 status = IXGBE_ERR_EEPROM;
1181 }
1182 }
1183
1184 if (status == IXGBE_SUCCESS) {
1185 for (i = 0; i < words; i++) {
1186 ixgbe_standby_eeprom(hw);
1187 /*
1188 * Some SPI eeproms use the 8th address bit embedded
1189 * in the opcode
1190 */
1191 if ((hw->eeprom.address_bits == 8) &&
1192 ((offset + i) >= 128))
1193 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1194
1195 /* Send the READ command (opcode + addr) */
1196 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1197 IXGBE_EEPROM_OPCODE_BITS);
1198 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1199 hw->eeprom.address_bits);
1200
1201 /* Read the data. */
1202 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1203 data[i] = (word_in >> 8) | (word_in << 8);
1204 }
1205
1206 /* End this read operation */
1207 ixgbe_release_eeprom(hw);
1208 }
1209
1210 return status;
1211 }
1212
1213 /**
1214 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1215 * @hw: pointer to hardware structure
1216 * @offset: offset within the EEPROM to be read
1217 * @data: read 16 bit value from EEPROM
1218 *
1219 * Reads 16 bit value from EEPROM through bit-bang method
1220 **/
1221 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1222 u16 *data)
1223 {
1224 s32 status;
1225
1226 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1227
1228 hw->eeprom.ops.init_params(hw);
1229
1230 if (offset >= hw->eeprom.word_size) {
1231 status = IXGBE_ERR_EEPROM;
1232 goto out;
1233 }
1234
1235 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1236
1237 out:
1238 return status;
1239 }
1240
1241 /**
1242 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1243 * @hw: pointer to hardware structure
1244 * @offset: offset of word in the EEPROM to read
1245 * @words: number of word(s)
1246 * @data: 16 bit word(s) from the EEPROM
1247 *
1248 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1249 **/
1250 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1251 u16 words, u16 *data)
1252 {
1253 u32 eerd;
1254 s32 status = IXGBE_SUCCESS;
1255 u32 i;
1256
1257 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1258
1259 hw->eeprom.ops.init_params(hw);
1260
1261 if (words == 0) {
1262 status = IXGBE_ERR_INVALID_ARGUMENT;
1263 goto out;
1264 }
1265
1266 if (offset >= hw->eeprom.word_size) {
1267 status = IXGBE_ERR_EEPROM;
1268 goto out;
1269 }
1270
1271 for (i = 0; i < words; i++) {
1272 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
1273 IXGBE_EEPROM_RW_REG_START;
1274
1275 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1276 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1277
1278 if (status == IXGBE_SUCCESS) {
1279 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1280 IXGBE_EEPROM_RW_REG_DATA);
1281 } else {
1282 DEBUGOUT("Eeprom read timed out\n");
1283 goto out;
1284 }
1285 }
1286 out:
1287 return status;
1288 }
1289
1290 /**
1291 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1292 * @hw: pointer to hardware structure
1293 * @offset: offset within the EEPROM to be used as a scratch pad
1294 *
1295 * Discover EEPROM page size by writing marching data at given offset.
1296 * This function is called only when we are writing a new large buffer
1297 * at given offset so the data would be overwritten anyway.
1298 **/
1299 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1300 u16 offset)
1301 {
1302 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1303 s32 status = IXGBE_SUCCESS;
1304 u16 i;
1305
1306 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1307
1308 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1309 data[i] = i;
1310
1311 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1312 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1313 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1314 hw->eeprom.word_page_size = 0;
1315 if (status != IXGBE_SUCCESS)
1316 goto out;
1317
1318 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1319 if (status != IXGBE_SUCCESS)
1320 goto out;
1321
1322 /*
1323 * When writing in burst more than the actual page size
1324 * EEPROM address wraps around current page.
1325 */
1326 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1327
1328 DEBUGOUT1("Detected EEPROM page size = %d words.",
1329 hw->eeprom.word_page_size);
1330 out:
1331 return status;
1332 }
1333
1334 /**
1335 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1336 * @hw: pointer to hardware structure
1337 * @offset: offset of word in the EEPROM to read
1338 * @data: word read from the EEPROM
1339 *
1340 * Reads a 16 bit word from the EEPROM using the EERD register.
1341 **/
1342 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1343 {
1344 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1345 }
1346
1347 /**
1348 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1349 * @hw: pointer to hardware structure
1350 * @offset: offset of word in the EEPROM to write
1351 * @words: number of word(s)
1352 * @data: word(s) write to the EEPROM
1353 *
1354 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1355 **/
1356 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1357 u16 words, u16 *data)
1358 {
1359 u32 eewr;
1360 s32 status = IXGBE_SUCCESS;
1361 u16 i;
1362
1363 DEBUGFUNC("ixgbe_write_eewr_generic");
1364
1365 hw->eeprom.ops.init_params(hw);
1366
1367 if (words == 0) {
1368 status = IXGBE_ERR_INVALID_ARGUMENT;
1369 goto out;
1370 }
1371
1372 if (offset >= hw->eeprom.word_size) {
1373 status = IXGBE_ERR_EEPROM;
1374 goto out;
1375 }
1376
1377 for (i = 0; i < words; i++) {
1378 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1379 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1380 IXGBE_EEPROM_RW_REG_START;
1381
1382 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1383 if (status != IXGBE_SUCCESS) {
1384 DEBUGOUT("Eeprom write EEWR timed out\n");
1385 goto out;
1386 }
1387
1388 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1389
1390 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1391 if (status != IXGBE_SUCCESS) {
1392 DEBUGOUT("Eeprom write EEWR timed out\n");
1393 goto out;
1394 }
1395 }
1396
1397 out:
1398 return status;
1399 }
1400
1401 /**
1402 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1403 * @hw: pointer to hardware structure
1404 * @offset: offset of word in the EEPROM to write
1405 * @data: word write to the EEPROM
1406 *
1407 * Write a 16 bit word to the EEPROM using the EEWR register.
1408 **/
1409 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1410 {
1411 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1412 }
1413
1414 /**
1415 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1416 * @hw: pointer to hardware structure
1417 * @ee_reg: EEPROM flag for polling
1418 *
1419 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1420 * read or write is done respectively.
1421 **/
1422 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1423 {
1424 u32 i;
1425 u32 reg;
1426 s32 status = IXGBE_ERR_EEPROM;
1427
1428 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1429
1430 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1431 if (ee_reg == IXGBE_NVM_POLL_READ)
1432 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1433 else
1434 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1435
1436 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1437 status = IXGBE_SUCCESS;
1438 break;
1439 }
1440 usec_delay(5);
1441 }
1442 return status;
1443 }
1444
1445 /**
1446 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1447 * @hw: pointer to hardware structure
1448 *
1449 * Prepares EEPROM for access using bit-bang method. This function should
1450 * be called before issuing a command to the EEPROM.
1451 **/
1452 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1453 {
1454 s32 status = IXGBE_SUCCESS;
1455 u32 eec;
1456 u32 i;
1457
1458 DEBUGFUNC("ixgbe_acquire_eeprom");
1459
1460 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1461 != IXGBE_SUCCESS)
1462 status = IXGBE_ERR_SWFW_SYNC;
1463
1464 if (status == IXGBE_SUCCESS) {
1465 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1466
1467 /* Request EEPROM Access */
1468 eec |= IXGBE_EEC_REQ;
1469 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1470
1471 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1472 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1473 if (eec & IXGBE_EEC_GNT)
1474 break;
1475 usec_delay(5);
1476 }
1477
1478 /* Release if grant not acquired */
1479 if (!(eec & IXGBE_EEC_GNT)) {
1480 eec &= ~IXGBE_EEC_REQ;
1481 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1482 DEBUGOUT("Could not acquire EEPROM grant\n");
1483
1484 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1485 status = IXGBE_ERR_EEPROM;
1486 }
1487
1488 /* Setup EEPROM for Read/Write */
1489 if (status == IXGBE_SUCCESS) {
1490 /* Clear CS and SK */
1491 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1492 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1493 IXGBE_WRITE_FLUSH(hw);
1494 usec_delay(1);
1495 }
1496 }
1497 return status;
1498 }
1499
1500 /**
1501 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1502 * @hw: pointer to hardware structure
1503 *
1504 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1505 **/
1506 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1507 {
1508 s32 status = IXGBE_ERR_EEPROM;
1509 u32 timeout = 2000;
1510 u32 i;
1511 u32 swsm;
1512
1513 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1514
1515
1516 /* Get SMBI software semaphore between device drivers first */
1517 for (i = 0; i < timeout; i++) {
1518 /*
1519 * If the SMBI bit is 0 when we read it, then the bit will be
1520 * set and we have the semaphore
1521 */
1522 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1523 if (!(swsm & IXGBE_SWSM_SMBI)) {
1524 status = IXGBE_SUCCESS;
1525 break;
1526 }
1527 usec_delay(50);
1528 }
1529
1530 if (i == timeout) {
1531 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1532 "not granted.\n");
1533 /*
1534 * this release is particularly important because our attempts
1535 * above to get the semaphore may have succeeded, and if there
1536 * was a timeout, we should unconditionally clear the semaphore
1537 * bits to free the driver to make progress
1538 */
1539 ixgbe_release_eeprom_semaphore(hw);
1540
1541 usec_delay(50);
1542 /*
1543 * one last try
1544 * If the SMBI bit is 0 when we read it, then the bit will be
1545 * set and we have the semaphore
1546 */
1547 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1548 if (!(swsm & IXGBE_SWSM_SMBI))
1549 status = IXGBE_SUCCESS;
1550 }
1551
1552 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1553 if (status == IXGBE_SUCCESS) {
1554 for (i = 0; i < timeout; i++) {
1555 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1556
1557 /* Set the SW EEPROM semaphore bit to request access */
1558 swsm |= IXGBE_SWSM_SWESMBI;
1559 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1560
1561 /*
1562 * If we set the bit successfully then we got the
1563 * semaphore.
1564 */
1565 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1566 if (swsm & IXGBE_SWSM_SWESMBI)
1567 break;
1568
1569 usec_delay(50);
1570 }
1571
1572 /*
1573 * Release semaphores and return error if SW EEPROM semaphore
1574 * was not granted because we don't have access to the EEPROM
1575 */
1576 if (i >= timeout) {
1577 DEBUGOUT("SWESMBI Software EEPROM semaphore "
1578 "not granted.\n");
1579 ixgbe_release_eeprom_semaphore(hw);
1580 status = IXGBE_ERR_EEPROM;
1581 }
1582 } else {
1583 DEBUGOUT("Software semaphore SMBI between device drivers "
1584 "not granted.\n");
1585 }
1586
1587 return status;
1588 }
1589
1590 /**
1591 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1592 * @hw: pointer to hardware structure
1593 *
1594 * This function clears hardware semaphore bits.
1595 **/
1596 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1597 {
1598 u32 swsm;
1599
1600 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1601
1602 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1603
1604 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1605 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1606 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1607 IXGBE_WRITE_FLUSH(hw);
1608 }
1609
1610 /**
1611 * ixgbe_ready_eeprom - Polls for EEPROM ready
1612 * @hw: pointer to hardware structure
1613 **/
1614 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1615 {
1616 s32 status = IXGBE_SUCCESS;
1617 u16 i;
1618 u8 spi_stat_reg;
1619
1620 DEBUGFUNC("ixgbe_ready_eeprom");
1621
1622 /*
1623 * Read "Status Register" repeatedly until the LSB is cleared. The
1624 * EEPROM will signal that the command has been completed by clearing
1625 * bit 0 of the internal status register. If it's not cleared within
1626 * 5 milliseconds, then error out.
1627 */
1628 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1629 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1630 IXGBE_EEPROM_OPCODE_BITS);
1631 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1632 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1633 break;
1634
1635 usec_delay(5);
1636 ixgbe_standby_eeprom(hw);
1637 };
1638
1639 /*
1640 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1641 * devices (and only 0-5mSec on 5V devices)
1642 */
1643 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1644 DEBUGOUT("SPI EEPROM Status error\n");
1645 status = IXGBE_ERR_EEPROM;
1646 }
1647
1648 return status;
1649 }
1650
1651 /**
1652 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1653 * @hw: pointer to hardware structure
1654 **/
1655 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1656 {
1657 u32 eec;
1658
1659 DEBUGFUNC("ixgbe_standby_eeprom");
1660
1661 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1662
1663 /* Toggle CS to flush commands */
1664 eec |= IXGBE_EEC_CS;
1665 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1666 IXGBE_WRITE_FLUSH(hw);
1667 usec_delay(1);
1668 eec &= ~IXGBE_EEC_CS;
1669 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1670 IXGBE_WRITE_FLUSH(hw);
1671 usec_delay(1);
1672 }
1673
1674 /**
1675 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1676 * @hw: pointer to hardware structure
1677 * @data: data to send to the EEPROM
1678 * @count: number of bits to shift out
1679 **/
1680 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1681 u16 count)
1682 {
1683 u32 eec;
1684 u32 mask;
1685 u32 i;
1686
1687 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1688
1689 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1690
1691 /*
1692 * Mask is used to shift "count" bits of "data" out to the EEPROM
1693 * one bit at a time. Determine the starting bit based on count
1694 */
1695 mask = 0x01 << (count - 1);
1696
1697 for (i = 0; i < count; i++) {
1698 /*
1699 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1700 * "1", and then raising and then lowering the clock (the SK
1701 * bit controls the clock input to the EEPROM). A "0" is
1702 * shifted out to the EEPROM by setting "DI" to "0" and then
1703 * raising and then lowering the clock.
1704 */
1705 if (data & mask)
1706 eec |= IXGBE_EEC_DI;
1707 else
1708 eec &= ~IXGBE_EEC_DI;
1709
1710 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1711 IXGBE_WRITE_FLUSH(hw);
1712
1713 usec_delay(1);
1714
1715 ixgbe_raise_eeprom_clk(hw, &eec);
1716 ixgbe_lower_eeprom_clk(hw, &eec);
1717
1718 /*
1719 * Shift mask to signify next bit of data to shift in to the
1720 * EEPROM
1721 */
1722 mask = mask >> 1;
1723 };
1724
1725 /* We leave the "DI" bit set to "0" when we leave this routine. */
1726 eec &= ~IXGBE_EEC_DI;
1727 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1728 IXGBE_WRITE_FLUSH(hw);
1729 }
1730
1731 /**
1732 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1733 * @hw: pointer to hardware structure
1734 **/
1735 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1736 {
1737 u32 eec;
1738 u32 i;
1739 u16 data = 0;
1740
1741 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1742
1743 /*
1744 * In order to read a register from the EEPROM, we need to shift
1745 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1746 * the clock input to the EEPROM (setting the SK bit), and then reading
1747 * the value of the "DO" bit. During this "shifting in" process the
1748 * "DI" bit should always be clear.
1749 */
1750 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1751
1752 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1753
1754 for (i = 0; i < count; i++) {
1755 data = data << 1;
1756 ixgbe_raise_eeprom_clk(hw, &eec);
1757
1758 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1759
1760 eec &= ~(IXGBE_EEC_DI);
1761 if (eec & IXGBE_EEC_DO)
1762 data |= 1;
1763
1764 ixgbe_lower_eeprom_clk(hw, &eec);
1765 }
1766
1767 return data;
1768 }
1769
1770 /**
1771 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1772 * @hw: pointer to hardware structure
1773 * @eec: EEC register's current value
1774 **/
1775 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1776 {
1777 DEBUGFUNC("ixgbe_raise_eeprom_clk");
1778
1779 /*
1780 * Raise the clock input to the EEPROM
1781 * (setting the SK bit), then delay
1782 */
1783 *eec = *eec | IXGBE_EEC_SK;
1784 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1785 IXGBE_WRITE_FLUSH(hw);
1786 usec_delay(1);
1787 }
1788
1789 /**
1790 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1791 * @hw: pointer to hardware structure
1792 * @eecd: EECD's current value
1793 **/
1794 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1795 {
1796 DEBUGFUNC("ixgbe_lower_eeprom_clk");
1797
1798 /*
1799 * Lower the clock input to the EEPROM (clearing the SK bit), then
1800 * delay
1801 */
1802 *eec = *eec & ~IXGBE_EEC_SK;
1803 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1804 IXGBE_WRITE_FLUSH(hw);
1805 usec_delay(1);
1806 }
1807
1808 /**
1809 * ixgbe_release_eeprom - Release EEPROM, release semaphores
1810 * @hw: pointer to hardware structure
1811 **/
1812 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1813 {
1814 u32 eec;
1815
1816 DEBUGFUNC("ixgbe_release_eeprom");
1817
1818 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1819
1820 eec |= IXGBE_EEC_CS; /* Pull CS high */
1821 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1822
1823 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1824 IXGBE_WRITE_FLUSH(hw);
1825
1826 usec_delay(1);
1827
1828 /* Stop requesting EEPROM access */
1829 eec &= ~IXGBE_EEC_REQ;
1830 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1831
1832 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1833
1834 /* Delay before attempt to obtain semaphore again to allow FW access */
1835 msec_delay(hw->eeprom.semaphore_delay);
1836 }
1837
1838 /**
1839 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1840 * @hw: pointer to hardware structure
1841 **/
1842 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1843 {
1844 u16 i;
1845 u16 j;
1846 u16 checksum = 0;
1847 u16 length = 0;
1848 u16 pointer = 0;
1849 u16 word = 0;
1850
1851 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1852
1853 /* Include 0x0-0x3F in the checksum */
1854 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1855 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
1856 DEBUGOUT("EEPROM read failed\n");
1857 break;
1858 }
1859 checksum += word;
1860 }
1861
1862 /* Include all data from pointers except for the fw pointer */
1863 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1864 hw->eeprom.ops.read(hw, i, &pointer);
1865
1866 /* Make sure the pointer seems valid */
1867 if (pointer != 0xFFFF && pointer != 0) {
1868 hw->eeprom.ops.read(hw, pointer, &length);
1869
1870 if (length != 0xFFFF && length != 0) {
1871 for (j = pointer+1; j <= pointer+length; j++) {
1872 hw->eeprom.ops.read(hw, j, &word);
1873 checksum += word;
1874 }
1875 }
1876 }
1877 }
1878
1879 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1880
1881 return checksum;
1882 }
1883
1884 /**
1885 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1886 * @hw: pointer to hardware structure
1887 * @checksum_val: calculated checksum
1888 *
1889 * Performs checksum calculation and validates the EEPROM checksum. If the
1890 * caller does not need checksum_val, the value can be NULL.
1891 **/
1892 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1893 u16 *checksum_val)
1894 {
1895 s32 status;
1896 u16 checksum;
1897 u16 read_checksum = 0;
1898
1899 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1900
1901 /*
1902 * Read the first word from the EEPROM. If this times out or fails, do
1903 * not continue or we could be in for a very long wait while every
1904 * EEPROM read fails
1905 */
1906 status = hw->eeprom.ops.read(hw, 0, &checksum);
1907
1908 if (status == IXGBE_SUCCESS) {
1909 checksum = hw->eeprom.ops.calc_checksum(hw);
1910
1911 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1912
1913 /*
1914 * Verify read checksum from EEPROM is the same as
1915 * calculated checksum
1916 */
1917 if (read_checksum != checksum)
1918 status = IXGBE_ERR_EEPROM_CHECKSUM;
1919
1920 /* If the user cares, return the calculated checksum */
1921 if (checksum_val)
1922 *checksum_val = checksum;
1923 } else {
1924 DEBUGOUT("EEPROM read failed\n");
1925 }
1926
1927 return status;
1928 }
1929
1930 /**
1931 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1932 * @hw: pointer to hardware structure
1933 **/
1934 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1935 {
1936 s32 status;
1937 u16 checksum;
1938
1939 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1940
1941 /*
1942 * Read the first word from the EEPROM. If this times out or fails, do
1943 * not continue or we could be in for a very long wait while every
1944 * EEPROM read fails
1945 */
1946 status = hw->eeprom.ops.read(hw, 0, &checksum);
1947
1948 if (status == IXGBE_SUCCESS) {
1949 checksum = hw->eeprom.ops.calc_checksum(hw);
1950 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1951 checksum);
1952 } else {
1953 DEBUGOUT("EEPROM read failed\n");
1954 }
1955
1956 return status;
1957 }
1958
1959 /**
1960 * ixgbe_validate_mac_addr - Validate MAC address
1961 * @mac_addr: pointer to MAC address.
1962 *
1963 * Tests a MAC address to ensure it is a valid Individual Address
1964 **/
1965 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1966 {
1967 s32 status = IXGBE_SUCCESS;
1968
1969 DEBUGFUNC("ixgbe_validate_mac_addr");
1970
1971 /* Make sure it is not a multicast address */
1972 if (IXGBE_IS_MULTICAST(mac_addr)) {
1973 DEBUGOUT("MAC address is multicast\n");
1974 status = IXGBE_ERR_INVALID_MAC_ADDR;
1975 /* Not a broadcast address */
1976 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
1977 DEBUGOUT("MAC address is broadcast\n");
1978 status = IXGBE_ERR_INVALID_MAC_ADDR;
1979 /* Reject the zero address */
1980 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1981 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1982 DEBUGOUT("MAC address is all zeros\n");
1983 status = IXGBE_ERR_INVALID_MAC_ADDR;
1984 }
1985 return status;
1986 }
1987
1988 /**
1989 * ixgbe_set_rar_generic - Set Rx address register
1990 * @hw: pointer to hardware structure
1991 * @index: Receive address register to write
1992 * @addr: Address to put into receive address register
1993 * @vmdq: VMDq "set" or "pool" index
1994 * @enable_addr: set flag that address is active
1995 *
1996 * Puts an ethernet address into a receive address register.
1997 **/
1998 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1999 u32 enable_addr)
2000 {
2001 u32 rar_low, rar_high;
2002 u32 rar_entries = hw->mac.num_rar_entries;
2003
2004 DEBUGFUNC("ixgbe_set_rar_generic");
2005
2006 /* Make sure we are using a valid rar index range */
2007 if (index >= rar_entries) {
2008 DEBUGOUT1("RAR index %d is out of range.\n", index);
2009 return IXGBE_ERR_INVALID_ARGUMENT;
2010 }
2011
2012 /* setup VMDq pool selection before this RAR gets enabled */
2013 hw->mac.ops.set_vmdq(hw, index, vmdq);
2014
2015 /*
2016 * HW expects these in little endian so we reverse the byte
2017 * order from network order (big endian) to little endian
2018 */
2019 rar_low = ((u32)addr[0] |
2020 ((u32)addr[1] << 8) |
2021 ((u32)addr[2] << 16) |
2022 ((u32)addr[3] << 24));
2023 /*
2024 * Some parts put the VMDq setting in the extra RAH bits,
2025 * so save everything except the lower 16 bits that hold part
2026 * of the address and the address valid bit.
2027 */
2028 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2029 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2030 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2031
2032 if (enable_addr != 0)
2033 rar_high |= IXGBE_RAH_AV;
2034
2035 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2036 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2037
2038 return IXGBE_SUCCESS;
2039 }
2040
2041 /**
2042 * ixgbe_clear_rar_generic - Remove Rx address register
2043 * @hw: pointer to hardware structure
2044 * @index: Receive address register to write
2045 *
2046 * Clears an ethernet address from a receive address register.
2047 **/
2048 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2049 {
2050 u32 rar_high;
2051 u32 rar_entries = hw->mac.num_rar_entries;
2052
2053 DEBUGFUNC("ixgbe_clear_rar_generic");
2054
2055 /* Make sure we are using a valid rar index range */
2056 if (index >= rar_entries) {
2057 DEBUGOUT1("RAR index %d is out of range.\n", index);
2058 return IXGBE_ERR_INVALID_ARGUMENT;
2059 }
2060
2061 /*
2062 * Some parts put the VMDq setting in the extra RAH bits,
2063 * so save everything except the lower 16 bits that hold part
2064 * of the address and the address valid bit.
2065 */
2066 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2067 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2068
2069 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2070 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2071
2072 /* clear VMDq pool/queue selection for this RAR */
2073 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2074
2075 return IXGBE_SUCCESS;
2076 }
2077
2078 /**
2079 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2080 * @hw: pointer to hardware structure
2081 *
2082 * Places the MAC address in receive address register 0 and clears the rest
2083 * of the receive address registers. Clears the multicast table. Assumes
2084 * the receiver is in reset when the routine is called.
2085 **/
2086 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2087 {
2088 u32 i;
2089 u32 rar_entries = hw->mac.num_rar_entries;
2090
2091 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2092
2093 /*
2094 * If the current mac address is valid, assume it is a software override
2095 * to the permanent address.
2096 * Otherwise, use the permanent address from the eeprom.
2097 */
2098 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2099 IXGBE_ERR_INVALID_MAC_ADDR) {
2100 /* Get the MAC address from the RAR0 for later reference */
2101 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2102
2103 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2104 hw->mac.addr[0], hw->mac.addr[1],
2105 hw->mac.addr[2]);
2106 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2107 hw->mac.addr[4], hw->mac.addr[5]);
2108 } else {
2109 /* Setup the receive address. */
2110 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2111 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2112 hw->mac.addr[0], hw->mac.addr[1],
2113 hw->mac.addr[2]);
2114 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2115 hw->mac.addr[4], hw->mac.addr[5]);
2116
2117 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2118
2119 /* clear VMDq pool/queue selection for RAR 0 */
2120 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2121 }
2122 hw->addr_ctrl.overflow_promisc = 0;
2123
2124 hw->addr_ctrl.rar_used_count = 1;
2125
2126 /* Zero out the other receive addresses. */
2127 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2128 for (i = 1; i < rar_entries; i++) {
2129 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2130 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2131 }
2132
2133 /* Clear the MTA */
2134 hw->addr_ctrl.mta_in_use = 0;
2135 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2136
2137 DEBUGOUT(" Clearing MTA\n");
2138 for (i = 0; i < hw->mac.mcft_size; i++)
2139 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2140
2141 ixgbe_init_uta_tables(hw);
2142
2143 return IXGBE_SUCCESS;
2144 }
2145
2146 /**
2147 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2148 * @hw: pointer to hardware structure
2149 * @addr: new address
2150 *
2151 * Adds it to unused receive address register or goes into promiscuous mode.
2152 **/
2153 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2154 {
2155 u32 rar_entries = hw->mac.num_rar_entries;
2156 u32 rar;
2157
2158 DEBUGFUNC("ixgbe_add_uc_addr");
2159
2160 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2161 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2162
2163 /*
2164 * Place this address in the RAR if there is room,
2165 * else put the controller into promiscuous mode
2166 */
2167 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2168 rar = hw->addr_ctrl.rar_used_count;
2169 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2170 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2171 hw->addr_ctrl.rar_used_count++;
2172 } else {
2173 hw->addr_ctrl.overflow_promisc++;
2174 }
2175
2176 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2177 }
2178
2179 /**
2180 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2181 * @hw: pointer to hardware structure
2182 * @addr_list: the list of new addresses
2183 * @addr_count: number of addresses
2184 * @next: iterator function to walk the address list
2185 *
2186 * The given list replaces any existing list. Clears the secondary addrs from
2187 * receive address registers. Uses unused receive address registers for the
2188 * first secondary addresses, and falls back to promiscuous mode as needed.
2189 *
2190 * Drivers using secondary unicast addresses must set user_set_promisc when
2191 * manually putting the device into promiscuous mode.
2192 **/
2193 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2194 u32 addr_count, ixgbe_mc_addr_itr next)
2195 {
2196 u8 *addr;
2197 u32 i;
2198 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2199 u32 uc_addr_in_use;
2200 u32 fctrl;
2201 u32 vmdq;
2202
2203 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2204
2205 /*
2206 * Clear accounting of old secondary address list,
2207 * don't count RAR[0]
2208 */
2209 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2210 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2211 hw->addr_ctrl.overflow_promisc = 0;
2212
2213 /* Zero out the other receive addresses */
2214 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2215 for (i = 0; i < uc_addr_in_use; i++) {
2216 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2217 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2218 }
2219
2220 /* Add the new addresses */
2221 for (i = 0; i < addr_count; i++) {
2222 DEBUGOUT(" Adding the secondary addresses:\n");
2223 addr = next(hw, &addr_list, &vmdq);
2224 ixgbe_add_uc_addr(hw, addr, vmdq);
2225 }
2226
2227 if (hw->addr_ctrl.overflow_promisc) {
2228 /* enable promisc if not already in overflow or set by user */
2229 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2230 DEBUGOUT(" Entering address overflow promisc mode\n");
2231 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2232 fctrl |= IXGBE_FCTRL_UPE;
2233 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2234 }
2235 } else {
2236 /* only disable if set by overflow, not by user */
2237 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2238 DEBUGOUT(" Leaving address overflow promisc mode\n");
2239 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2240 fctrl &= ~IXGBE_FCTRL_UPE;
2241 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2242 }
2243 }
2244
2245 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2246 return IXGBE_SUCCESS;
2247 }
2248
2249 /**
2250 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2251 * @hw: pointer to hardware structure
2252 * @mc_addr: the multicast address
2253 *
2254 * Extracts the 12 bits, from a multicast address, to determine which
2255 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2256 * incoming rx multicast addresses, to determine the bit-vector to check in
2257 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2258 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2259 * to mc_filter_type.
2260 **/
2261 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2262 {
2263 u32 vector = 0;
2264
2265 DEBUGFUNC("ixgbe_mta_vector");
2266
2267 switch (hw->mac.mc_filter_type) {
2268 case 0: /* use bits [47:36] of the address */
2269 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2270 break;
2271 case 1: /* use bits [46:35] of the address */
2272 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2273 break;
2274 case 2: /* use bits [45:34] of the address */
2275 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2276 break;
2277 case 3: /* use bits [43:32] of the address */
2278 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2279 break;
2280 default: /* Invalid mc_filter_type */
2281 DEBUGOUT("MC filter type param set incorrectly\n");
2282 ASSERT(0);
2283 break;
2284 }
2285
2286 /* vector can only be 12-bits or boundary will be exceeded */
2287 vector &= 0xFFF;
2288 return vector;
2289 }
2290
/**
 *  ixgbe_set_mta - Set bit-vector in multicast table shadow
 *  @hw: pointer to hardware structure
 *  @mc_addr: multicast address to hash into the table
 *
 *  Sets the corresponding bit in the driver's MTA shadow copy
 *  (hw->mac.mta_shadow) and bumps the in-use counter.  The shadow is
 *  written out to the hardware MTA registers by the caller (see
 *  ixgbe_update_mc_addr_list_generic).
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	/* 12-bit hash of the address, per MCSTCTRL.MO filter type */
	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
2324
/**
 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 *  @hw: pointer to hardware structure
 *  @mc_addr_list: the list of new multicast addresses
 *  @mc_addr_count: number of addresses
 *  @next: iterator function to walk the multicast address list
 *  @clear: flag, when set clears the table beforehand
 *
 *  When the clear flag is set, the given list replaces any existing list.
 *  Hashes the given addresses into the multicast table shadow, then writes
 *  the whole shadow to the hardware MTA registers and enables the filter
 *  (MCSTCTRL.MFE) if any hash bit is in use.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow; hardware registers are untouched until below */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: flush the full shadow out to the MTA register array */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2376
2377 /**
2378 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2379 * @hw: pointer to hardware structure
2380 *
2381 * Enables multicast address in RAR and the use of the multicast hash table.
2382 **/
2383 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2384 {
2385 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2386
2387 DEBUGFUNC("ixgbe_enable_mc_generic");
2388
2389 if (a->mta_in_use > 0)
2390 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2391 hw->mac.mc_filter_type);
2392
2393 return IXGBE_SUCCESS;
2394 }
2395
2396 /**
2397 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2398 * @hw: pointer to hardware structure
2399 *
2400 * Disables multicast address in RAR and the use of the multicast hash table.
2401 **/
2402 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2403 {
2404 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2405
2406 DEBUGFUNC("ixgbe_disable_mc_generic");
2407
2408 if (a->mta_in_use > 0)
2409 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2410
2411 return IXGBE_SUCCESS;
2412 }
2413
/**
 *  ixgbe_fc_enable_generic - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 *
 *  Returns IXGBE_ERR_INVALID_LINK_SETTINGS when pause_time is zero or a
 *  per-TC water mark pair is inconsistent, IXGBE_ERR_CONFIG for an unknown
 *  fc mode, IXGBE_SUCCESS otherwise.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		/* Only TCs with tx-pause and a high water mark are checked */
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use (may update hw->fc.current_mode) */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/*
			 * Water marks shifted left by 10 to match the
			 * register granularity -- presumably KB-to-byte
			 * conversion; TODO confirm against the datasheet.
			 */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the maximum FCRTH value.  This allows the Tx
			 * switch to function even under heavy Rx workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2544
2545 /**
2546 * ixgbe_negotiate_fc - Negotiate flow control
2547 * @hw: pointer to hardware structure
2548 * @adv_reg: flow control advertised settings
2549 * @lp_reg: link partner's flow control settings
2550 * @adv_sym: symmetric pause bit in advertisement
2551 * @adv_asm: asymmetric pause bit in advertisement
2552 * @lp_sym: symmetric pause bit in link partner advertisement
2553 * @lp_asm: asymmetric pause bit in link partner advertisement
2554 *
2555 * Find the intersection between advertised settings and link partner's
2556 * advertised settings
2557 **/
2558 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2559 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2560 {
2561 if ((!(adv_reg)) || (!(lp_reg)))
2562 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2563
2564 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2565 /*
2566 * Now we need to check if the user selected Rx ONLY
2567 * of pause frames. In this case, we had to advertise
2568 * FULL flow control because we could not advertise RX
2569 * ONLY. Hence, we must now check to see if we need to
2570 * turn OFF the TRANSMISSION of PAUSE frames.
2571 */
2572 if (hw->fc.requested_mode == ixgbe_fc_full) {
2573 hw->fc.current_mode = ixgbe_fc_full;
2574 DEBUGOUT("Flow Control = FULL.\n");
2575 } else {
2576 hw->fc.current_mode = ixgbe_fc_rx_pause;
2577 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2578 }
2579 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2580 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2581 hw->fc.current_mode = ixgbe_fc_tx_pause;
2582 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2583 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2584 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2585 hw->fc.current_mode = ixgbe_fc_rx_pause;
2586 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2587 } else {
2588 hw->fc.current_mode = ixgbe_fc_none;
2589 DEBUGOUT("Flow Control = NONE.\n");
2590 }
2591 return IXGBE_SUCCESS;
2592 }
2593
2594 /**
2595 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2596 * @hw: pointer to hardware structure
2597 *
2598 * Enable flow control according on 1 gig fiber.
2599 **/
2600 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2601 {
2602 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2603 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2604
2605 /*
2606 * On multispeed fiber at 1g, bail out if
2607 * - link is up but AN did not complete, or if
2608 * - link is up and AN completed but timed out
2609 */
2610
2611 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2612 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2613 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2614 goto out;
2615
2616 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2617 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2618
2619 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2620 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2621 IXGBE_PCS1GANA_ASM_PAUSE,
2622 IXGBE_PCS1GANA_SYM_PAUSE,
2623 IXGBE_PCS1GANA_ASM_PAUSE);
2624
2625 out:
2626 return ret_val;
2627 }
2628
/**
 *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to IEEE clause 37.
 *
 *  Returns IXGBE_ERR_FC_NOT_NEGOTIATED if backplane autoneg has not
 *  completed (or, on 82599, if the link partner is not AN capable).
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
		goto out;

	/* 82599 reports partner AN capability in the LINKS2 register */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
			goto out;
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}
2668
2669 /**
2670 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2671 * @hw: pointer to hardware structure
2672 *
2673 * Enable flow control according to IEEE clause 37.
2674 **/
2675 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2676 {
2677 u16 technology_ability_reg = 0;
2678 u16 lp_technology_ability_reg = 0;
2679
2680 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2681 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2682 &technology_ability_reg);
2683 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2684 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2685 &lp_technology_ability_reg);
2686
2687 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2688 (u32)lp_technology_ability_reg,
2689 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2690 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2691 }
2692
/**
 *  ixgbe_fc_autoneg - Configure flow control
 *  @hw: pointer to hardware structure
 *
 *  Compares our advertised flow control capabilities to those advertised by
 *  our link partner, and determines the proper flow control mode to use.
 *  On any failure (autoneg disabled, link down, media-specific negotiation
 *  error) current_mode falls back to the user's requested_mode.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	/* Start pessimistic: any bail-out below keeps this error code */
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg)
		goto out;

	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (!link_up)
		goto out;

	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber:
		/* Fiber negotiation only applies at 1G (PCS1G registers) */
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = TRUE;
	} else {
		/* Negotiation failed or skipped: honor the requested mode */
		hw->fc.fc_was_autonegged = FALSE;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}
2751
/**
 *  ixgbe_disable_pcie_master - Disable PCI-express master access
 *  @hw: pointer to hardware structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 *  is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
		goto out;

	/*
	 * Poll for master request bit to clear; waits up to
	 * IXGBE_PCI_MASTER_DISABLE_TIMEOUT * 100 usec in total.
	 */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
		      IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
2810
/**
 *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SWFW semaphore through the GSSR register for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash).  Retries for up to
 *  200 * 5 ms before giving up with IXGBE_ERR_SWFW_SYNC.
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;
	/* Firmware's bits for the same resources sit 5 positions higher */
	u32 fwmask = mask << 5;
	s32 timeout = 200;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	while (timeout) {
		/*
		 * SW EEPROM semaphore bit is used for access to all
		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		/* Resource free on both the SW and FW side: claim it.
		 * Note: we break while still holding the EEPROM semaphore. */
		if (!(gssr & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask) or other software
		 * thread currently using resource (swmask)
		 */
		ixgbe_release_eeprom_semaphore(hw);
		msec_delay(5);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		return IXGBE_ERR_SWFW_SYNC;
	}

	/* Still under the EEPROM semaphore: mark the resource as ours */
	gssr |= swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
	return IXGBE_SUCCESS;
}
2860
2861 /**
2862 * ixgbe_release_swfw_sync - Release SWFW semaphore
2863 * @hw: pointer to hardware structure
2864 * @mask: Mask to specify which semaphore to release
2865 *
2866 * Releases the SWFW semaphore through the GSSR register for the specified
2867 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2868 **/
2869 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2870 {
2871 u32 gssr;
2872 u32 swmask = mask;
2873
2874 DEBUGFUNC("ixgbe_release_swfw_sync");
2875
2876 ixgbe_get_eeprom_semaphore(hw);
2877
2878 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2879 gssr &= ~swmask;
2880 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2881
2882 ixgbe_release_eeprom_semaphore(hw);
2883 }
2884
/**
 *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
 *  @hw: pointer to hardware structure
 *
 *  Stops the receive data path and waits for the HW to internally empty
 *  the Rx security block.  Polls SECRXSTAT for up to 40 ms; a timeout is
 *  logged but not treated as an error.
 **/
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL 40

	int i;
	int secrxreg;

	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");


	/* Request the security block to stop accepting Rx traffic */
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	/* Wait (up to 40 x 1 ms) for the block to report ready */
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			usec_delay(1000);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		DEBUGOUT("Rx unit being enabled before security "
			 "path fully disabled. Continuing with init.\n");

	return IXGBE_SUCCESS;
}
2921
2922 /**
2923 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2924 * @hw: pointer to hardware structure
2925 *
2926 * Enables the receive data path.
2927 **/
2928 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2929 {
2930 int secrxreg;
2931
2932 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2933
2934 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2935 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2936 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2937 IXGBE_WRITE_FLUSH(hw);
2938
2939 return IXGBE_SUCCESS;
2940 }
2941
/**
 *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
 *  @hw: pointer to hardware structure
 *  @regval: register value to write to RXCTRL
 *
 *  Enables the Rx DMA unit.  The caller supplies the complete RXCTRL
 *  contents (presumably including the receive-enable bit -- the value is
 *  written verbatim, nothing is OR'd in here).
 **/
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
	DEBUGFUNC("ixgbe_enable_rx_dma_generic");

	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	return IXGBE_SUCCESS;
}
2957
/**
 *  ixgbe_blink_led_start_generic - Blink LED based on index.
 *  @hw: pointer to hardware structure
 *  @index: led number to blink
 *
 *  Puts the selected LED into blink mode.  LEDs only auto-blink while
 *  link is up, so link is forced up (AUTOC.FLU) first if it is down.
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

	if (!link_up) {
		/*
		 * NOTE(review): AUTOC is written directly here; on parts
		 * where AUTOC writes require the SW/FW semaphore this may
		 * need a locked access path -- confirm per-MAC requirement.
		 */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(10);
	}

	/* Select blink mode for this LED, leaving the other LEDs untouched */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}
2993
/**
 *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 *  @hw: pointer to hardware structure
 *  @index: led number to stop blinking
 *
 *  Takes the LED out of blink mode and restores it to link/activity
 *  indication; also undoes the forced-link-up (AUTOC.FLU) that
 *  ixgbe_blink_led_start_generic may have applied.
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_blink_led_stop_generic");


	/* Drop forced link-up and restart autonegotiation */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Clear blink mode and select link/activity for this LED */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}
3019
/**
 *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 *  @hw: pointer to hardware structure
 *  @san_mac_offset: SAN MAC address offset
 *
 *  This function will read the EEPROM location for the SAN MAC address
 *  pointer, and returns the value at that location.  This is used in both
 *  get and set mac_addr routines.  Callers treat an offset of 0 or 0xFFFF
 *  as "no SAN addresses present".  Always returns IXGBE_SUCCESS.
 **/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset)
{
	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.
	 */
	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);

	return IXGBE_SUCCESS;
}
3042
3043 /**
3044 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3045 * @hw: pointer to hardware structure
3046 * @san_mac_addr: SAN MAC address
3047 *
3048 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3049 * per-port, so set_lan_id() must be called before reading the addresses.
3050 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3051 * upon for non-SFP connections, so we must call it here.
3052 **/
3053 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3054 {
3055 u16 san_mac_data, san_mac_offset;
3056 u8 i;
3057
3058 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3059
3060 /*
3061 * First read the EEPROM pointer to see if the MAC addresses are
3062 * available. If they're not, no point in calling set_lan_id() here.
3063 */
3064 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3065
3066 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3067 /*
3068 * No addresses available in this EEPROM. It's not an
3069 * error though, so just wipe the local address and return.
3070 */
3071 for (i = 0; i < 6; i++)
3072 san_mac_addr[i] = 0xFF;
3073
3074 goto san_mac_addr_out;
3075 }
3076
3077 /* make sure we know which port we need to program */
3078 hw->mac.ops.set_lan_id(hw);
3079 /* apply the port offset to the address offset */
3080 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3081 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3082 for (i = 0; i < 3; i++) {
3083 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
3084 san_mac_addr[i * 2] = (u8)(san_mac_data);
3085 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3086 san_mac_offset++;
3087 }
3088
3089 san_mac_addr_out:
3090 return IXGBE_SUCCESS;
3091 }
3092
3093 /**
3094 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3095 * @hw: pointer to hardware structure
3096 * @san_mac_addr: SAN MAC address
3097 *
3098 * Write a SAN MAC address to the EEPROM.
3099 **/
3100 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3101 {
3102 s32 status = IXGBE_SUCCESS;
3103 u16 san_mac_data, san_mac_offset;
3104 u8 i;
3105
3106 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3107
3108 /* Look for SAN mac address pointer. If not defined, return */
3109 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3110
3111 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3112 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3113 goto san_mac_addr_out;
3114 }
3115
3116 /* Make sure we know which port we need to write */
3117 hw->mac.ops.set_lan_id(hw);
3118 /* Apply the port offset to the address offset */
3119 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3120 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3121
3122 for (i = 0; i < 3; i++) {
3123 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3124 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3125 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3126 san_mac_offset++;
3127 }
3128
3129 san_mac_addr_out:
3130 return status;
3131 }
3132
3133 /**
3134 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3135 * @hw: pointer to hardware structure
3136 *
3137 * Read PCIe configuration space, and get the MSI-X vector count from
3138 * the capabilities table.
3139 **/
3140 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3141 {
3142 u16 msix_count = 1;
3143 u16 max_msix_count;
3144 u16 pcie_offset;
3145
3146 switch (hw->mac.type) {
3147 case ixgbe_mac_82598EB:
3148 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3149 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3150 break;
3151 case ixgbe_mac_82599EB:
3152 case ixgbe_mac_X540:
3153 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3154 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3155 break;
3156 default:
3157 return msix_count;
3158 }
3159
3160 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3161 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3162 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3163
3164 /* MSI-X count is zero-based in HW */
3165 msix_count++;
3166
3167 if (msix_count > max_msix_count)
3168 msix_count = max_msix_count;
3169
3170 return msix_count;
3171 }
3172
3173 /**
3174 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3175 * @hw: pointer to hardware structure
3176 * @addr: Address to put into receive address register
3177 * @vmdq: VMDq pool to assign
3178 *
3179 * Puts an ethernet address into a receive address register, or
3180 * finds the rar that it is aleady in; adds to the pool list
3181 **/
3182 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3183 {
3184 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3185 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3186 u32 rar;
3187 u32 rar_low, rar_high;
3188 u32 addr_low, addr_high;
3189
3190 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3191
3192 /* swap bytes for HW little endian */
3193 addr_low = addr[0] | (addr[1] << 8)
3194 | (addr[2] << 16)
3195 | (addr[3] << 24);
3196 addr_high = addr[4] | (addr[5] << 8);
3197
3198 /*
3199 * Either find the mac_id in rar or find the first empty space.
3200 * rar_highwater points to just after the highest currently used
3201 * rar in order to shorten the search. It grows when we add a new
3202 * rar to the top.
3203 */
3204 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3205 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3206
3207 if (((IXGBE_RAH_AV & rar_high) == 0)
3208 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3209 first_empty_rar = rar;
3210 } else if ((rar_high & 0xFFFF) == addr_high) {
3211 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3212 if (rar_low == addr_low)
3213 break; /* found it already in the rars */
3214 }
3215 }
3216
3217 if (rar < hw->mac.rar_highwater) {
3218 /* already there so just add to the pool bits */
3219 ixgbe_set_vmdq(hw, rar, vmdq);
3220 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3221 /* stick it into first empty RAR slot we found */
3222 rar = first_empty_rar;
3223 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3224 } else if (rar == hw->mac.rar_highwater) {
3225 /* add it to the top of the list and inc the highwater mark */
3226 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3227 hw->mac.rar_highwater++;
3228 } else if (rar >= hw->mac.num_rar_entries) {
3229 return IXGBE_ERR_INVALID_MAC_ADDR;
3230 }
3231
3232 /*
3233 * If we found rar[0], make sure the default pool bit (we use pool 0)
3234 * remains cleared to be sure default pool packets will get delivered
3235 */
3236 if (rar == 0)
3237 ixgbe_clear_vmdq(hw, rar, 0);
3238
3239 return rar;
3240 }
3241
3242 /**
3243 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3244 * @hw: pointer to hardware struct
3245 * @rar: receive address register index to disassociate
3246 * @vmdq: VMDq pool index to remove from the rar
3247 **/
3248 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3249 {
3250 u32 mpsar_lo, mpsar_hi;
3251 u32 rar_entries = hw->mac.num_rar_entries;
3252
3253 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3254
3255 /* Make sure we are using a valid rar index range */
3256 if (rar >= rar_entries) {
3257 DEBUGOUT1("RAR index %d is out of range.\n", rar);
3258 return IXGBE_ERR_INVALID_ARGUMENT;
3259 }
3260
3261 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3262 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3263
3264 if (!mpsar_lo && !mpsar_hi)
3265 goto done;
3266
3267 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3268 if (mpsar_lo) {
3269 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3270 mpsar_lo = 0;
3271 }
3272 if (mpsar_hi) {
3273 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3274 mpsar_hi = 0;
3275 }
3276 } else if (vmdq < 32) {
3277 mpsar_lo &= ~(1 << vmdq);
3278 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3279 } else {
3280 mpsar_hi &= ~(1 << (vmdq - 32));
3281 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3282 }
3283
3284 /* was that the last pool using this rar? */
3285 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3286 hw->mac.ops.clear_rar(hw, rar);
3287 done:
3288 return IXGBE_SUCCESS;
3289 }
3290
3291 /**
3292 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3293 * @hw: pointer to hardware struct
3294 * @rar: receive address register index to associate with a VMDq index
3295 * @vmdq: VMDq pool index
3296 **/
3297 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3298 {
3299 u32 mpsar;
3300 u32 rar_entries = hw->mac.num_rar_entries;
3301
3302 DEBUGFUNC("ixgbe_set_vmdq_generic");
3303
3304 /* Make sure we are using a valid rar index range */
3305 if (rar >= rar_entries) {
3306 DEBUGOUT1("RAR index %d is out of range.\n", rar);
3307 return IXGBE_ERR_INVALID_ARGUMENT;
3308 }
3309
3310 if (vmdq < 32) {
3311 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3312 mpsar |= 1 << vmdq;
3313 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3314 } else {
3315 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3316 mpsar |= 1 << (vmdq - 32);
3317 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3318 }
3319 return IXGBE_SUCCESS;
3320 }
3321
3322 /**
3323 * This function should only be involved in the IOV mode.
3324 * In IOV mode, Default pool is next pool after the number of
3325 * VFs advertized and not 0.
3326 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3327 *
3328 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3329 * @hw: pointer to hardware struct
3330 * @vmdq: VMDq pool index
3331 **/
3332 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3333 {
3334 u32 rar = hw->mac.san_mac_rar_index;
3335
3336 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3337
3338 if (vmdq < 32) {
3339 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3340 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3341 } else {
3342 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3343 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3344 }
3345
3346 return IXGBE_SUCCESS;
3347 }
3348
3349 /**
3350 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3351 * @hw: pointer to hardware structure
3352 **/
3353 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3354 {
3355 int i;
3356
3357 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3358 DEBUGOUT(" Clearing UTA\n");
3359
3360 for (i = 0; i < 128; i++)
3361 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3362
3363 return IXGBE_SUCCESS;
3364 }
3365
3366 /**
3367 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3368 * @hw: pointer to hardware structure
3369 * @vlan: VLAN id to write to VLAN filter
3370 *
3371 * return the VLVF index where this VLAN id should be placed
3372 *
3373 **/
3374 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3375 {
3376 u32 bits = 0;
3377 u32 first_empty_slot = 0;
3378 s32 regindex;
3379
3380 /* short cut the special case */
3381 if (vlan == 0)
3382 return 0;
3383
3384 /*
3385 * Search for the vlan id in the VLVF entries. Save off the first empty
3386 * slot found along the way
3387 */
3388 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3389 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3390 if (!bits && !(first_empty_slot))
3391 first_empty_slot = regindex;
3392 else if ((bits & 0x0FFF) == vlan)
3393 break;
3394 }
3395
3396 /*
3397 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3398 * in the VLVF. Else use the first empty VLVF register for this
3399 * vlan id.
3400 */
3401 if (regindex >= IXGBE_VLVF_ENTRIES) {
3402 if (first_empty_slot)
3403 regindex = first_empty_slot;
3404 else {
3405 DEBUGOUT("No space in VLVF.\n");
3406 regindex = IXGBE_ERR_NO_SPACE;
3407 }
3408 }
3409
3410 return regindex;
3411 }
3412
3413 /**
3414 * ixgbe_set_vfta_generic - Set VLAN filter table
3415 * @hw: pointer to hardware structure
3416 * @vlan: VLAN id to write to VLAN filter
3417 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3418 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3419 *
3420 * Turn on/off specified VLAN in the VLAN filter table.
3421 **/
3422 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3423 bool vlan_on)
3424 {
3425 s32 regindex;
3426 u32 bitindex;
3427 u32 vfta;
3428 u32 targetbit;
3429 s32 ret_val = IXGBE_SUCCESS;
3430 bool vfta_changed = FALSE;
3431
3432 DEBUGFUNC("ixgbe_set_vfta_generic");
3433
3434 if (vlan > 4095)
3435 return IXGBE_ERR_PARAM;
3436
3437 /*
3438 * this is a 2 part operation - first the VFTA, then the
3439 * VLVF and VLVFB if VT Mode is set
3440 * We don't write the VFTA until we know the VLVF part succeeded.
3441 */
3442
3443 /* Part 1
3444 * The VFTA is a bitstring made up of 128 32-bit registers
3445 * that enable the particular VLAN id, much like the MTA:
3446 * bits[11-5]: which register
3447 * bits[4-0]: which bit in the register
3448 */
3449 regindex = (vlan >> 5) & 0x7F;
3450 bitindex = vlan & 0x1F;
3451 targetbit = (1 << bitindex);
3452 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3453
3454 if (vlan_on) {
3455 if (!(vfta & targetbit)) {
3456 vfta |= targetbit;
3457 vfta_changed = TRUE;
3458 }
3459 } else {
3460 if ((vfta & targetbit)) {
3461 vfta &= ~targetbit;
3462 vfta_changed = TRUE;
3463 }
3464 }
3465
3466 /* Part 2
3467 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3468 */
3469 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3470 &vfta_changed);
3471 if (ret_val != IXGBE_SUCCESS)
3472 return ret_val;
3473
3474 if (vfta_changed)
3475 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3476
3477 return IXGBE_SUCCESS;
3478 }
3479
3480 /**
3481 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3482 * @hw: pointer to hardware structure
3483 * @vlan: VLAN id to write to VLAN filter
3484 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3485 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3486 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
3487 * should be changed
3488 *
3489 * Turn on/off specified bit in VLVF table.
3490 **/
3491 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3492 bool vlan_on, bool *vfta_changed)
3493 {
3494 u32 vt;
3495
3496 DEBUGFUNC("ixgbe_set_vlvf_generic");
3497
3498 if (vlan > 4095)
3499 return IXGBE_ERR_PARAM;
3500
3501 /* If VT Mode is set
3502 * Either vlan_on
3503 * make sure the vlan is in VLVF
3504 * set the vind bit in the matching VLVFB
3505 * Or !vlan_on
3506 * clear the pool bit and possibly the vind
3507 */
3508 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3509 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3510 s32 vlvf_index;
3511 u32 bits;
3512
3513 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3514 if (vlvf_index < 0)
3515 return vlvf_index;
3516
3517 if (vlan_on) {
3518 /* set the pool bit */
3519 if (vind < 32) {
3520 bits = IXGBE_READ_REG(hw,
3521 IXGBE_VLVFB(vlvf_index * 2));
3522 bits |= (1 << vind);
3523 IXGBE_WRITE_REG(hw,
3524 IXGBE_VLVFB(vlvf_index * 2),
3525 bits);
3526 } else {
3527 bits = IXGBE_READ_REG(hw,
3528 IXGBE_VLVFB((vlvf_index * 2) + 1));
3529 bits |= (1 << (vind - 32));
3530 IXGBE_WRITE_REG(hw,
3531 IXGBE_VLVFB((vlvf_index * 2) + 1),
3532 bits);
3533 }
3534 } else {
3535 /* clear the pool bit */
3536 if (vind < 32) {
3537 bits = IXGBE_READ_REG(hw,
3538 IXGBE_VLVFB(vlvf_index * 2));
3539 bits &= ~(1 << vind);
3540 IXGBE_WRITE_REG(hw,
3541 IXGBE_VLVFB(vlvf_index * 2),
3542 bits);
3543 bits |= IXGBE_READ_REG(hw,
3544 IXGBE_VLVFB((vlvf_index * 2) + 1));
3545 } else {
3546 bits = IXGBE_READ_REG(hw,
3547 IXGBE_VLVFB((vlvf_index * 2) + 1));
3548 bits &= ~(1 << (vind - 32));
3549 IXGBE_WRITE_REG(hw,
3550 IXGBE_VLVFB((vlvf_index * 2) + 1),
3551 bits);
3552 bits |= IXGBE_READ_REG(hw,
3553 IXGBE_VLVFB(vlvf_index * 2));
3554 }
3555 }
3556
3557 /*
3558 * If there are still bits set in the VLVFB registers
3559 * for the VLAN ID indicated we need to see if the
3560 * caller is requesting that we clear the VFTA entry bit.
3561 * If the caller has requested that we clear the VFTA
3562 * entry bit but there are still pools/VFs using this VLAN
3563 * ID entry then ignore the request. We're not worried
3564 * about the case where we're turning the VFTA VLAN ID
3565 * entry bit on, only when requested to turn it off as
3566 * there may be multiple pools and/or VFs using the
3567 * VLAN ID entry. In that case we cannot clear the
3568 * VFTA bit until all pools/VFs using that VLAN ID have also
3569 * been cleared. This will be indicated by "bits" being
3570 * zero.
3571 */
3572 if (bits) {
3573 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3574 (IXGBE_VLVF_VIEN | vlan));
3575 if ((!vlan_on) && (vfta_changed != NULL)) {
3576 /* someone wants to clear the vfta entry
3577 * but some pools/VFs are still using it.
3578 * Ignore it. */
3579 *vfta_changed = FALSE;
3580 }
3581 } else
3582 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3583 }
3584
3585 return IXGBE_SUCCESS;
3586 }
3587
3588 /**
3589 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3590 * @hw: pointer to hardware structure
3591 *
3592 * Clears the VLAN filer table, and the VMDq index associated with the filter
3593 **/
3594 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3595 {
3596 u32 offset;
3597
3598 DEBUGFUNC("ixgbe_clear_vfta_generic");
3599
3600 for (offset = 0; offset < hw->mac.vft_size; offset++)
3601 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3602
3603 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3604 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3605 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3606 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3607 }
3608
3609 return IXGBE_SUCCESS;
3610 }
3611
3612 /**
3613 * ixgbe_check_mac_link_generic - Determine link and speed status
3614 * @hw: pointer to hardware structure
3615 * @speed: pointer to link speed
3616 * @link_up: TRUE when link is up
3617 * @link_up_wait_to_complete: bool used to wait for link up or not
3618 *
3619 * Reads the links register to determine if link is up and the current speed
3620 **/
3621 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3622 bool *link_up, bool link_up_wait_to_complete)
3623 {
3624 u32 links_reg, links_orig;
3625 u32 i;
3626
3627 DEBUGFUNC("ixgbe_check_mac_link_generic");
3628
3629 /* clear the old state */
3630 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3631
3632 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3633
3634 if (links_orig != links_reg) {
3635 DEBUGOUT2("LINKS changed from %08X to %08X\n",
3636 links_orig, links_reg);
3637 }
3638
3639 if (link_up_wait_to_complete) {
3640 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3641 if (links_reg & IXGBE_LINKS_UP) {
3642 *link_up = TRUE;
3643 break;
3644 } else {
3645 *link_up = FALSE;
3646 }
3647 msec_delay(100);
3648 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3649 }
3650 } else {
3651 if (links_reg & IXGBE_LINKS_UP)
3652 *link_up = TRUE;
3653 else
3654 *link_up = FALSE;
3655 }
3656
3657 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3658 IXGBE_LINKS_SPEED_10G_82599)
3659 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3660 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3661 IXGBE_LINKS_SPEED_1G_82599)
3662 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3663 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3664 IXGBE_LINKS_SPEED_100_82599)
3665 *speed = IXGBE_LINK_SPEED_100_FULL;
3666 else
3667 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3668
3669 return IXGBE_SUCCESS;
3670 }
3671
3672 /**
3673 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3674 * the EEPROM
3675 * @hw: pointer to hardware structure
3676 * @wwnn_prefix: the alternative WWNN prefix
3677 * @wwpn_prefix: the alternative WWPN prefix
3678 *
3679 * This function will read the EEPROM from the alternative SAN MAC address
3680 * block to check the support for the alternative WWNN/WWPN prefix support.
3681 **/
3682 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3683 u16 *wwpn_prefix)
3684 {
3685 u16 offset, caps;
3686 u16 alt_san_mac_blk_offset;
3687
3688 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3689
3690 /* clear output first */
3691 *wwnn_prefix = 0xFFFF;
3692 *wwpn_prefix = 0xFFFF;
3693
3694 /* check if alternative SAN MAC is supported */
3695 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
3696 &alt_san_mac_blk_offset);
3697
3698 if ((alt_san_mac_blk_offset == 0) ||
3699 (alt_san_mac_blk_offset == 0xFFFF))
3700 goto wwn_prefix_out;
3701
3702 /* check capability in alternative san mac address block */
3703 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3704 hw->eeprom.ops.read(hw, offset, &caps);
3705 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3706 goto wwn_prefix_out;
3707
3708 /* get the corresponding prefix for WWNN/WWPN */
3709 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3710 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
3711
3712 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3713 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
3714
3715 wwn_prefix_out:
3716 return IXGBE_SUCCESS;
3717 }
3718
3719 /**
3720 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3721 * @hw: pointer to hardware structure
3722 * @bs: the fcoe boot status
3723 *
3724 * This function will read the FCOE boot status from the iSCSI FCOE block
3725 **/
3726 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3727 {
3728 u16 offset, caps, flags;
3729 s32 status;
3730
3731 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3732
3733 /* clear output first */
3734 *bs = ixgbe_fcoe_bootstatus_unavailable;
3735
3736 /* check if FCOE IBA block is present */
3737 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3738 status = hw->eeprom.ops.read(hw, offset, &caps);
3739 if (status != IXGBE_SUCCESS)
3740 goto out;
3741
3742 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3743 goto out;
3744
3745 /* check if iSCSI FCOE block is populated */
3746 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
3747 if (status != IXGBE_SUCCESS)
3748 goto out;
3749
3750 if ((offset == 0) || (offset == 0xFFFF))
3751 goto out;
3752
3753 /* read fcoe flags in iSCSI FCOE block */
3754 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
3755 status = hw->eeprom.ops.read(hw, offset, &flags);
3756 if (status != IXGBE_SUCCESS)
3757 goto out;
3758
3759 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
3760 *bs = ixgbe_fcoe_bootstatus_enabled;
3761 else
3762 *bs = ixgbe_fcoe_bootstatus_disabled;
3763
3764 out:
3765 return status;
3766 }
3767
3768 /**
3769 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3770 * @hw: pointer to hardware structure
3771 * @enable: enable or disable switch for anti-spoofing
3772 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
3773 *
3774 **/
3775 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
3776 {
3777 int j;
3778 int pf_target_reg = pf >> 3;
3779 int pf_target_shift = pf % 8;
3780 u32 pfvfspoof = 0;
3781
3782 if (hw->mac.type == ixgbe_mac_82598EB)
3783 return;
3784
3785 if (enable)
3786 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
3787
3788 /*
3789 * PFVFSPOOF register array is size 8 with 8 bits assigned to
3790 * MAC anti-spoof enables in each register array element.
3791 */
3792 for (j = 0; j < pf_target_reg; j++)
3793 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
3794
3795 /*
3796 * The PF should be allowed to spoof so that it can support
3797 * emulation mode NICs. Do not set the bits assigned to the PF
3798 */
3799 pfvfspoof &= (1 << pf_target_shift) - 1;
3800 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
3801
3802 /*
3803 * Remaining pools belong to the PF so they do not need to have
3804 * anti-spoofing enabled.
3805 */
3806 for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
3807 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
3808 }
3809
3810 /**
3811 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
3812 * @hw: pointer to hardware structure
3813 * @enable: enable or disable switch for VLAN anti-spoofing
3814 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
3815 *
3816 **/
3817 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3818 {
3819 int vf_target_reg = vf >> 3;
3820 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3821 u32 pfvfspoof;
3822
3823 if (hw->mac.type == ixgbe_mac_82598EB)
3824 return;
3825
3826 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3827 if (enable)
3828 pfvfspoof |= (1 << vf_target_shift);
3829 else
3830 pfvfspoof &= ~(1 << vf_target_shift);
3831 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3832 }
3833
3834 /**
3835 * ixgbe_get_device_caps_generic - Get additional device capabilities
3836 * @hw: pointer to hardware structure
3837 * @device_caps: the EEPROM word with the extra device capabilities
3838 *
3839 * This function will read the EEPROM location for the device capabilities,
3840 * and return the word through device_caps.
3841 **/
3842 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
3843 {
3844 DEBUGFUNC("ixgbe_get_device_caps_generic");
3845
3846 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3847
3848 return IXGBE_SUCCESS;
3849 }
3850
3851 /**
3852 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3853 * @hw: pointer to hardware structure
3854 *
3855 **/
3856 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3857 {
3858 u32 regval;
3859 u32 i;
3860
3861 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3862
3863 /* Enable relaxed ordering */
3864 for (i = 0; i < hw->mac.max_tx_queues; i++) {
3865 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3866 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3867 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3868 }
3869
3870 for (i = 0; i < hw->mac.max_rx_queues; i++) {
3871 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3872 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
3873 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
3874 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3875 }
3876
3877 }
3878
3879 /**
3880 * ixgbe_calculate_checksum - Calculate checksum for buffer
3881 * @buffer: pointer to EEPROM
3882 * @length: size of EEPROM to calculate a checksum for
3883 * Calculates the checksum for some buffer on a specified length. The
3884 * checksum calculated is returned.
3885 **/
3886 static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3887 {
3888 u32 i;
3889 u8 sum = 0;
3890
3891 DEBUGFUNC("ixgbe_calculate_checksum");
3892
3893 if (!buffer)
3894 return 0;
3895
3896 for (i = 0; i < length; i++)
3897 sum += buffer[i];
3898
3899 return (u8) (0 - sum);
3900 }
3901
3902 /**
3903 * ixgbe_host_interface_command - Issue command to manageability block
3904 * @hw: pointer to the HW structure
3905 * @buffer: contains the command to write and where the return status will
3906 * be placed
3907 * @length: length of buffer, must be multiple of 4 bytes
3908 *
3909 * Communicates with the manageability block. On success return IXGBE_SUCCESS
3910 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
3911 **/
3912 static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3913 u32 length)
3914 {
3915 u32 hicr, i, bi;
3916 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
3917 u8 buf_len, dword_len;
3918
3919 s32 ret_val = IXGBE_SUCCESS;
3920
3921 DEBUGFUNC("ixgbe_host_interface_command");
3922
3923 if (length == 0 || length & 0x3 ||
3924 length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3925 DEBUGOUT("Buffer length failure.\n");
3926 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3927 goto out;
3928 }
3929
3930 /* Check that the host interface is enabled. */
3931 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3932 if ((hicr & IXGBE_HICR_EN) == 0) {
3933 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
3934 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3935 goto out;
3936 }
3937
3938 /* Calculate length in DWORDs */
3939 dword_len = length >> 2;
3940
3941 /*
3942 * The device driver writes the relevant command block
3943 * into the ram area.
3944 */
3945 for (i = 0; i < dword_len; i++)
3946 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3947 i, IXGBE_CPU_TO_LE32(buffer[i]));
3948
3949 /* Setting this bit tells the ARC that a new command is pending. */
3950 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
3951
3952 for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
3953 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3954 if (!(hicr & IXGBE_HICR_C))
3955 break;
3956 msec_delay(1);
3957 }
3958
3959 /* Check command successful completion. */
3960 if (i == IXGBE_HI_COMMAND_TIMEOUT ||
3961 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
3962 DEBUGOUT("Command has failed with no status valid.\n");
3963 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3964 goto out;
3965 }
3966
3967 /* Calculate length in DWORDs */
3968 dword_len = hdr_size >> 2;
3969
3970 /* first pull in the header so we know the buffer length */
3971 for (bi = 0; bi < dword_len; bi++) {
3972 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3973 IXGBE_LE32_TO_CPUS(&buffer[bi]);
3974 }
3975
3976 /* If there is any thing in data position pull it in */
3977 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
3978 if (buf_len == 0)
3979 goto out;
3980
3981 if (length < (buf_len + hdr_size)) {
3982 DEBUGOUT("Buffer not large enough for reply message.\n");
3983 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3984 goto out;
3985 }
3986
3987 /* Calculate length in DWORDs, add 3 for odd lengths */
3988 dword_len = (buf_len + 3) >> 2;
3989
3990 /* Pull in the rest of the buffer (bi is where we left off)*/
3991 for (; bi <= dword_len; bi++) {
3992 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3993 IXGBE_LE32_TO_CPUS(&buffer[bi]);
3994 }
3995
3996 out:
3997 return ret_val;
3998 }
3999
4000 /**
4001 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4002 * @hw: pointer to the HW structure
4003 * @maj: driver version major number
4004 * @min: driver version minor number
4005 * @build: driver version build number
4006 * @sub: driver version sub build number
4007 *
4008 * Sends driver version number to firmware through the manageability
4009 * block. On success return IXGBE_SUCCESS
4010 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4011 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4012 **/
4013 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4014 u8 build, u8 sub)
4015 {
4016 struct ixgbe_hic_drv_info fw_cmd;
4017 int i;
4018 s32 ret_val = IXGBE_SUCCESS;
4019
4020 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4021
4022 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4023 != IXGBE_SUCCESS) {
4024 ret_val = IXGBE_ERR_SWFW_SYNC;
4025 goto out;
4026 }
4027
4028 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4029 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4030 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4031 fw_cmd.port_num = (u8)hw->bus.func;
4032 fw_cmd.ver_maj = maj;
4033 fw_cmd.ver_min = min;
4034 fw_cmd.ver_build = build;
4035 fw_cmd.ver_sub = sub;
4036 fw_cmd.hdr.checksum = 0;
4037 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4038 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4039 fw_cmd.pad = 0;
4040 fw_cmd.pad2 = 0;
4041
4042 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4043 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4044 sizeof(fw_cmd));
4045 if (ret_val != IXGBE_SUCCESS)
4046 continue;
4047
4048 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4049 FW_CEM_RESP_STATUS_SUCCESS)
4050 ret_val = IXGBE_SUCCESS;
4051 else
4052 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4053
4054 break;
4055 }
4056
4057 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4058 out:
4059 return ret_val;
4060 }
4061
4062 /**
4063 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4064 * @hw: pointer to hardware structure
4065 * @num_pb: number of packet buffers to allocate
4066 * @headroom: reserve n KB of headroom
4067 * @strategy: packet buffer allocation strategy
4068 **/
4069 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4070 int strategy)
4071 {
4072 u32 pbsize = hw->mac.rx_pb_size;
4073 int i = 0;
4074 u32 rxpktsize, txpktsize, txpbthresh;
4075
4076 /* Reserve headroom */
4077 pbsize -= headroom;
4078
4079 if (!num_pb)
4080 num_pb = 1;
4081
4082 /* Divide remaining packet buffer space amongst the number of packet
4083 * buffers requested using supplied strategy.
4084 */
4085 switch (strategy) {
4086 case PBA_STRATEGY_WEIGHTED:
4087 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4088 * buffer with 5/8 of the packet buffer space.
4089 */
4090 rxpktsize = (pbsize * 5) / (num_pb * 4);
4091 pbsize -= rxpktsize * (num_pb / 2);
4092 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4093 for (; i < (num_pb / 2); i++)
4094 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4095 /* Fall through to configure remaining packet buffers */
4096 case PBA_STRATEGY_EQUAL:
4097 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4098 for (; i < num_pb; i++)
4099 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4100 break;
4101 default:
4102 break;
4103 }
4104
4105 /* Only support an equally distributed Tx packet buffer strategy. */
4106 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4107 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4108 for (i = 0; i < num_pb; i++) {
4109 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4110 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4111 }
4112
4113 /* Clear unused TCs, if any, to zero buffer size*/
4114 for (; i < IXGBE_MAX_PB; i++) {
4115 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4116 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4117 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4118 }
4119 }
4120
4121 /**
4122 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4123 * @hw: pointer to the hardware structure
4124 *
4125 * The 82599 and x540 MACs can experience issues if TX work is still pending
4126 * when a reset occurs. This function prevents this by flushing the PCIe
4127 * buffers on the system.
4128 **/
4129 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4130 {
4131 u32 gcr_ext, hlreg0;
4132
4133 /*
4134 * If double reset is not requested then all transactions should
4135 * already be clear and as such there is no work to do
4136 */
4137 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4138 return;
4139
4140 /*
4141 * Set loopback enable to prevent any transmits from being sent
4142 * should the link come up. This assumes that the RXCTRL.RXEN bit
4143 * has already been cleared.
4144 */
4145 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4146 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4147
4148 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4149 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4150 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4151 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4152
4153 /* Flush all writes and allow 20usec for all transactions to clear */
4154 IXGBE_WRITE_FLUSH(hw);
4155 usec_delay(20);
4156
4157 /* restore previous register values */
4158 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4159 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4160 }
4161
4162