/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_dcb_82599.c 320688 2017-07-05 17:27:03Z erj $*/


#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns statistics for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of Traffic Classes to gather statistics for
 *
 * This function returns the statistics data for each of the Traffic Classes
 * in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
                                 struct ixgbe_hw_stats *stats,
                                 u8 tc_count)
{
        int tc;

        DEBUGFUNC("dcb_get_tc_stats");

        if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
                return IXGBE_ERR_PARAM;

        /* Statistics pertaining to each traffic class */
        for (tc = 0; tc < tc_count; tc++) {
                /* Transmitted Packets */
                stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
                /* Transmitted Bytes (read low first to prevent missed carry) */
                stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
                stats->qbtc[tc] +=
                        (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
                /* Received Packets */
                stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
                /* Received Bytes (read low first to prevent missed carry) */
                stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
                stats->qbrc[tc] +=
                        (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

                /* Received Dropped Packet */
                stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of Traffic Classes to gather statistics for
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
                                  struct ixgbe_hw_stats *stats,
                                  u8 tc_count)
{
        int tc;

        DEBUGFUNC("dcb_get_pfc_stats");

        if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
                return IXGBE_ERR_PARAM;

        for (tc = 0; tc < tc_count; tc++) {
                /* Priority XOFF Transmitted */
                stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
                /* Priority XOFF Received */
                stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
        }

        return IXGBE_SUCCESS;
}
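
/*
 * Illustrative usage sketch (not part of the driver): a caller that
 * accumulates the per-TC counters on a periodic statistics tick might look
 * roughly like the following.  The names "sc->hw", "sc->stats" and
 * "sc->num_tcs" are hypothetical stand-ins for whatever the driver's softc
 * actually provides.
 *
 *	ixgbe_dcb_get_tc_stats_82599(&sc->hw, &sc->stats, sc->num_tcs);
 *	ixgbe_dcb_get_pfc_stats_82599(&sc->hw, &sc->stats, sc->num_tcs);
 *
 * Both helpers add the current hardware counter values into the caller's
 * ixgbe_hw_stats structure, so they are meant to be called repeatedly.
 */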

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                      u16 *max, u8 *bwg_id, u8 *tsa,
                                      u8 *map)
{
        u32 reg = 0;
        u32 credit_refill = 0;
        u32 credit_max = 0;
        u8 i = 0;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; WSP)
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        /*
         * Map all UPs to TCs.  up_to_tc_bitmap for each TC has the
         * corresponding bits set for the UPs that need to be mapped to that
         * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
         * up_to_tc_bitmap value for that TC will be 11000000 in binary.
         */
        reg = 0;
        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
                reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

        IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
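        /*
         * Worked example (illustrative only, assuming the usual 3-bit
         * per-UP field width behind IXGBE_RTRUP2TC_UP_SHIFT): a map of
         * {0, 0, 1, 1, 2, 2, 3, 3} packs as
         *   (0 << 0) | (0 << 3) | (1 << 6) | (1 << 9) |
         *   (2 << 12) | (2 << 15) | (3 << 18) | (3 << 21) = 0x006D2240,
         * i.e. priorities 0-1 land on TC0, 2-3 on TC1, 4-5 on TC2 and
         * 6-7 on TC3.
         */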

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                credit_refill = refill[i];
                credit_max = max[i];
                reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

                reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTRPT4C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
        }

        /*
         * Configure Rx packet plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                           u16 *max, u8 *bwg_id, u8 *tsa)
{
        u32 reg, max_credits;
        u8 i;

        /* Clear the per-Tx queue credits; we use per-TC instead */
        for (i = 0; i < 128; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
                IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
        }

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                max_credits = max[i];
                reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
                reg |= refill[i];
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTDT2C_GSP;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTDT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
        }

        /*
         * Configure Tx descriptor plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                           u16 *max, u8 *bwg_id, u8 *tsa,
                                           u8 *map)
{
        u32 reg;
        u8 i;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; SP; arb delay)
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
              IXGBE_RTTPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        /*
         * Map all UPs to TCs.  up_to_tc_bitmap for each TC has the
         * corresponding bits set for the UPs that need to be mapped to that
         * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
         * up_to_tc_bitmap value for that TC will be 11000000 in binary.
         */
        reg = 0;
        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
                reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

        IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
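        /*
         * Note: RTTUP2TC uses the same 3-bit-per-user-priority encoding as
         * RTRUP2TC in the Rx arbiter above, so the worked example there
         * applies here unchanged (only the shift macro name differs).
         */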

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                reg = refill[i];
                reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTPT2C_GSP;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTPT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
        }

        /*
         * Configure Tx packet plane (recycle mode; SP; arb delay) and
         * enable arbiter
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
        u32 i, j, fcrtl, reg;
        u8 max_tc = 0;

        /* Enable Transmit Priority Flow Control */
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

        /* Enable Receive Priority Flow Control */
        reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
        reg |= IXGBE_MFLCN_DPF;

        /*
         * X540 and newer support per-TC Rx priority flow control, so clear
         * all TCs first and enable only those that should be enabled.
         */
        reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

        if (hw->mac.type >= ixgbe_mac_X540)
                reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

        if (pfc_en)
                reg |= IXGBE_MFLCN_RPFCE;

        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
                if (map[i] > max_tc)
                        max_tc = map[i];
        }

        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i <= max_tc; i++) {
                int enabled = 0;

                for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
                        if ((map[j] == i) && (pfc_en & (1 << j))) {
                                enabled = 1;
                                break;
                        }
                }

                if (enabled) {
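                        /*
                         * Note on units (an assumption inferred from the
                         * shifts below): fc.high_water/fc.low_water are
                         * kept in kilobyte granularity, so the << 10
                         * converts them to the byte-based values the
                         * FCRTH/FCRTL registers expect.  E.g. a 384 KB
                         * high water mark becomes 384 << 10 = 0x60000.
                         */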
                        reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                        fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
                } else {
                        /*
                         * In order to prevent Tx hangs when the internal Tx
                         * switch is enabled we must set the high water mark
                         * to the Rx packet buffer size - 24KB.  This allows
                         * the Tx switch to function even under heavy Rx
                         * workloads.
                         */
                        reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                }

                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
        }

        for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
        }

        /* Configure pause time (2 TCs per register) */
        reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
        for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
                IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

        /* Configure flow control refresh threshold value */
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
                                    struct ixgbe_dcb_config *dcb_config)
{
        u32 reg = 0;
        u8 i = 0;
        u8 tc_count = 8;
        bool vt_mode = FALSE;

        if (dcb_config != NULL) {
                tc_count = dcb_config->num_tcs.pg_tcs;
                vt_mode = dcb_config->vt_mode;
        }

        if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
                return IXGBE_ERR_PARAM;

        if (tc_count == 8 && vt_mode == FALSE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Set all 16 queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++) {
                        reg = 0x01010101 * (i / 4);
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                }
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Set all queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 * Tx queues are allocated non-uniformly to TCs:
                 * 32, 32, 16, 16, 8, 8, 8, 8.
                 */
                for (i = 0; i < 32; i++) {
                        if (i < 8)
                                reg = 0x00000000;
                        else if (i < 16)
                                reg = 0x01010101;
                        else if (i < 20)
                                reg = 0x02020202;
                        else if (i < 24)
                                reg = 0x03030303;
                        else if (i < 26)
                                reg = 0x04040404;
                        else if (i < 28)
                                reg = 0x05050505;
                        else if (i < 30)
                                reg = 0x06060606;
                        else
                                reg = 0x07070707;
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
                }
        } else if (tc_count == 4 && vt_mode == FALSE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Set all 16 queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++) {
                        if (i % 8 > 3)
                                /* In 4 TC mode, odd 16-queue ranges are
                                 * not used.
                                 */
                                continue;
                        reg = 0x01010101 * (i / 8);
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                }
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Set all queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 * Tx queues are allocated non-uniformly to TCs:
                 * 64, 32, 16, 16.
                 */
                for (i = 0; i < 32; i++) {
                        if (i < 16)
                                reg = 0x00000000;
                        else if (i < 24)
                                reg = 0x01010101;
                        else if (i < 28)
                                reg = 0x02020202;
                        else
                                reg = 0x03030303;
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
                }
        } else if (tc_count == 4 && vt_mode == TRUE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Queue indexing in 32-VF with DCB mode maps 4 TCs to each
                 * pool.  Set all 32 queues of each TC across pools to the
                 * same stat with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Queue indexing in 32-VF with DCB mode maps 4 TCs to each
                 * pool.  Set all 32 queues of each TC across pools to the
                 * same stat with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
                           struct ixgbe_dcb_config *dcb_config)
{
        u32 reg;
        u32 q;

        /* Disable the Tx desc arbiter so that MTQC can be changed */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
        if (dcb_config->num_tcs.pg_tcs == 8) {
                /* Enable DCB for Rx with 8 TCs */
                switch (reg & IXGBE_MRQC_MRQE_MASK) {
                case 0:
                case IXGBE_MRQC_RT4TCEN:
                        /* RSS disabled cases */
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RT8TCEN;
                        break;
                case IXGBE_MRQC_RSSEN:
                case IXGBE_MRQC_RTRSS4TCEN:
                        /* RSS enabled cases */
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RTRSS8TCEN;
                        break;
                default:
                        /*
                         * Unsupported value, assume stale data and
                         * overwrite with the no-RSS setting.
                         */
                        ASSERT(0);
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RT8TCEN;
                }
        }
        if (dcb_config->num_tcs.pg_tcs == 4) {
                /* We support both VT-on and VT-off with 4 TCs. */
                if (dcb_config->vt_mode)
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_VMDQRT4TCEN;
                else
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RTRSS4TCEN;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

        /* Enable DCB for Tx with 8 TCs */
        if (dcb_config->num_tcs.pg_tcs == 8)
                reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
        else {
                /* We support both VT-on and VT-off with 4 TCs. */
                reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
                if (dcb_config->vt_mode)
                        reg |= IXGBE_MTQC_VT_ENA;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

        /* Disable drop for all queues */
        for (q = 0; q < 128; q++)
                IXGBE_WRITE_REG(hw, IXGBE_QDE,
                                (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

        /* Enable the Tx desc arbiter */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        /* Enable Security TX Buffer IFG for DCB */
        reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
        reg |= IXGBE_SECTX_DCB;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure DCB settings and enable DCB mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
                              u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
                              u8 *map)
{
        UNREFERENCED_1PARAMETER(link_speed);

        ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
                                          map);
        ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
                                               tsa);
        ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
                                               tsa, map);

        return IXGBE_SUCCESS;
}
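
/*
 * Illustrative call sequence (a sketch, not a supported API): bringing up a
 * minimal 4-TC configuration with this file's helpers might look roughly as
 * follows.  The credit values below are placeholders; real drivers derive
 * refill/max/bwg_id/tsa/map from their DCB configuration, and the enum value
 * ixgbe_dcb_tsa_ets is assumed to come from ixgbe_dcb.h.
 *
 *	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 32, 32, 32, 32 };
 *	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]    = { 512, 512, 512, 512 };
 *	u8  bwg_id[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0, 0, 0, 0 };
 *	u8  tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]    = { ixgbe_dcb_tsa_ets, ... };
 *	u8  map[IXGBE_DCB_MAX_USER_PRIORITY]    = { 0, 0, 1, 1, 2, 2, 3, 3 };
 *
 *	ixgbe_dcb_config_82599(hw, dcb_config);
 *	ixgbe_dcb_hw_config_82599(hw, link_speed, refill, max, bwg_id, tsa, map);
 *	ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
 *	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
 */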