/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_dcb_82599.c 320688 2017-07-05 17:27:03Z erj $*/


#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: Number of traffic classes to gather statistics for
 *
 * This function returns the status data for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
				 struct ixgbe_hw_stats *stats,
				 u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_tc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	/* Statistics pertaining to each traffic class */
	for (tc = 0; tc < tc_count; tc++) {
		/* Transmitted Packets */
		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
		/* Transmitted Bytes (read low first to prevent missed carry) */
		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
		stats->qbtc[tc] +=
		    (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
		/* Received Packets */
		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
		/* Received Bytes (read low first to prevent missed carry) */
		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
		stats->qbrc[tc] +=
		    (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

		/* Received Dropped Packet */
		stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
	}

	return IXGBE_SUCCESS;
}
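
/*
 * Illustrative sketch only, not part of the driver: shows how the split
 * 32-bit low/high halves of the QBTC/QBRC byte counters read above combine
 * into one 64-bit value.  The IXGBE_DCB_82599_EXAMPLES guard and the
 * function name are hypothetical; they exist only so this sketch stays out
 * of normal builds.
 */
#ifdef IXGBE_DCB_82599_EXAMPLES
static u64
ixgbe_dcb_example_combine_counter(u32 lo, u32 hi)
{
	/* Low half fills bits 31:0, high half fills bits 63:32 */
	return ((u64)hi << 32) | (u64)lo;
}
#endif /* IXGBE_DCB_82599_EXAMPLES */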

/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: Number of traffic classes to gather statistics for
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
				  struct ixgbe_hw_stats *stats,
				  u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_pfc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	for (tc = 0; tc < tc_count; tc++) {
		/* Priority XOFF Transmitted */
		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
		/* Priority XOFF Received */
		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
				      u16 *max, u8 *bwg_id, u8 *tsa,
				      u8 *map)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8 i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/*
	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to
	 * that TC. E.g. if priorities 6 and 7 are to be mapped to a TC
	 * then the up_to_tc_bitmap value for that TC will be 11000000
	 * in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return IXGBE_SUCCESS;
}
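
/*
 * Illustrative sketch only, not part of the driver: shows how the
 * per-priority 'map' array is packed into a single UP-to-TC register value
 * (IXGBE_RTRUP2TC_UP_SHIFT bits per user priority, UP0 in the least
 * significant bits), mirroring the RTRUP2TC/RTTUP2TC loops in this file.
 * For example, map = {0, 0, 0, 0, 0, 0, 1, 1} assigns priorities 6 and 7
 * to TC 1.  The IXGBE_DCB_82599_EXAMPLES guard and the function name are
 * hypothetical.
 */
#ifdef IXGBE_DCB_82599_EXAMPLES
static u32
ixgbe_dcb_example_pack_up2tc(const u8 *map)
{
	u32 reg = 0;
	u8 up;

	for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++)
		reg |= (u32)map[up] << (up * IXGBE_RTRUP2TC_UP_SHIFT);

	return reg;
}
#endif /* IXGBE_DCB_82599_EXAMPLES */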

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa)
{
	u32 reg, max_credits;
	u8 i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= refill[i];
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTDT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa,
					   u8 *map)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/*
	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to
	 * that TC. E.g. if priorities 6 and 7 are to be mapped to a TC
	 * then the up_to_tc_bitmap value for that TC will be 11000000
	 * in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTPT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
	u32 i, j, fcrtl, reg;
	u8 max_tc = 0;

	/* Enable Transmit Priority Flow Control */
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

	/* Enable Receive Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg |= IXGBE_MFLCN_DPF;

	/*
	 * X540 supports per TC Rx priority flow control. So
	 * clear all TCs and only enable those that should be
	 * enabled.
	 */
	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	if (hw->mac.type >= ixgbe_mac_X540)
		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

	if (pfc_en)
		reg |= IXGBE_MFLCN_RPFCE;

	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
		if (map[i] > max_tc)
			max_tc = map[i];
	}

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
			if ((map[j] == i) && (pfc_en & (1 << j))) {
				enabled = 1;
				break;
			}
		}

		if (enabled) {
			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
		} else {
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size minus 24KB.  This
			 * allows the Tx switch to function even under heavy
			 * Rx workloads.
			 */
			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return IXGBE_SUCCESS;
}
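
/*
 * Illustrative sketch only, not part of the driver: restates the PFC
 * enable decision made in the threshold loop above - a traffic class gets
 * the XON/XOFF watermarks only when at least one user priority mapped to
 * it has its bit set in the pfc_en bitmask.  The IXGBE_DCB_82599_EXAMPLES
 * guard and the function name are hypothetical.
 */
#ifdef IXGBE_DCB_82599_EXAMPLES
static bool
ixgbe_dcb_example_tc_pfc_enabled(u8 pfc_en, const u8 *map, u8 tc)
{
	u8 up;

	for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) {
		if (map[up] == tc && (pfc_en & (1 << up)))
			return TRUE;
	}

	return FALSE;
}
#endif /* IXGBE_DCB_82599_EXAMPLES */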

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
				    struct ixgbe_dcb_config *dcb_config)
{
	u32 reg = 0;
	u8 i = 0;
	u8 tc_count = 8;
	bool vt_mode = FALSE;

	if (dcb_config != NULL) {
		tc_count = dcb_config->num_tcs.pg_tcs;
		vt_mode = dcb_config->vt_mode;
	}

	if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
		return IXGBE_ERR_PARAM;

	if (tc_count == 8 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			reg = 0x01010101 * (i / 4);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 32, 32, 16, 16, 8, 8, 8, 8.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 8)
				reg = 0x00000000;
			else if (i < 16)
				reg = 0x01010101;
			else if (i < 20)
				reg = 0x02020202;
			else if (i < 24)
				reg = 0x03030303;
			else if (i < 26)
				reg = 0x04040404;
			else if (i < 28)
				reg = 0x05050505;
			else if (i < 30)
				reg = 0x06060606;
			else
				reg = 0x07070707;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			if (i % 8 > 3)
				/* In 4 TC mode, odd 16-queue ranges are
				 * not used.
				 */
				continue;
			reg = 0x01010101 * (i / 8);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 64, 32, 16, 16.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 16)
				reg = 0x00000000;
			else if (i < 24)
				reg = 0x01010101;
			else if (i < 28)
				reg = 0x02020202;
			else
				reg = 0x03030303;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == TRUE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
		 * pool.  Set all 32 queues of each TC across pools to the
		 * same stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
		 * pool.  Set all 32 queues of each TC across pools to the
		 * same stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
	}

	return IXGBE_SUCCESS;
}
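
/*
 * Illustrative sketch only, not part of the driver: shows why the
 * RQSMR/TQSM loops above multiply the statistics counter index by
 * 0x01010101 - each register maps four queues, one byte per queue, so the
 * multiply replicates the 8-bit index into all four byte lanes.  The
 * IXGBE_DCB_82599_EXAMPLES guard and the function name are hypothetical.
 */
#ifdef IXGBE_DCB_82599_EXAMPLES
static u32
ixgbe_dcb_example_qsm_value(u8 stat_idx)
{
	/* e.g. stat_idx 2 yields 0x02020202 */
	return 0x01010101u * stat_idx;
}
#endif /* IXGBE_DCB_82599_EXAMPLES */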

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
			   struct ixgbe_dcb_config *dcb_config)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	if (dcb_config->num_tcs.pg_tcs == 8) {
		/* Enable DCB for Rx with 8 TCs */
		switch (reg & IXGBE_MRQC_MRQE_MASK) {
		case 0:
		case IXGBE_MRQC_RT4TCEN:
			/* RSS disabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
			break;
		case IXGBE_MRQC_RSSEN:
		case IXGBE_MRQC_RTRSS4TCEN:
			/* RSS enabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS8TCEN;
			break;
		default:
			/*
			 * Unsupported value, assume stale data,
			 * overwrite no RSS
			 */
			ASSERT(0);
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
		}
	}
	if (dcb_config->num_tcs.pg_tcs == 4) {
		/* We support both VT-on and VT-off with 4 TCs. */
		if (dcb_config->vt_mode)
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_VMDQRT4TCEN;
		else
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS4TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	if (dcb_config->num_tcs.pg_tcs == 8)
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	else {
		/* We support both VT-on and VT-off with 4 TCs. */
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		if (dcb_config->vt_mode)
			reg |= IXGBE_MTQC_VT_ENA;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable Security TX Buffer IFG for DCB */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg |= IXGBE_SECTX_DCB;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
			      u8 *map)
{
	UNREFERENCED_1PARAMETER(link_speed);

	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
					  map);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
					       tsa);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
					       tsa, map);

	return IXGBE_SUCCESS;
}

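/*
 * Illustrative sketch only, not part of the driver: one possible way a
 * caller could drive ixgbe_dcb_hw_config_82599() above.  Every credit,
 * bandwidth group, TSA and priority map value below is a made-up
 * placeholder rather than a recommended configuration, and the
 * IXGBE_DCB_82599_EXAMPLES guard and function name are hypothetical.
 */
#ifdef IXGBE_DCB_82599_EXAMPLES
static s32
ixgbe_dcb_example_hw_config(struct ixgbe_hw *hw)
{
	/* One entry per traffic class; values are placeholders only */
	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] =
	    { 64, 64, 64, 64, 64, 64, 64, 64 };
	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] =
	    { 512, 512, 512, 512, 512, 512, 512, 512 };
	u8 bwg_id[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	/* 0 is used here purely as a placeholder TSA selection */
	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
	/* One entry per user priority, naming the TC it maps to */
	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 2, 3 };

	/* link_speed is unreferenced by the 82599 routine, so pass 0 */
	return ixgbe_dcb_hw_config_82599(hw, 0, refill, max, bwg_id, tsa,
					 map);
}
#endif /* IXGBE_DCB_82599_EXAMPLES */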