/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_dcb_82599.c 320688 2017-07-05 17:27:03Z erj $*/


#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to report on
 *
 * This function returns the status data for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
				 struct ixgbe_hw_stats *stats,
				 u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_tc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	/* Statistics pertaining to each traffic class */
	for (tc = 0; tc < tc_count; tc++) {
		/* Transmitted Packets */
		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
		/* Transmitted Bytes (read low first to prevent missed carry) */
		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
		stats->qbtc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
		/* Received Packets */
		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
		/* Received Bytes (read low first to prevent missed carry) */
		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
		stats->qbrc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

		/* Received Dropped Packet */
		stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
	}

	return IXGBE_SUCCESS;
}
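
/*
 * Usage sketch (illustrative only, not part of the driver): the byte counters
 * QBTC/QBRC are split into low and high halves, which is why the code above
 * reads the low register first so a carry into the high half is not missed.
 * A caller accumulating per-TC statistics might look like the following; the
 * 'adapter' pointer and its layout are assumptions made for this sketch.
 *
 *	struct ixgbe_hw_stats *stats = &adapter->stats;
 *
 *	if (ixgbe_dcb_get_tc_stats_82599(&adapter->hw, stats,
 *	    IXGBE_DCB_MAX_TRAFFIC_CLASS) == IXGBE_SUCCESS) {
 *		// stats->qptc[]/qbtc[] now hold cumulative per-TC Tx counts
 *	}
 */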

/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to report on
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
				  struct ixgbe_hw_stats *stats,
				  u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_pfc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	for (tc = 0; tc < tc_count; tc++) {
		/* Priority XOFF Transmitted */
		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
		/* Priority XOFF Received */
		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
				      u16 *max, u8 *bwg_id, u8 *tsa,
				      u8 *map)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8 i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/*
	 * Map all UPs to TCs.  up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to that
	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return IXGBE_SUCCESS;
}
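
/*
 * Worked example (illustrative only): each user priority occupies
 * IXGBE_RTRUP2TC_UP_SHIFT bits (3 in the shared-code definition) of RTRUP2TC,
 * so a hypothetical map[] of {0, 0, 1, 1, 2, 2, 3, 3} packs as
 *
 *	reg = 0 | (0 << 3) | (1 << 6) | (1 << 9) |
 *	      (2 << 12) | (2 << 15) | (3 << 18) | (3 << 21);
 *
 * i.e. priorities 0-1 land in TC0, 2-3 in TC1, 4-5 in TC2 and 6-7 in TC3.
 * The map[] values are assumptions chosen for the example.
 */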

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa)
{
	u32 reg, max_credits;
	u8 i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= refill[i];
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTDT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return IXGBE_SUCCESS;
}
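
/*
 * Illustrative sketch (not driver code): tsa[] selects the transmission
 * selection algorithm per TC, and only the two strict variants set extra
 * bits in RTTDT2C above.  With a hypothetical caller-side table
 *
 *	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
 *
 *	tsa[1] = ixgbe_dcb_tsa_group_strict_cee;  // TC1 gets IXGBE_RTTDT2C_GSP
 *	tsa[2] = ixgbe_dcb_tsa_strict;            // TC2 gets IXGBE_RTTDT2C_LSP
 *
 * the TCs left at zero take neither bit and are arbitrated purely by their
 * refill/max credits within their bandwidth group.
 */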

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa,
					   u8 *map)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/*
	 * Map all UPs to TCs.  up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to that
	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTPT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
	u32 i, j, fcrtl, reg;
	u8 max_tc = 0;

	/* Enable Transmit Priority Flow Control */
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

	/* Enable Receive Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg |= IXGBE_MFLCN_DPF;

	/*
	 * X540 supports per TC Rx priority flow control.  So
	 * clear all TCs and only enable those that should be
	 * enabled.
	 */
	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	if (hw->mac.type >= ixgbe_mac_X540)
		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

	if (pfc_en)
		reg |= IXGBE_MFLCN_RPFCE;

	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
		if (map[i] > max_tc)
			max_tc = map[i];
	}

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
			if ((map[j] == i) && (pfc_en & (1 << j))) {
				enabled = 1;
				break;
			}
		}

		if (enabled) {
			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
		} else {
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return IXGBE_SUCCESS;
}
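
/*
 * Illustrative example (not part of the driver): pfc_en is a bitmask of user
 * priorities, bit n meaning "PFC enabled for priority n".  With a hypothetical
 * configuration that maps each priority to its own TC and enables PFC only on
 * priority 3 (e.g. for a lossless storage class), a caller might do
 *
 *	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0, 1, 2, 3, 4, 5, 6, 7 };
 *
 *	ixgbe_dcb_config_pfc_82599(hw, 1 << 3, map);
 *
 * which leaves the remaining TCs with the "Tx switch" high-water default set
 * in the else branch above.  hw->fc.high_water[], low_water[] and pause_time
 * are expected to have been filled in by the caller beforehand.
 */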

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
				    struct ixgbe_dcb_config *dcb_config)
{
	u32 reg = 0;
	u8 i = 0;
	u8 tc_count = 8;
	bool vt_mode = FALSE;

	if (dcb_config != NULL) {
		tc_count = dcb_config->num_tcs.pg_tcs;
		vt_mode = dcb_config->vt_mode;
	}

	if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
		return IXGBE_ERR_PARAM;

	if (tc_count == 8 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			reg = 0x01010101 * (i / 4);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 32, 32, 16, 16, 8, 8, 8, 8.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 8)
				reg = 0x00000000;
			else if (i < 16)
				reg = 0x01010101;
			else if (i < 20)
				reg = 0x02020202;
			else if (i < 24)
				reg = 0x03030303;
			else if (i < 26)
				reg = 0x04040404;
			else if (i < 28)
				reg = 0x05050505;
			else if (i < 30)
				reg = 0x06060606;
			else
				reg = 0x07070707;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			if (i % 8 > 3)
				/* In 4 TC mode, odd 16-queue ranges are
				 * not used.
				 */
				continue;
			reg = 0x01010101 * (i / 8);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 64, 32, 16, 16.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 16)
				reg = 0x00000000;
			else if (i < 24)
				reg = 0x01010101;
			else if (i < 28)
				reg = 0x02020202;
			else
				reg = 0x03030303;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == TRUE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TCs to each
		 * pool.  Set all 32 queues of each TC across pools to the
		 * same stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TCs to each
		 * pool.  Set all 32 queues of each TC across pools to the
		 * same stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
	}

	return IXGBE_SUCCESS;
}
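
/*
 * Worked example (illustrative only): each RQSMR register carries four
 * per-queue stat-index fields, one byte per queue, so the 0x01010101 * (i / 4)
 * pattern in the 8 TC / 16-queues-per-TC case above produces
 *
 *	RQSMR(0..3)   = 0x00000000	// queues   0..15  -> stat set 0 (TC0)
 *	RQSMR(4..7)   = 0x01010101	// queues  16..31  -> stat set 1 (TC1)
 *	...
 *	RQSMR(28..31) = 0x07070707	// queues 112..127 -> stat set 7 (TC7)
 *
 * The VT-on case writes 0x03020100 instead, so the four queues covered by one
 * register map to stat sets 0..3, matching the 4-TCs-per-pool layout.
 */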

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
			   struct ixgbe_dcb_config *dcb_config)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	if (dcb_config->num_tcs.pg_tcs == 8) {
		/* Enable DCB for Rx with 8 TCs */
		switch (reg & IXGBE_MRQC_MRQE_MASK) {
		case 0:
		case IXGBE_MRQC_RT4TCEN:
			/* RSS disabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
			break;
		case IXGBE_MRQC_RSSEN:
		case IXGBE_MRQC_RTRSS4TCEN:
			/* RSS enabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS8TCEN;
			break;
		default:
			/*
			 * Unsupported value, assume stale data,
			 * overwrite no RSS
			 */
			ASSERT(0);
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
		}
	}
	if (dcb_config->num_tcs.pg_tcs == 4) {
		/* We support both VT-on and VT-off with 4 TCs. */
		if (dcb_config->vt_mode)
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_VMDQRT4TCEN;
		else
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS4TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	if (dcb_config->num_tcs.pg_tcs == 8)
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	else {
		/* We support both VT-on and VT-off with 4 TCs. */
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		if (dcb_config->vt_mode)
			reg |= IXGBE_MTQC_VT_ENA;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable Security TX Buffer IFG for DCB */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg |= IXGBE_SECTX_DCB;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
			      u8 *map)
{
	UNREFERENCED_1PARAMETER(link_speed);

	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
					  map);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
					       tsa);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
					       tsa, map);

	return IXGBE_SUCCESS;
}
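
/*
 * Bring-up sketch (illustrative, with assumed caller-side variables): a DCB
 * enable path on 82599 typically programs the general DCB registers first and
 * then the arbiters via the helper above, roughly
 *
 *	ixgbe_dcb_config_82599(hw, dcb_config);
 *	ixgbe_dcb_hw_config_82599(hw, link_speed, refill, max,
 *	    bwg_id, tsa, map);
 *	ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
 *	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
 *
 * where refill/max/bwg_id/tsa come from the caller's CEE credit calculation
 * and map is the user-priority to TC table; the generic ixgbe_dcb.c layer,
 * not this file, is normally responsible for that sequencing.
 */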