/* $NetBSD: ixgbe_dcb_82599.c,v 1.12 2023/07/15 21:41:26 andvar Exp $ */
/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_dcb_82599.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixgbe_dcb_82599.c,v 1.12 2023/07/15 21:41:26 andvar Exp $");

#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to gather statistics for
 *
 * This function returns the status data for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
                                 struct ixgbe_hw_stats *stats,
                                 u8 tc_count)
{
        int tc;

        DEBUGFUNC("dcb_get_tc_stats");

        if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
                return IXGBE_ERR_PARAM;

        /* Statistics pertaining to each traffic class */
        for (tc = 0; tc < tc_count; tc++) {
                /* Transmitted Packets */
                stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
                /* Transmitted Bytes (read low first to prevent missed carry) */
                stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
                stats->qbtc[tc] +=
                        (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
                /* Received Packets */
                stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
                /* Received Bytes (read low first to prevent missed carry) */
                stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
                stats->qbrc[tc] +=
                        (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

                /* Received Dropped Packets */
                stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to gather statistics for
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
                                  struct ixgbe_hw_stats *stats,
                                  u8 tc_count)
{
        int tc;

        DEBUGFUNC("dcb_get_pfc_stats");

        if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
                return IXGBE_ERR_PARAM;

        for (tc = 0; tc < tc_count; tc++) {
                /* Priority XOFF Transmitted */
                stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
                /* Priority XOFF Received */
                stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
        }

        return IXGBE_SUCCESS;
}

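/*
 * Illustrative sketch (not part of the upstream driver): a caller that
 * periodically folds the per-TC and PFC counters into its running
 * statistics only has to hand over its hw/stats pair and the number of
 * traffic classes in use.  The helper name below is hypothetical.
 */
#if 0
static void
example_dcb_update_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats)
{
        /* Accumulate per-TC Tx/Rx packet, byte and drop counters. */
        (void)ixgbe_dcb_get_tc_stats_82599(hw, stats,
            IXGBE_DCB_MAX_TRAFFIC_CLASS);
        /* Accumulate the per-priority XOFF (PFC) counters as well. */
        (void)ixgbe_dcb_get_pfc_stats_82599(hw, stats,
            IXGBE_DCB_MAX_TRAFFIC_CLASS);
}
#endif
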
/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                      u16 *max, u8 *bwg_id, u8 *tsa,
                                      u8 *map)
{
        u32 reg = 0;
        u32 credit_refill = 0;
        u32 credit_max = 0;
        u8 i = 0;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; WSP)
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        /*
         * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
         * corresponding bits set for the UPs that need to be mapped to that
         * TC. e.g. if priorities 6 and 7 are to be mapped to a TC then the
         * up_to_tc_bitmap value for that TC will be 11000000 in binary.
         */
        reg = 0;
        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
                reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

        IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                credit_refill = refill[i];
                credit_max = max[i];
                reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

                reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTRPT4C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
        }

        /*
         * Configure Rx packet plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        return IXGBE_SUCCESS;
}

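/*
 * Illustrative sketch (not part of the upstream driver): the UP-to-TC loop
 * above packs one TC number per user priority into successive
 * IXGBE_RTRUP2TC_UP_SHIFT-wide fields of RTRUP2TC.  The sample mapping
 * below (priorities 0-1, 2-3, 4-5 and 6-7 going to TCs 0-3) is made-up
 * configuration data, not something read back from hardware.
 */
#if 0
static u32
example_pack_up2tc(void)
{
        const u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0, 0, 1, 1, 2, 2, 3, 3 };
        u32 reg = 0;
        u8 i;

        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
                reg |= (u32)map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT);

        return reg;     /* the value the arbiter setup writes to RTRUP2TC */
}
#endif
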
/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                           u16 *max, u8 *bwg_id, u8 *tsa)
{
        u32 reg, max_credits;
        u8 i;

        /* Clear the per-Tx queue credits; we use per-TC instead */
        for (i = 0; i < 128; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
                IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
        }

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                max_credits = max[i];
                reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
                reg |= (u32)(refill[i]);
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTDT2C_GSP;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTDT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
        }

        /*
         * Configure Tx descriptor plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        return IXGBE_SUCCESS;
}

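/*
 * Illustrative sketch (not part of the upstream driver): composing a single
 * RTTDT2C value by hand, mirroring the per-TC loop above.  The credit and
 * bandwidth-group numbers are placeholders; in the driver they arrive via
 * the refill, max, bwg_id and tsa arrays.
 */
#if 0
static u32
example_compose_rttdt2c(u16 refill_credits, u16 max_credits, u8 bwg, u8 tsa)
{
        u32 reg;

        reg = (u32)max_credits << IXGBE_RTTDT2C_MCL_SHIFT; /* max credit limit */
        reg |= refill_credits;                             /* refill credits */
        reg |= (u32)bwg << IXGBE_RTTDT2C_BWG_SHIFT;        /* bandwidth group */
        if (tsa == ixgbe_dcb_tsa_group_strict_cee)
                reg |= IXGBE_RTTDT2C_GSP;
        if (tsa == ixgbe_dcb_tsa_strict)
                reg |= IXGBE_RTTDT2C_LSP;
        return reg;
}
#endif
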
/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                           u16 *max, u8 *bwg_id, u8 *tsa,
                                           u8 *map)
{
        u32 reg;
        u8 i;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; SP; arb delay)
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
              IXGBE_RTTPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        /*
         * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
         * corresponding bits set for the UPs that need to be mapped to that
         * TC. e.g. if priorities 6 and 7 are to be mapped to a TC then the
         * up_to_tc_bitmap value for that TC will be 11000000 in binary.
         */
        reg = 0;
        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
                reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

        IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                reg = refill[i];
                reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTPT2C_GSP;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTPT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
        }

        /*
         * Configure Tx packet plane (recycle mode; SP; arb delay) and
         * enable arbiter
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        return IXGBE_SUCCESS;
}

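/*
 * Note (not from the upstream sources): the tsa[] values select the
 * transmission selection algorithm per TC in both Tx arbiters above, while
 * the Rx arbiter honours only the strict case:
 *
 *   ixgbe_dcb_tsa_group_strict_cee -> GSP bit (group strict priority)
 *   ixgbe_dcb_tsa_strict           -> LSP bit (link strict priority)
 *   any other value                -> weighted credit-based scheduling only
 *
 * The last line is an inference from the code above, not a statement taken
 * from the 82599 datasheet.
 */
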
/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
        u32 i, j, fcrtl, reg;
        u8 max_tc = 0;

        /* Enable Transmit Priority Flow Control */
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

        /* Enable Receive Priority Flow Control */
        reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
        reg |= IXGBE_MFLCN_DPF;

        /*
         * X540 supports per TC Rx priority flow control. So
         * clear all TCs and only enable those that should be
         * enabled.
         */
        reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

        if (hw->mac.type >= ixgbe_mac_X540)
                reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

        if (pfc_en)
                reg |= IXGBE_MFLCN_RPFCE;

        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
                if (map[i] > max_tc)
                        max_tc = map[i];
        }

        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i <= max_tc; i++) {
                int enabled = 0;

                for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
                        if ((map[j] == i) && (pfc_en & (1 << j))) {
                                enabled = 1;
                                break;
                        }
                }

                if (enabled) {
                        reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                        fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
                } else {
                        /*
                         * In order to prevent Tx hangs when the internal Tx
                         * switch is enabled we must set the high water mark
                         * to the Rx packet buffer size - 24KB. This allows
                         * the Tx switch to function even under heavy Rx
                         * workloads.
                         */
                        reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                }

                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
        }

        for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
        }

        /* Configure pause time (2 TCs per register) */
        reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
        for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
                IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

        /* Configure flow control refresh threshold value */
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

        return IXGBE_SUCCESS;
}

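/*
 * Illustrative sketch (not part of the upstream driver): how the pfc_en
 * bitmask and the priority-to-TC map combine in the routine above.  With
 * the hypothetical inputs below, priorities 0-3 have PFC enabled and map to
 * TCs 0 and 1, so only those two TCs get real XON/XOFF thresholds; the
 * remaining TCs fall back to the "Rx packet buffer size - 24KB" high-water
 * value.
 */
#if 0
static void
example_enable_pfc_on_low_tcs(struct ixgbe_hw *hw)
{
        /* Priorities 0-1 -> TC0, 2-3 -> TC1, 4-5 -> TC2, 6-7 -> TC3. */
        u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0, 0, 1, 1, 2, 2, 3, 3 };

        /* Enable PFC for priorities 0-3 only (low four bits of the mask). */
        (void)ixgbe_dcb_config_pfc_82599(hw, 0x0F, map);
}
#endif
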
/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
                                    struct ixgbe_dcb_config *dcb_config)
{
        u32 reg = 0;
        u8 i = 0;
        u8 tc_count = 8;
        bool vt_mode = FALSE;

        if (dcb_config != NULL) {
                tc_count = dcb_config->num_tcs.pg_tcs;
                vt_mode = dcb_config->vt_mode;
        }

        if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
                return IXGBE_ERR_PARAM;

        if (tc_count == 8 && vt_mode == FALSE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Set all 16 queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++) {
                        reg = 0x01010101 * (i / 4);
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                }
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Set all queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 * Tx queues are allocated non-uniformly to TCs:
                 * 32, 32, 16, 16, 8, 8, 8, 8.
                 */
                for (i = 0; i < 32; i++) {
                        if (i < 8)
                                reg = 0x00000000;
                        else if (i < 16)
                                reg = 0x01010101;
                        else if (i < 20)
                                reg = 0x02020202;
                        else if (i < 24)
                                reg = 0x03030303;
                        else if (i < 26)
                                reg = 0x04040404;
                        else if (i < 28)
                                reg = 0x05050505;
                        else if (i < 30)
                                reg = 0x06060606;
                        else
                                reg = 0x07070707;
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
                }
        } else if (tc_count == 4 && vt_mode == FALSE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Set all 16 queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++) {
                        if (i % 8 > 3)
                                /* In 4 TC mode, odd 16-queue ranges are
                                 * not used.
                                 */
                                continue;
                        reg = 0x01010101 * (i / 8);
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                }
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Set all queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 * Tx queues are allocated non-uniformly to TCs:
                 * 64, 32, 16, 16.
                 */
                for (i = 0; i < 32; i++) {
                        if (i < 16)
                                reg = 0x00000000;
                        else if (i < 24)
                                reg = 0x01010101;
                        else if (i < 28)
                                reg = 0x02020202;
                        else
                                reg = 0x03030303;
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
                }
        } else if (tc_count == 4 && vt_mode == TRUE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
                 * pool. Set all 32 queues of each TC across pools to the same
                 * stat with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
                 * pool. Set all 32 queues of each TC across pools to the same
                 * stat with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
        }

        return IXGBE_SUCCESS;
}

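/*
 * Worked example (not from the upstream sources): in the 8-TC, non-VT
 * layout above each RQSMR register covers four Rx queues and each TC owns
 * sixteen queues, so queue q reports into stat counter q / 16.  RQSMR(5),
 * for instance, is written with 0x01010101 (5 / 4 == 1), steering queues
 * 20-23 into counter set 1, which is where TC 1's queues (16-31) belong.
 */
#if 0
/* Stat counter that the 8-TC/non-VT setting assigns to Rx queue q. */
static u8
example_rx_stat_counter_for_queue(u8 q)
{
        u8 rqsmr_index = q / 4;                 /* which RQSMR register */
        u32 rqsmr_value = 0x01010101 * (rqsmr_index / 4);

        return (rqsmr_value >> ((q % 4) * 8)) & 0xff;   /* == q / 16 */
}
#endif
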
/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
                           struct ixgbe_dcb_config *dcb_config)
{
        u32 reg;
        u32 q;

        /* Disable the Tx desc arbiter so that MTQC can be changed */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
        if (dcb_config->num_tcs.pg_tcs == 8) {
                /* Enable DCB for Rx with 8 TCs */
                switch (reg & IXGBE_MRQC_MRQE_MASK) {
                case 0:
                case IXGBE_MRQC_RT4TCEN:
                        /* RSS disabled cases */
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RT8TCEN;
                        break;
                case IXGBE_MRQC_RSSEN:
                case IXGBE_MRQC_RTRSS4TCEN:
                        /* RSS enabled cases */
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RTRSS8TCEN;
                        break;
                default:
                        /*
                         * Unsupported value, assume stale data,
                         * overwrite no RSS
                         */
                        ASSERT(0);
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RT8TCEN;
                }
        }
        if (dcb_config->num_tcs.pg_tcs == 4) {
                /* We support both VT-on and VT-off with 4 TCs. */
                if (dcb_config->vt_mode)
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_VMDQRT4TCEN;
                else
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RTRSS4TCEN;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

        /* Enable DCB for Tx with 8 TCs */
        if (dcb_config->num_tcs.pg_tcs == 8)
                reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
        else {
                /* We support both VT-on and VT-off with 4 TCs. */
                reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
                if (dcb_config->vt_mode)
                        reg |= IXGBE_MTQC_VT_ENA;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

        /* Disable drop for all queues */
        for (q = 0; q < 128; q++)
                IXGBE_WRITE_REG(hw, IXGBE_QDE,
                                (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

        /* Enable the Tx desc arbiter */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        /* Enable Security TX Buffer IFG for DCB */
        reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
        reg |= IXGBE_SECTX_DCB;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
                              u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
                              u8 *map)
{
        UNREFERENCED_1PARAMETER(link_speed);

        ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
                                          map);
        ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
                                               tsa);
        ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
                                               tsa, map);

        return IXGBE_SUCCESS;
}
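
/*
 * Illustrative sketch (not part of the upstream driver): one plausible
 * bring-up order for the routines in this file, with made-up credit and
 * mapping values.  In the driver proper the generic DCB layer computes the
 * credit arrays from an ixgbe_dcb_config and dispatches to these
 * 82599-specific helpers; this only shows how the pieces fit together.
 */
#if 0
static void
example_dcb_bringup(struct ixgbe_hw *hw, struct ixgbe_dcb_config *cfg)
{
        /* Hypothetical per-TC credits: equal share, one bandwidth group. */
        u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 64, 64, 64, 64, 64, 64, 64, 64 };
        u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 512, 512, 512, 512, 512, 512, 512, 512 };
        u8 bwg_id[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
        u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };    /* 0 assumed non-strict */
        u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0, 1, 2, 3, 4, 5, 6, 7 };

        (void)ixgbe_dcb_config_82599(hw, cfg);          /* MRQC/MTQC, QDE, ... */
        (void)ixgbe_dcb_config_tc_stats_82599(hw, cfg); /* RQSMR/TQSM mapping */
        (void)ixgbe_dcb_hw_config_82599(hw, 0, refill, max, bwg_id, tsa, map);
        (void)ixgbe_dcb_config_pfc_82599(hw, 0xFF, map);/* PFC on all priorities */
}
#endif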