Home | History | Annotate | Line # | Download | only in radeon
radeon_ci_dpm.c revision 1.1.6.2
      1 /*	$NetBSD: radeon_ci_dpm.c,v 1.1.6.2 2019/06/10 22:08:26 christos Exp $	*/
      2 
      3 /*
      4  * Copyright 2013 Advanced Micro Devices, Inc.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  */
     25 
     26 #include <sys/cdefs.h>
     27 __KERNEL_RCSID(0, "$NetBSD: radeon_ci_dpm.c,v 1.1.6.2 2019/06/10 22:08:26 christos Exp $");
     28 
     29 #include <linux/firmware.h>
     30 #include "drmP.h"
     31 #include "radeon.h"
     32 #include "radeon_asic.h"
     33 #include "radeon_ucode.h"
     34 #include "cikd.h"
     35 #include "r600_dpm.h"
     36 #include "ci_dpm.h"
     37 #include "atom.h"
     38 #include <linux/seq_file.h>
     39 
     40 #define MC_CG_ARB_FREQ_F0           0x0a
     41 #define MC_CG_ARB_FREQ_F1           0x0b
     42 #define MC_CG_ARB_FREQ_F2           0x0c
     43 #define MC_CG_ARB_FREQ_F3           0x0d
     44 
     45 #define SMC_RAM_END 0x40000
     46 
     47 #define VOLTAGE_SCALE               4
     48 #define VOLTAGE_VID_OFFSET_SCALE1    625
     49 #define VOLTAGE_VID_OFFSET_SCALE2    100
     50 
/*
 * Per-ASIC PowerTune defaults for Hawaii XT boards.
 * Scalar fields feed SVI load-line and TDC setup (see ci_populate_svi_load_line
 * / ci_populate_tdc_limit); the two arrays are consumed as bapmti_r/bapmti_rc
 * in ci_populate_bapm_parameters_in_dpm_table.
 */
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
     57 
/*
 * PowerTune defaults for Hawaii PRO boards.  Same layout as
 * defaults_hawaii_xt; only the last scalar (BAPM temp gradient — presumed
 * from its use in ci_populate_bapm_parameters_in_dpm_table) differs.
 */
static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
     64 
/*
 * PowerTune defaults for Bonaire XT; also the fallback table chosen for any
 * unrecognized device id in ci_initialize_powertune_defaults.
 */
static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
     71 
/*
 * PowerTune defaults for Bonaire PRO.  Currently unreferenced in this file
 * (hence __unused); kept so the table can be selected again if a device id
 * mapping is added.
 */
static const struct ci_pt_defaults defaults_bonaire_pro __unused =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
     78 
/*
 * PowerTune defaults for Saturn XT (device ids 0x6640/0x6641/0x6646/0x6647,
 * per ci_initialize_powertune_defaults).
 */
static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
     85 
/*
 * PowerTune defaults for Saturn PRO.  Currently unreferenced in this file
 * (hence __unused).
 */
static const struct ci_pt_defaults defaults_saturn_pro __unused =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
     92 
/*
 * DIDT (di/dt power-ramping) register programming table, consumed by
 * ci_program_pt_config_registers() when DIDT is enabled.
 * Row layout: { offset, mask, shift, value, type }; all rows here use the
 * DIDT indirect address space.  The table is terminated by an 0xFFFFFFFF
 * offset sentinel.
 * NOTE(review): the four groups of offsets (0x0x, 0x2x, 0x4x, 0x6x) appear
 * to program four separate DIDT banks — presumably SQ/DB/TD/TCP, matching
 * the blocks toggled in ci_do_enable_didt — confirm against the register
 * reference before relying on this.
 */
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	/* second bank (0x2x) */
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	/* third bank (0x4x) */
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	/* fourth bank (0x6x) */
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }	/* end-of-table sentinel */
};
    169 
    170 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
    171 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
    172 				       u32 arb_freq_src, u32 arb_freq_dest);
    173 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
    174 extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
    175 extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
    176 						     u32 max_voltage_steps,
    177 						     struct atom_voltage_table *voltage_table);
    178 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
    179 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
    180 extern int ci_mc_load_microcode(struct radeon_device *rdev);
    181 extern void cik_update_cg(struct radeon_device *rdev,
    182 			  u32 block, bool enable);
    183 
    184 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
    185 					 struct atom_voltage_table_entry *voltage_table,
    186 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
    187 static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
    188 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
    189 				       u32 target_tdp);
    190 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
    191 
    192 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
    193 						      PPSMC_Msg msg, u32 parameter);
    194 
    195 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
    196 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
    197 
    198 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
    199 {
    200         struct ci_power_info *pi = rdev->pm.dpm.priv;
    201 
    202         return pi;
    203 }
    204 
    205 static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
    206 {
    207 	struct ci_ps *ps = rps->ps_priv;
    208 
    209 	return ps;
    210 }
    211 
/*
 * Select the per-ASIC powertune defaults table from the PCI device id and
 * initialize the powertune capability flags.  Power containment (and with
 * it CAC, TDC limiting and package-power tracking) is enabled on all parts;
 * BAPM is disabled on Hawaii, enabled elsewhere.
 */
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *defaults;

	switch (rdev->pdev->device) {
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		defaults = &defaults_hawaii_pro;
		break;
	default:
		/* Bonaire ids (0x6649-0x665D, 0x67A0-0x67BE) and anything
		 * unrecognized use the Bonaire XT table. */
		defaults = &defaults_bonaire_xt;
		break;
	}
	pi->powertune_defaults = defaults;

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		/* BAPM is known-bad on Hawaii in this driver generation. */
		pi->enable_bapm_feature = (rdev->family != CHIP_HAWAII);
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}
    271 
    272 static u8 ci_convert_to_vid(u16 vddc)
    273 {
    274 	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
    275 }
    276 
/*
 * Fill the BAPM VddC VID arrays in the SMC powertune table from the CAC
 * leakage table.  Returns -EINVAL when the leakage table is missing, has
 * more than the 8 entries the SMC table can hold, or disagrees in size
 * with the vddc-vs-sclk dependency table; 0 on success.
 */
static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			/* EVV platforms carry three explicit voltages per entry. */
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			/* Otherwise the hi VID is derived from the leakage value;
			 * hi2_vid is deliberately left untouched on this path. */
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}
    305 
    306 static int ci_populate_vddc_vid(struct radeon_device *rdev)
    307 {
    308 	struct ci_power_info *pi = ci_get_pi(rdev);
    309 	u8 *vid = pi->smc_powertune_table.VddCVid;
    310 	u32 i;
    311 
    312 	if (pi->vddc_voltage_table.count > 8)
    313 		return -EINVAL;
    314 
    315 	for (i = 0; i < pi->vddc_voltage_table.count; i++)
    316 		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
    317 
    318 	return 0;
    319 }
    320 
    321 static int ci_populate_svi_load_line(struct radeon_device *rdev)
    322 {
    323 	struct ci_power_info *pi = ci_get_pi(rdev);
    324 	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
    325 
    326 	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
    327 	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
    328 	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
    329 	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
    330 
    331 	return 0;
    332 }
    333 
/*
 * Program the TDC (thermal design current) package limit and throttle
 * parameters into the SMC powertune table.  Always returns 0.
 */
static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	/* The *256 scaling suggests the SMC expects an 8.8 fixed-point
	 * value — TODO confirm against the SMU7 interface headers. */
	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	/* SMC-side fields are big-endian. */
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}
    348 
/*
 * Set TdcWaterfallCtl in the SMC powertune table.
 *
 * NOTE(review): on a successful SRAM read the value just fetched is
 * immediately overwritten with the per-ASIC default, so the read only
 * serves to verify that the PmFuseTable location is accessible.  This
 * mirrors the upstream Linux driver; do not "fix" without checking there.
 * Returns -EINVAL if the SMC SRAM read fails, 0 otherwise.
 */
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}
    368 
/*
 * Program the fuzzy-fan PWM delta from the configured fan output
 * sensitivity.  A zero or negative (bit 15 set) sensitivity is replaced by
 * the board default before being written, big-endian, into the powertune
 * table.  Always returns 0.
 */
static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
		rdev->pm.dpm.fan.fan_output_sensitivity =
			rdev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}
    383 
/*
 * Derive the GNB LPML min/max VID from the 8 hi/lo BAPM VddC VID entries,
 * ignoring zero entries.  Returns -EINVAL when no usable (non-zero) VID was
 * found; otherwise stores the extremes into the powertune table and
 * returns 0.
 */
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, vmin, vmax;

	/* Seed with hi_vid[0]; note a zero seed is corrected below only if
	 * some later non-zero entry narrows the range. */
	vmin = vmax = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (vmin > hi_vid[i])
				vmin = hi_vid[i];
			if (vmax < hi_vid[i])
				vmax = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (vmin > lo_vid[i])
				vmin = lo_vid[i];
			if (vmax < lo_vid[i])
				vmax = lo_vid[i];
		}
	}

	/* All-zero tables are invalid: no VID data was populated. */
	if ((vmin == 0) || (vmax == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)vmax;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)vmin;

	return 0;
}
    415 
    416 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
    417 {
    418 	struct ci_power_info *pi = ci_get_pi(rdev);
    419 	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
    420 	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
    421 	struct radeon_cac_tdp_table *cac_tdp_table =
    422 		rdev->pm.dpm.dyn_state.cac_tdp_table;
    423 
    424 	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
    425 	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
    426 
    427 	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
    428 	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
    429 
    430 	return 0;
    431 }
    432 
/*
 * Fill the BAPM-related fields of the SMC DPM table: TDP targets, thermal
 * trip points, PPM package-power limits and the BAPMTI_R/RC coefficient
 * cube taken row-by-row from the per-ASIC defaults.  Always returns 0.
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	/* *256 scaling — presumably 8.8 fixed point, as elsewhere in this
	 * file; TODO confirm. */
	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	/* PPM limits are optional; zero them when no ppm table exists. */
	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	/* Walk the defaults arrays linearly across the iterations x sources
	 * x sinks cube; SMC-side values are big-endian. */
	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
    480 
/*
 * Build the complete PM fuse table and upload it to the SMC.  A no-op
 * (returning 0) when power containment is disabled.  The populate steps
 * run in a fixed order — ci_min_max_v_gnbl_pm_lid_from_bapm_vddc reads
 * arrays written by ci_populate_bapm_vddc_vid_sidd — and the first failure
 * aborts the sequence with its error code.
 */
static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		/* Locate the fuse table inside SMC SRAM via the firmware header. */
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		/* Push the assembled table into SMC SRAM in one shot. */
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}
    527 
/*
 * Flip the enable bit in CTRL0 of each DIDT block (SQ/DB/TD/TCP) whose
 * ramping capability flag is set.  Callers must already hold RLC safe mode
 * (see ci_enable_didt).
 */
static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 v;

	if (pi->caps_sq_ramping) {
		v = RREG32_DIDT(DIDT_SQ_CTRL0);
		v = enable ? (v | DIDT_CTRL_EN) : (v & ~DIDT_CTRL_EN);
		WREG32_DIDT(DIDT_SQ_CTRL0, v);
	}

	if (pi->caps_db_ramping) {
		v = RREG32_DIDT(DIDT_DB_CTRL0);
		v = enable ? (v | DIDT_CTRL_EN) : (v & ~DIDT_CTRL_EN);
		WREG32_DIDT(DIDT_DB_CTRL0, v);
	}

	if (pi->caps_td_ramping) {
		v = RREG32_DIDT(DIDT_TD_CTRL0);
		v = enable ? (v | DIDT_CTRL_EN) : (v & ~DIDT_CTRL_EN);
		WREG32_DIDT(DIDT_TD_CTRL0, v);
	}

	if (pi->caps_tcp_ramping) {
		v = RREG32_DIDT(DIDT_TCP_CTRL0);
		v = enable ? (v | DIDT_CTRL_EN) : (v & ~DIDT_CTRL_EN);
		WREG32_DIDT(DIDT_TCP_CTRL0, v);
	}
}
    569 
/*
 * Apply a 0xFFFFFFFF-terminated table of register updates.  CACHE-type rows
 * accumulate bits into `cache`, which is OR-ed into the next non-cache
 * write and then cleared — so cache rows must immediately precede the row
 * they modify.  Non-cache rows do a read-modify-write through the address
 * space selected by their type (SMC indirect, DIDT indirect, or MMIO with
 * a dword-index offset).  Returns -EINVAL for a NULL table, else 0.
 */
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				/* MMIO rows store a dword index; shift to a byte offset. */
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}
    617 
/*
 * Enable or disable DIDT power ramping.  All DIDT register access happens
 * under RLC safe mode; when enabling, the configuration table is programmed
 * first and a programming failure aborts without toggling the enable bits.
 * Returns 0 on success or when no DIDT block is capable.
 */
static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	bool have_didt;

	have_didt = pi->caps_sq_ramping || pi->caps_db_ramping ||
		    pi->caps_td_ramping || pi->caps_tcp_ramping;
	if (!have_didt)
		return 0;

	cik_enter_rlc_safe_mode(rdev);

	if (enable)
		ret = ci_program_pt_config_registers(rdev, didt_config_ci);

	if (ret == 0)
		ci_do_enable_didt(rdev, enable);

	cik_exit_rlc_safe_mode(rdev);

	return ret;
}
    642 
/*
 * Enable or disable power-containment features (BAPM/DTE, TDC limit,
 * package-power limit) via SMC messages, tracking what actually engaged in
 * pi->power_containment_features.  On enable, each feature failure sets
 * -EINVAL but the remaining features are still attempted.  On disable,
 * only features recorded as enabled are torn down; SMC failures there are
 * ignored (best effort).
 */
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					/* *256 — presumably the SMC's 8.8 fixed-point
					 * power format; TODO confirm. */
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}
    700 
/*
 * Enable or disable CAC (capacitance/leakage accounting) in the SMC,
 * mirroring the result in pi->cac_enabled.  No-op on boards without CAC
 * support; returns -EINVAL only if the SMC rejects the enable message.
 */
static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;

	if (!pi->caps_cac)
		return 0;

	if (enable) {
		PPSMC_Result result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);

		pi->cac_enabled = (result == PPSMC_Result_OK);
		if (!pi->cac_enabled)
			ret = -EINVAL;
	} else if (pi->cac_enabled) {
		/* Disable failures are intentionally ignored (best effort). */
		ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
		pi->cac_enabled = false;
	}

	return ret;
}
    724 
/*
 * Toggle thermal-based sclk DPM in the SMC.  Only talks to the SMC when the
 * feature is enabled for this board; returns 0 on success (or no-op) and
 * -EINVAL when the SMC rejects the message.
 */
static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		PPSMC_Msg msg = enable ? PPSMC_MSG_ENABLE_THERMAL_DPM :
					 PPSMC_MSG_DISABLE_THERMAL_DPM;

		result = ci_send_msg_to_smc(rdev, msg);
	}

	return (result == PPSMC_Result_OK) ? 0 : -EINVAL;
}
    743 
/*
 * Apply the user's TDP adjustment percentage to the configurable TDP and
 * hand the result to the SMC as the overdrive target.  No-op (returns 0)
 * without power containment.
 */
static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	/* Polarity of the adjustment is unresolved upstream as well; with
	 * false, a positive tdp_adjustment lowers the target. */
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}
    765 
/*
 * Gate or ungate UVD power.  Records the new state and updates UVD DPM;
 * does nothing when the requested state is already current.
 */
void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated != gate) {
		pi->uvd_power_gated = gate;
		ci_update_uvd_dpm(rdev, gate);
	}
}
    777 
/*
 * Decide whether the current vblank interval is too short to reclock
 * memory safely: true disables mclk switching.  GDDR5 boards need a
 * longer blanking window (450us vs 300us).
 *
 * Fix: the >120Hz refresh guard (comment and check) was duplicated
 * verbatim; the redundant copy has been removed.
 */
bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	/* disable mclk switching if the refresh is >120Hz, even if the
	 * blanking period would allow it
	 */
	if (r600_dpm_get_vrefresh(rdev) > 120)
		return true;

	return vblank_time < switch_limit;
}
    802 
/*
 * Adjust the requested power state in place to honour the current
 * display configuration and platform limits before it is programmed.
 */
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	/* Use the clocks of the active VCE level, or zero when VCE is idle. */
	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	/* MCLK switching is unsafe with more than one active crtc or when
	 * the vblank period is too short (see ci_dpm_vblank_too_short). */
	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	/* Select the AC or DC clock/voltage limit table. */
	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* On battery, clamp every performance level to the DC limits. */
	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	/* With mclk switching disabled, pin the memory clock to the highest
	 * level so it never has to change at runtime. */
	if (disable_mclk_switching) {
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	/* Raise the base clocks to at least what the VCE state requires. */
	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	/* Keep the levels monotonic: level 1 must be >= level 0. */
	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
    877 
    878 static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
    879 					    int min_temp, int max_temp)
    880 {
    881 	int low_temp = 0 * 1000;
    882 	int high_temp = 255 * 1000;
    883 	u32 tmp;
    884 
    885 	if (low_temp < min_temp)
    886 		low_temp = min_temp;
    887 	if (high_temp > max_temp)
    888 		high_temp = max_temp;
    889 	if (high_temp < low_temp) {
    890 		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
    891 		return -EINVAL;
    892 	}
    893 
    894 	tmp = RREG32_SMC(CG_THERMAL_INT);
    895 	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
    896 	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
    897 		CI_DIG_THERM_INTL(low_temp / 1000);
    898 	WREG32_SMC(CG_THERMAL_INT, tmp);
    899 
    900 #if 0
    901 	/* XXX: need to figure out how to handle this properly */
    902 	tmp = RREG32_SMC(CG_THERMAL_CTRL);
    903 	tmp &= DIG_THERM_DPM_MASK;
    904 	tmp |= DIG_THERM_DPM(high_temp / 1000);
    905 	WREG32_SMC(CG_THERMAL_CTRL, tmp);
    906 #endif
    907 
    908 	rdev->pm.dpm.thermal.min_temp = low_temp;
    909 	rdev->pm.dpm.thermal.max_temp = high_temp;
    910 
    911 	return 0;
    912 }
    913 
    914 static int ci_thermal_enable_alert(struct radeon_device *rdev,
    915 				   bool enable)
    916 {
    917 	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
    918 	PPSMC_Result result;
    919 
    920 	if (enable) {
    921 		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
    922 		WREG32_SMC(CG_THERMAL_INT, thermal_int);
    923 		rdev->irq.dpm_thermal = false;
    924 		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
    925 		if (result != PPSMC_Result_OK) {
    926 			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
    927 			return -EINVAL;
    928 		}
    929 	} else {
    930 		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
    931 		WREG32_SMC(CG_THERMAL_INT, thermal_int);
    932 		rdev->irq.dpm_thermal = true;
    933 		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
    934 		if (result != PPSMC_Result_OK) {
    935 			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
    936 			return -EINVAL;
    937 		}
    938 	}
    939 
    940 	return 0;
    941 }
    942 
    943 static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
    944 {
    945 	struct ci_power_info *pi = ci_get_pi(rdev);
    946 	u32 tmp;
    947 
    948 	if (pi->fan_ctrl_is_in_default_mode) {
    949 		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
    950 		pi->fan_ctrl_default_mode = tmp;
    951 		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
    952 		pi->t_min = tmp;
    953 		pi->fan_ctrl_is_in_default_mode = false;
    954 	}
    955 
    956 	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
    957 	tmp |= TMIN(0);
    958 	WREG32_SMC(CG_FDO_CTRL2, tmp);
    959 
    960 	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
    961 	tmp |= FDO_PWM_MODE(mode);
    962 	WREG32_SMC(CG_FDO_CTRL2, tmp);
    963 }
    964 
/*
 * Build the SMC fan table from the vbios fan parameters and upload it.
 * On any problem, SMC (ucode) fan control is disabled and 0 is returned
 * so the driver falls back to other fan management.
 */
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	/* No table location in SMC SRAM -> can't use ucode fan control. */
	if (!pi->fan_table_start) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* Scale the minimum PWM into the controller's duty range.
	 * pwm_min appears to be in units of 0.01% — TODO confirm. */
	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

	/* Fixed-point slopes for the two fan-curve segments; the +50
	 * terms round to nearest on the /100 divisions. */
	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* All multi-byte table fields are big-endian for the SMC. */
	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = radeon_get_xclk(rdev);

	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	/* Temperature source currently selected in hardware. */
	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		rdev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}
   1041 
   1042 static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
   1043 {
   1044 	struct ci_power_info *pi = ci_get_pi(rdev);
   1045 	PPSMC_Result ret;
   1046 
   1047 	if (pi->caps_od_fuzzy_fan_control_support) {
   1048 		ret = ci_send_msg_to_smc_with_parameter(rdev,
   1049 							PPSMC_StartFanControl,
   1050 							FAN_CONTROL_FUZZY);
   1051 		if (ret != PPSMC_Result_OK)
   1052 			return -EINVAL;
   1053 		ret = ci_send_msg_to_smc_with_parameter(rdev,
   1054 							PPSMC_MSG_SetFanPwmMax,
   1055 							rdev->pm.dpm.fan.default_max_fan_pwm);
   1056 		if (ret != PPSMC_Result_OK)
   1057 			return -EINVAL;
   1058 	} else {
   1059 		ret = ci_send_msg_to_smc_with_parameter(rdev,
   1060 							PPSMC_StartFanControl,
   1061 							FAN_CONTROL_TABLE);
   1062 		if (ret != PPSMC_Result_OK)
   1063 			return -EINVAL;
   1064 	}
   1065 
   1066 	pi->fan_is_controlled_by_smc = true;
   1067 	return 0;
   1068 }
   1069 
   1070 static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
   1071 {
   1072 	PPSMC_Result ret;
   1073 	struct ci_power_info *pi = ci_get_pi(rdev);
   1074 
   1075 	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
   1076 	if (ret == PPSMC_Result_OK) {
   1077 		pi->fan_is_controlled_by_smc = false;
   1078 		return 0;
   1079 	} else
   1080 		return -EINVAL;
   1081 }
   1082 
   1083 int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
   1084 					     u32 *speed)
   1085 {
   1086 	u32 duty, duty100;
   1087 	u64 tmp64;
   1088 
   1089 	if (rdev->pm.no_fan)
   1090 		return -ENOENT;
   1091 
   1092 	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
   1093 	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
   1094 
   1095 	if (duty100 == 0)
   1096 		return -EINVAL;
   1097 
   1098 	tmp64 = (u64)duty * 100;
   1099 	do_div(tmp64, duty100);
   1100 	*speed = (u32)tmp64;
   1101 
   1102 	if (*speed > 100)
   1103 		*speed = 100;
   1104 
   1105 	return 0;
   1106 }
   1107 
   1108 int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
   1109 					     u32 speed)
   1110 {
   1111 	u32 tmp;
   1112 	u32 duty, duty100;
   1113 	u64 tmp64;
   1114 	struct ci_power_info *pi = ci_get_pi(rdev);
   1115 
   1116 	if (rdev->pm.no_fan)
   1117 		return -ENOENT;
   1118 
   1119 	if (pi->fan_is_controlled_by_smc)
   1120 		return -EINVAL;
   1121 
   1122 	if (speed > 100)
   1123 		return -EINVAL;
   1124 
   1125 	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
   1126 
   1127 	if (duty100 == 0)
   1128 		return -EINVAL;
   1129 
   1130 	tmp64 = (u64)speed * duty100;
   1131 	do_div(tmp64, 100);
   1132 	duty = (u32)tmp64;
   1133 
   1134 	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
   1135 	tmp |= FDO_STATIC_DUTY(duty);
   1136 	WREG32_SMC(CG_FDO_CTRL0, tmp);
   1137 
   1138 	return 0;
   1139 }
   1140 
   1141 void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
   1142 {
   1143 	if (mode) {
   1144 		/* stop auto-manage */
   1145 		if (rdev->pm.dpm.fan.ucode_fan_control)
   1146 			ci_fan_ctrl_stop_smc_fan_control(rdev);
   1147 		ci_fan_ctrl_set_static_mode(rdev, mode);
   1148 	} else {
   1149 		/* restart auto-manage */
   1150 		if (rdev->pm.dpm.fan.ucode_fan_control)
   1151 			ci_thermal_start_smc_fan_control(rdev);
   1152 		else
   1153 			ci_fan_ctrl_set_default_mode(rdev);
   1154 	}
   1155 }
   1156 
   1157 u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
   1158 {
   1159 	struct ci_power_info *pi = ci_get_pi(rdev);
   1160 	u32 tmp;
   1161 
   1162 	if (pi->fan_is_controlled_by_smc)
   1163 		return 0;
   1164 
   1165 	tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
   1166 	return (tmp >> FDO_PWM_MODE_SHIFT);
   1167 }
   1168 
#if 0
/* Currently unused: query the fan speed in RPM from the tach period. */
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	/* RPM readout needs a tach with a known pulses-per-rev count. */
	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

/* Currently unused: set a static fan speed in RPM via the tach target. */
static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	/* Reject speeds outside the fan's supported RPM range. */
	if ((speed < rdev->pm.fan_min_rpm) ||
	    (speed > rdev->pm.fan_max_rpm))
		return -EINVAL;

	/* Take fan control away from the SMC before driving it manually. */
	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif
   1220 
   1221 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
   1222 {
   1223 	struct ci_power_info *pi = ci_get_pi(rdev);
   1224 	u32 tmp;
   1225 
   1226 	if (!pi->fan_ctrl_is_in_default_mode) {
   1227 		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
   1228 		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
   1229 		WREG32_SMC(CG_FDO_CTRL2, tmp);
   1230 
   1231 		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
   1232 		tmp |= TMIN(pi->t_min);
   1233 		WREG32_SMC(CG_FDO_CTRL2, tmp);
   1234 		pi->fan_ctrl_is_in_default_mode = true;
   1235 	}
   1236 }
   1237 
   1238 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
   1239 {
   1240 	if (rdev->pm.dpm.fan.ucode_fan_control) {
   1241 		ci_fan_ctrl_start_smc_fan_control(rdev);
   1242 		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
   1243 	}
   1244 }
   1245 
   1246 static void ci_thermal_initialize(struct radeon_device *rdev)
   1247 {
   1248 	u32 tmp;
   1249 
   1250 	if (rdev->pm.fan_pulses_per_revolution) {
   1251 		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
   1252 		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1);
   1253 		WREG32_SMC(CG_TACH_CTRL, tmp);
   1254 	}
   1255 
   1256 	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
   1257 	tmp |= TACH_PWM_RESP_RATE(0x28);
   1258 	WREG32_SMC(CG_FDO_CTRL2, tmp);
   1259 }
   1260 
   1261 static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
   1262 {
   1263 	int ret;
   1264 
   1265 	ci_thermal_initialize(rdev);
   1266 	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
   1267 	if (ret)
   1268 		return ret;
   1269 	ret = ci_thermal_enable_alert(rdev, true);
   1270 	if (ret)
   1271 		return ret;
   1272 	if (rdev->pm.dpm.fan.ucode_fan_control) {
   1273 		ret = ci_thermal_setup_fan_table(rdev);
   1274 		if (ret)
   1275 			return ret;
   1276 		ci_thermal_start_smc_fan_control(rdev);
   1277 	}
   1278 
   1279 	return 0;
   1280 }
   1281 
   1282 static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
   1283 {
   1284 	if (!rdev->pm.no_fan)
   1285 		ci_fan_ctrl_set_default_mode(rdev);
   1286 }
   1287 
#if 0
/* Currently unused: read a 32-bit SMC soft register at reg_offset. */
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif
   1299 
   1300 static int ci_write_smc_soft_register(struct radeon_device *rdev,
   1301 				      u16 reg_offset, u32 value)
   1302 {
   1303 	struct ci_power_info *pi = ci_get_pi(rdev);
   1304 
   1305 	return ci_write_smc_sram_dword(rdev,
   1306 				       pi->soft_regs_start + reg_offset,
   1307 				       value, pi->sram_end);
   1308 }
   1309 
   1310 static void ci_init_fps_limits(struct radeon_device *rdev)
   1311 {
   1312 	struct ci_power_info *pi = ci_get_pi(rdev);
   1313 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
   1314 
   1315 	if (pi->caps_fps) {
   1316 		u16 tmp;
   1317 
   1318 		tmp = 45;
   1319 		table->FpsHighT = cpu_to_be16(tmp);
   1320 
   1321 		tmp = 30;
   1322 		table->FpsLowT = cpu_to_be16(tmp);
   1323 	}
   1324 }
   1325 
   1326 static int ci_update_sclk_t(struct radeon_device *rdev)
   1327 {
   1328 	struct ci_power_info *pi = ci_get_pi(rdev);
   1329 	int ret = 0;
   1330 	u32 low_sclk_interrupt_t = 0;
   1331 
   1332 	if (pi->caps_sclk_throttle_low_notification) {
   1333 		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
   1334 
   1335 		ret = ci_copy_bytes_to_smc(rdev,
   1336 					   pi->dpm_table_start +
   1337 					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
   1338 					   (u8 *)&low_sclk_interrupt_t,
   1339 					   sizeof(u32), pi->sram_end);
   1340 
   1341 	}
   1342 
   1343 	return ret;
   1344 }
   1345 
/*
 * Populate the VDDC/VDDCI leakage tables by resolving each virtual
 * leakage voltage id to an actual voltage, either via EVV or via the
 * vbios leakage tables, depending on platform capability.
 */
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		/* EVV path: query each virtual voltage id directly. */
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			/* A real voltage differs from the virtual id marker. */
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		/* vbios path: look up vddc and vddci per virtual id. */
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}
   1387 
/*
 * Enable or disable hardware thermal protection based on the bitmask of
 * active auto-throttle sources.  'sources' is a bitmask of
 * RADEON_DPM_AUTO_THROTTLE_SRC_* bits.
 */
static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		/* No throttle sources -> no thermal protection. */
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#else
		/* Silence the unused-variable warning while the event-source
		 * programming above is disabled. */
		(void)dpm_event_src;
#endif

		/* THERMAL_PROTECTION_DIS cleared = protection enabled. */
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}
   1438 
   1439 static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
   1440 					   enum radeon_dpm_auto_throttle_src source,
   1441 					   bool enable)
   1442 {
   1443 	struct ci_power_info *pi = ci_get_pi(rdev);
   1444 
   1445 	if (enable) {
   1446 		if (!(pi->active_auto_throttle_sources & (1 << source))) {
   1447 			pi->active_auto_throttle_sources |= 1 << source;
   1448 			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
   1449 		}
   1450 	} else {
   1451 		if (pi->active_auto_throttle_sources & (1 << source)) {
   1452 			pi->active_auto_throttle_sources &= ~(1 << source);
   1453 			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
   1454 		}
   1455 	}
   1456 }
   1457 
   1458 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
   1459 {
   1460 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
   1461 		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
   1462 }
   1463 
   1464 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
   1465 {
   1466 	struct ci_power_info *pi = ci_get_pi(rdev);
   1467 	PPSMC_Result smc_result;
   1468 
   1469 	if (!pi->need_update_smu7_dpm_table)
   1470 		return 0;
   1471 
   1472 	if ((!pi->sclk_dpm_key_disabled) &&
   1473 	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
   1474 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
   1475 		if (smc_result != PPSMC_Result_OK)
   1476 			return -EINVAL;
   1477 	}
   1478 
   1479 	if ((!pi->mclk_dpm_key_disabled) &&
   1480 	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
   1481 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
   1482 		if (smc_result != PPSMC_Result_OK)
   1483 			return -EINVAL;
   1484 	}
   1485 
   1486 	pi->need_update_smu7_dpm_table = 0;
   1487 	return 0;
   1488 }
   1489 
/*
 * Enable or disable SCLK and MCLK DPM in the SMC, honouring the
 * per-domain disable keys.  Returns -EINVAL on any SMC failure.
 */
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			/* Enable memory CAC after MCLK DPM comes up. */
			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			/* LCAC programming sequence; the magic values come
			 * from AMD and are not documented here — the brief
			 * settle delay between the two writes appears to be
			 * required by hardware (TODO confirm). */
			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
   1535 
/*
 * Turn on dynamic power management: global/dynamic PM enables, voltage
 * control, SCLK/MCLK DPM and (optionally) PCIe DPM.
 * Returns -EINVAL or a propagated error on any SMC failure.
 */
static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	/* Give the SMC a voltage-change timeout before enabling anything. */
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	/* PCIe DPM is optional and may be keyed off. */
	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
   1571 
   1572 static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
   1573 {
   1574 	struct ci_power_info *pi = ci_get_pi(rdev);
   1575 	PPSMC_Result smc_result;
   1576 
   1577 	if (!pi->need_update_smu7_dpm_table)
   1578 		return 0;
   1579 
   1580 	if ((!pi->sclk_dpm_key_disabled) &&
   1581 	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
   1582 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
   1583 		if (smc_result != PPSMC_Result_OK)
   1584 			return -EINVAL;
   1585 	}
   1586 
   1587 	if ((!pi->mclk_dpm_key_disabled) &&
   1588 	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
   1589 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
   1590 		if (smc_result != PPSMC_Result_OK)
   1591 			return -EINVAL;
   1592 	}
   1593 
   1594 	return 0;
   1595 }
   1596 
/*
 * Turn off dynamic power management in the reverse order of
 * ci_start_dpm(): global/dynamic PM, PCIe DPM, SCLK/MCLK DPM, then
 * voltage control.  Returns -EINVAL or a propagated error on failure.
 */
static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	/* PCIe DPM is optional and may be keyed off. */
	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}
   1628 
   1629 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
   1630 {
   1631 	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
   1632 
   1633 	if (enable)
   1634 		tmp &= ~SCLK_PWRMGT_OFF;
   1635 	else
   1636 		tmp |= SCLK_PWRMGT_OFF;
   1637 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
   1638 }
   1639 
#if 0
/* Currently unused: tell the SMC whether we are on AC or battery power
 * and apply the matching power limit. */
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	/* Limits appear to be in 1/256 W fixed point — TODO confirm. */
	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

        ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif
   1666 
/* Write the message argument register, then send msg to the SMC. */
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}
   1673 
   1674 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
   1675 							PPSMC_Msg msg, u32 *parameter)
   1676 {
   1677 	PPSMC_Result smc_result;
   1678 
   1679 	smc_result = ci_send_msg_to_smc(rdev, msg);
   1680 
   1681 	if ((smc_result == PPSMC_Result_OK) && parameter)
   1682 		*parameter = RREG32(SMC_MSG_ARG_0);
   1683 
   1684 	return smc_result;
   1685 }
   1686 
   1687 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
   1688 {
   1689 	struct ci_power_info *pi = ci_get_pi(rdev);
   1690 
   1691 	if (!pi->sclk_dpm_key_disabled) {
   1692 		PPSMC_Result smc_result =
   1693 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
   1694 		if (smc_result != PPSMC_Result_OK)
   1695 			return -EINVAL;
   1696 	}
   1697 
   1698 	return 0;
   1699 }
   1700 
   1701 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
   1702 {
   1703 	struct ci_power_info *pi = ci_get_pi(rdev);
   1704 
   1705 	if (!pi->mclk_dpm_key_disabled) {
   1706 		PPSMC_Result smc_result =
   1707 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
   1708 		if (smc_result != PPSMC_Result_OK)
   1709 			return -EINVAL;
   1710 	}
   1711 
   1712 	return 0;
   1713 }
   1714 
   1715 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
   1716 {
   1717 	struct ci_power_info *pi = ci_get_pi(rdev);
   1718 
   1719 	if (!pi->pcie_dpm_key_disabled) {
   1720 		PPSMC_Result smc_result =
   1721 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
   1722 		if (smc_result != PPSMC_Result_OK)
   1723 			return -EINVAL;
   1724 	}
   1725 
   1726 	return 0;
   1727 }
   1728 
   1729 static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
   1730 {
   1731 	struct ci_power_info *pi = ci_get_pi(rdev);
   1732 
   1733 	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
   1734 		PPSMC_Result smc_result =
   1735 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
   1736 		if (smc_result != PPSMC_Result_OK)
   1737 			return -EINVAL;
   1738 	}
   1739 
   1740 	return 0;
   1741 }
   1742 
   1743 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
   1744 				       u32 target_tdp)
   1745 {
   1746 	PPSMC_Result smc_result =
   1747 		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
   1748 	if (smc_result != PPSMC_Result_OK)
   1749 		return -EINVAL;
   1750 	return 0;
   1751 }
   1752 
#if 0
/* Unused: drops SCLK/MCLK DPM back to the boot state.  Kept for reference. */
static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}
#endif
   1759 
   1760 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
   1761 {
   1762 	u32 sclk_freq;
   1763 	PPSMC_Result smc_result =
   1764 		ci_send_msg_to_smc_return_parameter(rdev,
   1765 						    PPSMC_MSG_API_GetSclkFrequency,
   1766 						    &sclk_freq);
   1767 	if (smc_result != PPSMC_Result_OK)
   1768 		sclk_freq = 0;
   1769 
   1770 	return sclk_freq;
   1771 }
   1772 
   1773 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
   1774 {
   1775 	u32 mclk_freq;
   1776 	PPSMC_Result smc_result =
   1777 		ci_send_msg_to_smc_return_parameter(rdev,
   1778 						    PPSMC_MSG_API_GetMclkFrequency,
   1779 						    &mclk_freq);
   1780 	if (smc_result != PPSMC_Result_OK)
   1781 		mclk_freq = 0;
   1782 
   1783 	return mclk_freq;
   1784 }
   1785 
/*
 * Start the SMC: program the firmware entry jump, start the SMC clock
 * and release the SMC, then poll (bounded by rdev->usec_timeout
 * iterations, silent on timeout) until the firmware flags report
 * interrupts enabled.
 */
static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	/* Wait for the firmware to come up; proceed anyway on timeout. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}
   1798 
/* Halt the SMC: put it back into reset and stop its clock. */
static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}
   1804 
   1805 static int ci_process_firmware_header(struct radeon_device *rdev)
   1806 {
   1807 	struct ci_power_info *pi = ci_get_pi(rdev);
   1808 	u32 tmp;
   1809 	int ret;
   1810 
   1811 	ret = ci_read_smc_sram_dword(rdev,
   1812 				     SMU7_FIRMWARE_HEADER_LOCATION +
   1813 				     offsetof(SMU7_Firmware_Header, DpmTable),
   1814 				     &tmp, pi->sram_end);
   1815 	if (ret)
   1816 		return ret;
   1817 
   1818 	pi->dpm_table_start = tmp;
   1819 
   1820 	ret = ci_read_smc_sram_dword(rdev,
   1821 				     SMU7_FIRMWARE_HEADER_LOCATION +
   1822 				     offsetof(SMU7_Firmware_Header, SoftRegisters),
   1823 				     &tmp, pi->sram_end);
   1824 	if (ret)
   1825 		return ret;
   1826 
   1827 	pi->soft_regs_start = tmp;
   1828 
   1829 	ret = ci_read_smc_sram_dword(rdev,
   1830 				     SMU7_FIRMWARE_HEADER_LOCATION +
   1831 				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
   1832 				     &tmp, pi->sram_end);
   1833 	if (ret)
   1834 		return ret;
   1835 
   1836 	pi->mc_reg_table_start = tmp;
   1837 
   1838 	ret = ci_read_smc_sram_dword(rdev,
   1839 				     SMU7_FIRMWARE_HEADER_LOCATION +
   1840 				     offsetof(SMU7_Firmware_Header, FanTable),
   1841 				     &tmp, pi->sram_end);
   1842 	if (ret)
   1843 		return ret;
   1844 
   1845 	pi->fan_table_start = tmp;
   1846 
   1847 	ret = ci_read_smc_sram_dword(rdev,
   1848 				     SMU7_FIRMWARE_HEADER_LOCATION +
   1849 				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
   1850 				     &tmp, pi->sram_end);
   1851 	if (ret)
   1852 		return ret;
   1853 
   1854 	pi->arb_table_start = tmp;
   1855 
   1856 	return 0;
   1857 }
   1858 
/*
 * Snapshot the SPLL and MPLL clock registers into pi->clock_registers
 * so later DPM-table programming can start from the current hardware
 * state.  Pure reads; no hardware state is modified.
 */
static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	/* Engine PLL (SPLL) control and spread-spectrum registers. */
	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	/* Memory PLL (MPLL) and memory-clock power management registers. */
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
   1885 
   1886 static void ci_init_sclk_t(struct radeon_device *rdev)
   1887 {
   1888 	struct ci_power_info *pi = ci_get_pi(rdev);
   1889 
   1890 	pi->low_sclk_interrupt_t = 0;
   1891 }
   1892 
   1893 static void ci_enable_thermal_protection(struct radeon_device *rdev,
   1894 					 bool enable)
   1895 {
   1896 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
   1897 
   1898 	if (enable)
   1899 		tmp &= ~THERMAL_PROTECTION_DIS;
   1900 	else
   1901 		tmp |= THERMAL_PROTECTION_DIS;
   1902 	WREG32_SMC(GENERAL_PWRMGT, tmp);
   1903 }
   1904 
   1905 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
   1906 {
   1907 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
   1908 
   1909 	tmp |= STATIC_PM_EN;
   1910 
   1911 	WREG32_SMC(GENERAL_PWRMGT, tmp);
   1912 }
   1913 
#if 0
/* Unused: ask the SMC to switch to its minimum-power state. */
static int ci_enter_ulp_state(struct radeon_device *rdev)
{

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

/* Unused: resume from minimum power and poll for the SMC's response. */
static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	/* Bounded poll for the ack; returns 0 even on timeout. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif
   1942 
   1943 static int ci_notify_smc_display_change(struct radeon_device *rdev,
   1944 					bool has_display)
   1945 {
   1946 	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
   1947 
   1948 	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
   1949 }
   1950 
   1951 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
   1952 				      bool enable)
   1953 {
   1954 	struct ci_power_info *pi = ci_get_pi(rdev);
   1955 
   1956 	if (enable) {
   1957 		if (pi->caps_sclk_ds) {
   1958 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
   1959 				return -EINVAL;
   1960 		} else {
   1961 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
   1962 				return -EINVAL;
   1963 		}
   1964 	} else {
   1965 		if (pi->caps_sclk_ds) {
   1966 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
   1967 				return -EINVAL;
   1968 		}
   1969 	}
   1970 
   1971 	return 0;
   1972 }
   1973 
/*
 * Program the display-gap logic for the current CRTC configuration and
 * tell the SMC how much of each frame precedes the vblank period.
 */
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	/* Gap on vblank/watermark while displays are active, else ignore. */
	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	/* Fall back to defaults when no display timing is available. */
	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	/* NOTE(review): this subtraction can wrap when vblank_time
	 * approaches frame_time_in_us - 200 — confirm callers bound it. */
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));

}
   2007 
/*
 * Enable or disable sclk spread spectrum.  Enabling only sets the
 * dynamic spread-spectrum bit when the board supports sclk SS;
 * disabling unconditionally clears both the SPLL SSEN bit and the
 * dynamic enable bit.
 */
static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}
   2029 
   2030 static void ci_program_sstp(struct radeon_device *rdev)
   2031 {
   2032 	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
   2033 }
   2034 
   2035 static void ci_enable_display_gap(struct radeon_device *rdev)
   2036 {
   2037 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
   2038 
   2039         tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
   2040         tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
   2041                 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
   2042 
   2043 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
   2044 }
   2045 
/*
 * Release the SCLK/busy counters from reset and load the CI default
 * values into the CG_FTV_0..7 registers.  Undone by ci_clear_vc().
 */
static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}
   2063 
/*
 * Counterpart of ci_program_vc(): hold the SCLK/busy counters in
 * reset and zero the CG_FTV_0..7 registers.
 */
static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}
   2081 
/*
 * Upload the SMC microcode: wait (bounded poll, silent on timeout) for
 * the ROM boot sequence to finish, stop the SMC clock and hold the SMC
 * in reset, then copy the ucode into SMC SRAM.
 * Returns the result of ci_load_smc_ucode().
 */
static int ci_upload_firmware(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int i, ret;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
			break;
	}
	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);

	/* The SMC must be stopped while its SRAM is rewritten. */
	ci_stop_smc_clock(rdev);
	ci_reset_smc(rdev);

	ret = ci_load_smc_ucode(rdev, pi->sram_end);

	return ret;

}
   2101 
   2102 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
   2103 				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
   2104 				     struct atom_voltage_table *voltage_table)
   2105 {
   2106 	u32 i;
   2107 
   2108 	if (voltage_dependency_table == NULL)
   2109 		return -EINVAL;
   2110 
   2111 	voltage_table->mask_low = 0;
   2112 	voltage_table->phase_delay = 0;
   2113 
   2114 	voltage_table->count = voltage_dependency_table->count;
   2115 	for (i = 0; i < voltage_table->count; i++) {
   2116 		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
   2117 		voltage_table->entries[i].smio_low = 0;
   2118 	}
   2119 
   2120 	return 0;
   2121 }
   2122 
   2123 static int ci_construct_voltage_tables(struct radeon_device *rdev)
   2124 {
   2125 	struct ci_power_info *pi = ci_get_pi(rdev);
   2126 	int ret;
   2127 
   2128 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
   2129 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
   2130 						    VOLTAGE_OBJ_GPIO_LUT,
   2131 						    &pi->vddc_voltage_table);
   2132 		if (ret)
   2133 			return ret;
   2134 	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
   2135 		ret = ci_get_svi2_voltage_table(rdev,
   2136 						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
   2137 						&pi->vddc_voltage_table);
   2138 		if (ret)
   2139 			return ret;
   2140 	}
   2141 
   2142 	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
   2143 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
   2144 							 &pi->vddc_voltage_table);
   2145 
   2146 	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
   2147 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
   2148 						    VOLTAGE_OBJ_GPIO_LUT,
   2149 						    &pi->vddci_voltage_table);
   2150 		if (ret)
   2151 			return ret;
   2152 	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
   2153 		ret = ci_get_svi2_voltage_table(rdev,
   2154 						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
   2155 						&pi->vddci_voltage_table);
   2156 		if (ret)
   2157 			return ret;
   2158 	}
   2159 
   2160 	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
   2161 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
   2162 							 &pi->vddci_voltage_table);
   2163 
   2164 	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
   2165 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
   2166 						    VOLTAGE_OBJ_GPIO_LUT,
   2167 						    &pi->mvdd_voltage_table);
   2168 		if (ret)
   2169 			return ret;
   2170 	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
   2171 		ret = ci_get_svi2_voltage_table(rdev,
   2172 						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
   2173 						&pi->mvdd_voltage_table);
   2174 		if (ret)
   2175 			return ret;
   2176 	}
   2177 
   2178 	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
   2179 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
   2180 							 &pi->mvdd_voltage_table);
   2181 
   2182 	return 0;
   2183 }
   2184 
/*
 * Convert one atom voltage-table entry into an SMC voltage level.
 * The Hi/Lo SIDD values come from the CAC leakage lookup when it
 * succeeds; otherwise both fall back to the entry's own voltage.
 * All three fields are stored big-endian for the SMC (the SIDD
 * fields are byte-swapped in place, so they must not be read back
 * as CPU-endian afterwards).
 */
static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
					  struct atom_voltage_table_entry *voltage_table,
					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
{
	int ret;

	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
					    &smc_voltage_table->StdVoltageHiSidd,
					    &smc_voltage_table->StdVoltageLoSidd);

	if (ret) {
		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
	}

	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
	smc_voltage_table->StdVoltageHiSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
	smc_voltage_table->StdVoltageLoSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
}
   2206 
   2207 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
   2208 				      SMU7_Discrete_DpmTable *table)
   2209 {
   2210 	struct ci_power_info *pi = ci_get_pi(rdev);
   2211 	unsigned int count;
   2212 
   2213 	table->VddcLevelCount = pi->vddc_voltage_table.count;
   2214 	for (count = 0; count < table->VddcLevelCount; count++) {
   2215 		ci_populate_smc_voltage_table(rdev,
   2216 					      &pi->vddc_voltage_table.entries[count],
   2217 					      &table->VddcLevel[count]);
   2218 
   2219 		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
   2220 			table->VddcLevel[count].Smio |=
   2221 				pi->vddc_voltage_table.entries[count].smio_low;
   2222 		else
   2223 			table->VddcLevel[count].Smio = 0;
   2224 	}
   2225 	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
   2226 
   2227 	return 0;
   2228 }
   2229 
   2230 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
   2231 				       SMU7_Discrete_DpmTable *table)
   2232 {
   2233 	unsigned int count;
   2234 	struct ci_power_info *pi = ci_get_pi(rdev);
   2235 
   2236 	table->VddciLevelCount = pi->vddci_voltage_table.count;
   2237 	for (count = 0; count < table->VddciLevelCount; count++) {
   2238 		ci_populate_smc_voltage_table(rdev,
   2239 					      &pi->vddci_voltage_table.entries[count],
   2240 					      &table->VddciLevel[count]);
   2241 
   2242 		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
   2243 			table->VddciLevel[count].Smio |=
   2244 				pi->vddci_voltage_table.entries[count].smio_low;
   2245 		else
   2246 			table->VddciLevel[count].Smio = 0;
   2247 	}
   2248 	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
   2249 
   2250 	return 0;
   2251 }
   2252 
   2253 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
   2254 				      SMU7_Discrete_DpmTable *table)
   2255 {
   2256 	struct ci_power_info *pi = ci_get_pi(rdev);
   2257 	unsigned int count;
   2258 
   2259 	table->MvddLevelCount = pi->mvdd_voltage_table.count;
   2260 	for (count = 0; count < table->MvddLevelCount; count++) {
   2261 		ci_populate_smc_voltage_table(rdev,
   2262 					      &pi->mvdd_voltage_table.entries[count],
   2263 					      &table->MvddLevel[count]);
   2264 
   2265 		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
   2266 			table->MvddLevel[count].Smio |=
   2267 				pi->mvdd_voltage_table.entries[count].smio_low;
   2268 		else
   2269 			table->MvddLevel[count].Smio = 0;
   2270 	}
   2271 	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
   2272 
   2273 	return 0;
   2274 }
   2275 
   2276 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
   2277 					  SMU7_Discrete_DpmTable *table)
   2278 {
   2279 	int ret;
   2280 
   2281 	ret = ci_populate_smc_vddc_table(rdev, table);
   2282 	if (ret)
   2283 		return ret;
   2284 
   2285 	ret = ci_populate_smc_vddci_table(rdev, table);
   2286 	if (ret)
   2287 		return ret;
   2288 
   2289 	ret = ci_populate_smc_mvdd_table(rdev, table);
   2290 	if (ret)
   2291 		return ret;
   2292 
   2293 	return 0;
   2294 }
   2295 
/*
 * Look up the MVDD voltage level for a memory clock: when MVDD control
 * is active, take the voltage-table entry matching the first
 * dependency entry with clk >= mclk; a clock above the whole table
 * fails with -EINVAL.
 *
 * NOTE(review): the final return is -EINVAL even when a voltage was
 * found, so callers only get voltage->Voltage as a side effect while
 * the return value is always non-zero here.  Callers appear to treat
 * non-zero as "no MVDD value" — confirm before changing this to 0.
 */
static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;
	}

	return -EINVAL;
}
   2316 
   2317 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
   2318 					 struct atom_voltage_table_entry *voltage_table,
   2319 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
   2320 {
   2321 	u16 v_index, idx;
   2322 	bool voltage_found = false;
   2323 	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
   2324 	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
   2325 
   2326 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
   2327 		return -EINVAL;
   2328 
   2329 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
   2330 		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
   2331 			if (voltage_table->value ==
   2332 			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
   2333 				voltage_found = true;
   2334 				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
   2335 					idx = v_index;
   2336 				else
   2337 					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
   2338 				*std_voltage_lo_sidd =
   2339 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
   2340 				*std_voltage_hi_sidd =
   2341 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
   2342 				break;
   2343 			}
   2344 		}
   2345 
   2346 		if (!voltage_found) {
   2347 			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
   2348 				if (voltage_table->value <=
   2349 				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
   2350 					voltage_found = true;
   2351 					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
   2352 						idx = v_index;
   2353 					else
   2354 						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
   2355 					*std_voltage_lo_sidd =
   2356 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
   2357 					*std_voltage_hi_sidd =
   2358 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
   2359 					break;
   2360 				}
   2361 			}
   2362 		}
   2363 	}
   2364 
   2365 	return 0;
   2366 }
   2367 
   2368 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
   2369 						  const struct radeon_phase_shedding_limits_table *limits,
   2370 						  u32 sclk,
   2371 						  u32 *phase_shedding)
   2372 {
   2373 	unsigned int i;
   2374 
   2375 	*phase_shedding = 1;
   2376 
   2377 	for (i = 0; i < limits->count; i++) {
   2378 		if (sclk < limits->entries[i].sclk) {
   2379 			*phase_shedding = i;
   2380 			break;
   2381 		}
   2382 	}
   2383 }
   2384 
   2385 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
   2386 						  const struct radeon_phase_shedding_limits_table *limits,
   2387 						  u32 mclk,
   2388 						  u32 *phase_shedding)
   2389 {
   2390 	unsigned int i;
   2391 
   2392 	*phase_shedding = 1;
   2393 
   2394 	for (i = 0; i < limits->count; i++) {
   2395 		if (mclk < limits->entries[i].mclk) {
   2396 			*phase_shedding = i;
   2397 			break;
   2398 		}
   2399 	}
   2400 }
   2401 
   2402 static int ci_init_arb_table_index(struct radeon_device *rdev)
   2403 {
   2404 	struct ci_power_info *pi = ci_get_pi(rdev);
   2405 	u32 tmp;
   2406 	int ret;
   2407 
   2408 	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
   2409 				     &tmp, pi->sram_end);
   2410 	if (ret)
   2411 		return ret;
   2412 
   2413 	tmp &= 0x00FFFFFF;
   2414 	tmp |= MC_CG_ARB_FREQ_F1 << 24;
   2415 
   2416 	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
   2417 				       tmp, pi->sram_end);
   2418 }
   2419 
   2420 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
   2421 					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
   2422 					 u32 clock, u32 *voltage)
   2423 {
   2424 	u32 i = 0;
   2425 
   2426 	if (allowed_clock_voltage_table->count == 0)
   2427 		return -EINVAL;
   2428 
   2429 	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
   2430 		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
   2431 			*voltage = allowed_clock_voltage_table->entries[i].v;
   2432 			return 0;
   2433 		}
   2434 	}
   2435 
   2436 	*voltage = allowed_clock_voltage_table->entries[i-1].v;
   2437 
   2438 	return 0;
   2439 }
   2440 
   2441 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
   2442 					     u32 sclk, u32 min_sclk_in_sr)
   2443 {
   2444 	u32 i;
   2445 	u32 tmp;
   2446 	u32 vmin = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
   2447 		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
   2448 
   2449 	if (sclk < vmin)
   2450 		return 0;
   2451 
   2452 	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
   2453 		tmp = sclk / (1 << i);
   2454 		if (tmp >= vmin || i == 0)
   2455 			break;
   2456 	}
   2457 
   2458 	return (u8)i;
   2459 }
   2460 
/* Hand the MC arbiter over from set F0 to set F1. */
static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
   2465 
   2466 static int ci_reset_to_default(struct radeon_device *rdev)
   2467 {
   2468 	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
   2469 		0 : -EINVAL;
   2470 }
   2471 
   2472 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
   2473 {
   2474 	u32 tmp;
   2475 
   2476 	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
   2477 
   2478 	if (tmp == MC_CG_ARB_FREQ_F0)
   2479 		return 0;
   2480 
   2481 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
   2482 }
   2483 
   2484 static void ci_register_patching_mc_arb(struct radeon_device *rdev,
   2485 					const u32 engine_clock,
   2486 					const u32 memory_clock,
   2487 					u32 *dram_timimg2)
   2488 {
   2489 	bool patch;
   2490 	u32 tmp, tmp2;
   2491 
   2492 	tmp = RREG32(MC_SEQ_MISC0);
   2493 	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
   2494 
   2495 	if (patch &&
   2496 	    ((rdev->pdev->device == 0x67B0) ||
   2497 	     (rdev->pdev->device == 0x67B1))) {
   2498 		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
   2499 			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
   2500 			*dram_timimg2 &= ~0x00ff0000;
   2501 			*dram_timimg2 |= tmp2 << 16;
   2502 		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
   2503 			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
   2504 			*dram_timimg2 &= ~0x00ff0000;
   2505 			*dram_timimg2 |= tmp2 << 16;
   2506 		}
   2507 	}
   2508 }
   2509 
   2510 
/*
 * Capture the MC arbiter DRAM timing registers for one (sclk, mclk)
 * pair into an SMC table entry: program the hardware timings for the
 * requested clocks via the atom tables, read back the resulting
 * MC_ARB_* values, apply the board-specific patch, and store them
 * big-endian for the SMC.  Always returns 0.
 */
static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
						u32 sclk,
						u32 mclk,
						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);

	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;

	ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);

	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
	arb_regs->McArbBurstTime = (u8)burst_time;

	return 0;
}
   2534 
   2535 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
   2536 {
   2537 	struct ci_power_info *pi = ci_get_pi(rdev);
   2538 	SMU7_Discrete_MCArbDramTimingTable arb_regs;
   2539 	u32 i, j;
   2540 	int ret =  0;
   2541 
   2542 	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
   2543 
   2544 	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
   2545 		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
   2546 			ret = ci_populate_memory_timing_parameters(rdev,
   2547 								   pi->dpm_table.sclk_table.dpm_levels[i].value,
   2548 								   pi->dpm_table.mclk_table.dpm_levels[j].value,
   2549 								   &arb_regs.entries[i][j]);
   2550 			if (ret)
   2551 				break;
   2552 		}
   2553 	}
   2554 
   2555 	if (ret == 0)
   2556 		ret = ci_copy_bytes_to_smc(rdev,
   2557 					   pi->arb_table_start,
   2558 					   (u8 *)&arb_regs,
   2559 					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
   2560 					   pi->sram_end);
   2561 
   2562 	return ret;
   2563 }
   2564 
   2565 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
   2566 {
   2567 	struct ci_power_info *pi = ci_get_pi(rdev);
   2568 
   2569 	if (pi->need_update_smu7_dpm_table == 0)
   2570 		return 0;
   2571 
   2572 	return ci_do_program_memory_timing_parameters(rdev);
   2573 }
   2574 
   2575 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
   2576 					  struct radeon_ps *radeon_boot_state)
   2577 {
   2578 	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
   2579 	struct ci_power_info *pi = ci_get_pi(rdev);
   2580 	u32 level = 0;
   2581 
   2582 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
   2583 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
   2584 		    boot_state->performance_levels[0].sclk) {
   2585 			pi->smc_state_table.GraphicsBootLevel = level;
   2586 			break;
   2587 		}
   2588 	}
   2589 
   2590 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
   2591 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
   2592 		    boot_state->performance_levels[0].mclk) {
   2593 			pi->smc_state_table.MemoryBootLevel = level;
   2594 			break;
   2595 		}
   2596 	}
   2597 }
   2598 
   2599 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
   2600 {
   2601 	u32 i;
   2602 	u32 mask_value = 0;
   2603 
   2604 	for (i = dpm_table->count; i > 0; i--) {
   2605 		mask_value = mask_value << 1;
   2606 		if (dpm_table->dpm_levels[i-1].enabled)
   2607 			mask_value |= 0x1;
   2608 		else
   2609 			mask_value &= 0xFFFFFFFE;
   2610 	}
   2611 
   2612 	return mask_value;
   2613 }
   2614 
   2615 static void ci_populate_smc_link_level(struct radeon_device *rdev,
   2616 				       SMU7_Discrete_DpmTable *table)
   2617 {
   2618 	struct ci_power_info *pi = ci_get_pi(rdev);
   2619 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
   2620 	u32 i;
   2621 
   2622 	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
   2623 		table->LinkLevel[i].PcieGenSpeed =
   2624 			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
   2625 		table->LinkLevel[i].PcieLaneCount =
   2626 			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
   2627 		table->LinkLevel[i].EnabledForActivity = 1;
   2628 		table->LinkLevel[i].DownT = cpu_to_be32(5);
   2629 		table->LinkLevel[i].UpT = cpu_to_be32(30);
   2630 	}
   2631 
   2632 	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
   2633 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
   2634 		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
   2635 }
   2636 
/*
 * Fill the UVD levels of the SMC DPM table from the
 * uvd_clock_voltage_dependency_table: VCLK/DCLK frequencies, minimum
 * VDDC, and the atom-computed post dividers.  Multi-byte fields are
 * converted to big-endian at the end of each iteration.
 *
 * NOTE(review): ret starts at -EINVAL, so an empty dependency table
 * returns -EINVAL without touching any level — confirm callers treat
 * that as "no UVD levels" rather than a hard failure.
 */
static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->UvdLevelCount =
		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
		table->UvdLevel[count].MinVddc =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].VclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].DclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;

		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
	}

	return ret;
}
   2679 
/*
 * Fill the SMC VCE (video encode) dpm levels from the ATOM
 * vce_clock_voltage_dependency_table.  Same pattern as the UVD levels:
 * the frequency is used in CPU byte order for the divider lookup and
 * byte-swapped for the SMC at the end of each iteration.
 *
 * Returns 0 on success or a negative error code; an empty dependency
 * table yields the initial -EINVAL (loop body never runs).
 */
static int ci_populate_smc_vce_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->VceLevelCount =
		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
		table->VceLevel[count].MinVoltage =
			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->VceLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->VceLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->VceLevel[count].Divider = (u8)dividers.post_divider;

		/* byte-swap in place for the big-endian SMC */
		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
	}

	return ret;

}
   2712 
/*
 * Fill the SMC ACP (audio co-processor) dpm levels from the ATOM
 * acp_clock_voltage_dependency_table.  Same divider/byte-swap pattern
 * as the UVD/VCE/SAMU levels.
 *
 * NOTE(review): unlike the UVD/VCE/SAMU levels, MinVoltage here is NOT
 * multiplied by VOLTAGE_SCALE before the be16 swap — confirm whether
 * the SMU firmware expects raw voltage for ACP or this is an oversight.
 *
 * Returns 0 on success or a negative error code; an empty dependency
 * table yields the initial -EINVAL (loop body never runs).
 */
static int ci_populate_smc_acp_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->AcpLevelCount = (u8)
		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		table->AcpLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->AcpLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->AcpLevel[count].Divider = (u8)dividers.post_divider;

		/* byte-swap in place for the big-endian SMC */
		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
	}

	return ret;
}
   2744 
/*
 * Fill the SMC SAMU (security processor) dpm levels from the ATOM
 * samu_clock_voltage_dependency_table.  Same divider/byte-swap pattern
 * as the UVD/VCE levels.
 *
 * Returns 0 on success or a negative error code; an empty dependency
 * table yields the initial -EINVAL (loop body never runs).
 */
static int ci_populate_smc_samu_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->SamuLevelCount =
		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;

	for (count = 0; count < table->SamuLevelCount; count++) {
		table->SamuLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
		table->SamuLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->SamuLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->SamuLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->SamuLevel[count].Divider = (u8)dividers.post_divider;

		/* byte-swap in place for the big-endian SMC */
		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
	}

	return ret;
}
   2776 
/*
 * Compute the MPLL register settings for a target memory clock and store
 * them in the given SMU7_Discrete_MemoryLevel (all fields in CPU byte
 * order — the caller performs the big-endian conversion).
 *
 * Starting from the power-on register values cached in pi->clock_registers,
 * the feedback/post dividers come from the ATOM memory PLL divider table,
 * optional memory spread spectrum (CLKS/CLKV) is folded in, and the DLL
 * speed/power-down bits are set from @dll_state_on.
 *
 * Returns 0 on success or the error from the ATOM divider lookup.
 */
static int ci_calculate_mclk_params(struct radeon_device *rdev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	/* start from the power-on defaults captured at driver init */
	u32  dll_cntl = pi->clock_registers.dll_cntl;
	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	/* GDDR5 also needs the DQ branch of the MPLL programmed */
	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->caps_mclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = rdev->clock.mpll.reference_freq;

		/* nominal VCO frequency: 4x (QDR) or 2x per post-div step */
		if (mpll_param.qdr == 1)
			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	/* PDNB bits are active-low power-downs for the two memory DLLs */
	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
   2863 
/*
 * Populate one SMC memory dpm level for @memory_clock: minimum voltages
 * from the mclk dependency tables, stutter/strobe/EDC feature decisions,
 * then the MPLL register image via ci_calculate_mclk_params().  All
 * multi-byte fields are converted to the SMC's big-endian layout at the
 * end, so everything above the conversion block works in CPU byte order.
 *
 * Returns 0 on success or a negative error code.
 */
static int ci_populate_single_memory_level(struct radeon_device *rdev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;
	bool dll_state_on;

	/* minimum VDDC/VDDCI/MVDD for this mclk, when the tables exist */
	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForThrottle = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* stutter only below the threshold, with UVD idle, display stutter
	 * enabled, and no more than two active crtcs */
	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (pi->uvd_enabled == false) &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = 1;

	if (pi->mem_gddr5) {
		memory_level->StrobeRatio =
			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		/* DLL on/off decision comes from the MC fuse registers */
		if (memory_level->StrobeEnable) {
			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	/* byte-swap everything for the big-endian SMC */
	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
        memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
        memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}
   2977 
/*
 * Populate the SMC ACPI (lowest-power) sclk and mclk levels: SPLL is
 * parked (power off + reset, mux to source 4), memory DLLs are reset and
 * powered down, and the minimum ACPI voltages come from pi->acpi_vddc/
 * acpi_vddci with the pp-table minimums as fallback.  Fields are
 * converted to the SMC's big-endian layout as they are finalized.
 *
 * Returns 0 on success or the error from the ATOM divider lookup.
 */
static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	SMU7_Discrete_VoltageLevel voltage_level;
	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	int ret;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	/* ACPI sclk runs straight off the SPLL reference clock */
	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* power down and reset the SPLL, select the bypass mux input */
	spll_func_cntl &= ~SPLL_PWRON;
	spll_func_cntl |= SPLL_RESET;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* byte-swap the sclk ACPI level for the big-endian SMC */
	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	/* memory ACPI level: reuse the already-swapped sclk values */
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	/* reset and power down the memory DLLs (PDNB is active low) */
	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	table->MemoryACPILevel.ActivityLevel =
		cpu_to_be16((u16)pi->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	table->MemoryACPILevel.StrobeEnable = false;
	table->MemoryACPILevel.EdcReadEnable = false;
	table->MemoryACPILevel.EdcWriteEnable = false;
	table->MemoryACPILevel.RttEnable = false;

	return 0;
}
   3092 
   3093 
   3094 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
   3095 {
   3096 	struct ci_power_info *pi = ci_get_pi(rdev);
   3097 	struct ci_ulv_parm *ulv = &pi->ulv;
   3098 
   3099 	if (ulv->supported) {
   3100 		if (enable)
   3101 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
   3102 				0 : -EINVAL;
   3103 		else
   3104 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
   3105 				0 : -EINVAL;
   3106 	}
   3107 
   3108 	return 0;
   3109 }
   3110 
/*
 * Populate the SMC ULV (ultra-low voltage) state.  The ULV target
 * voltage is expressed as an offset below the lowest sclk dependency
 * entry, either as a raw voltage offset (GPIO/none control) or as a VID
 * offset (SVID2 control).  A zero ULV voltage disables ULV support.
 *
 * NOTE(review): the ULV voltage is read from
 * rdev->pm.dpm.backbias_response_time — presumably the atombios parser
 * stashes it there; confirm against the dpm setup code.
 *
 * Always returns 0.
 */
static int ci_populate_ulv_level(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	if (ulv_voltage == 0) {
		pi->ulv.supported = false;
		return 0;
	}

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		/* offset in raw voltage units; clamp to 0 if ULV would be
		 * above the lowest dependency-table voltage */
		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffset = 0;
		else
			state->VddcOffset =
				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
	} else {
		/* SVID2: offset expressed in VID steps */
		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffsetVid = 0;
		else
			state->VddcOffsetVid = (u8)
				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;

	/* byte-swap for the big-endian SMC */
	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
	state->VddcOffset = cpu_to_be16(state->VddcOffset);

	return 0;
}
   3147 
/*
 * Compute the SPLL register settings for a target engine clock and store
 * them in the given SMU7_Discrete_GraphicsLevel (all fields in CPU byte
 * order — the caller performs the big-endian conversion).  Feedback and
 * post dividers come from the ATOM divider tables; optional engine
 * spread spectrum (CLK_S/CLK_V) is folded in when supported.
 *
 * Returns 0 on success or the error from the ATOM divider lookup.
 */
static int ci_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
        spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->caps_sclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}
   3203 
/*
 * Populate one SMC graphics dpm level for @engine_clock: SPLL register
 * image via ci_calculate_sclk_params(), minimum VDDC from the sclk
 * dependency table, activity target and throttle defaults, optional
 * deep-sleep divider.  All multi-byte fields are converted to the SMC's
 * big-endian layout at the end, so the code above the conversion block
 * works in CPU byte order.
 *
 * Returns 0 on success or a negative error code.
 */
static int ci_populate_single_graphic_level(struct radeon_device *rdev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
	if (ret)
		return ret;

	ret = ci_get_dependency_volt_by_clk(rdev,
					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags =  0;
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
										   engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* byte-swap everything for the big-endian SMC */
	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
        graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

	return 0;
}
   3264 
   3265 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
   3266 {
   3267 	struct ci_power_info *pi = ci_get_pi(rdev);
   3268 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
   3269 	u32 level_array_address = pi->dpm_table_start +
   3270 		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
   3271 	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
   3272 		SMU7_MAX_LEVELS_GRAPHICS;
   3273 	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
   3274 	u32 i, ret;
   3275 
   3276 	memset(levels, 0, level_array_size);
   3277 
   3278 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
   3279 		ret = ci_populate_single_graphic_level(rdev,
   3280 						       dpm_table->sclk_table.dpm_levels[i].value,
   3281 						       (u16)pi->activity_target[i],
   3282 						       &pi->smc_state_table.GraphicsLevel[i]);
   3283 		if (ret)
   3284 			return ret;
   3285 		if (i > 1)
   3286 			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
   3287 		if (i == (dpm_table->sclk_table.count - 1))
   3288 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
   3289 				PPSMC_DISPLAY_WATERMARK_HIGH;
   3290 	}
   3291 	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
   3292 
   3293 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
   3294 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
   3295 		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
   3296 
   3297 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
   3298 				   (u8 *)levels, level_array_size,
   3299 				   pi->sram_end);
   3300 	if (ret)
   3301 		return ret;
   3302 
   3303 	return 0;
   3304 }
   3305 
/*
 * Populate the SMC ULV state.  The ULV state is fully described by a
 * single SMU7_Discrete_Ulv record, so this just delegates to the level
 * populator.  Returns 0 (ci_populate_ulv_level() cannot fail).
 */
static int ci_populate_ulv_state(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(rdev, ulv_level);
}
   3311 
   3312 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
   3313 {
   3314 	struct ci_power_info *pi = ci_get_pi(rdev);
   3315 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
   3316 	u32 level_array_address = pi->dpm_table_start +
   3317 		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
   3318 	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
   3319 		SMU7_MAX_LEVELS_MEMORY;
   3320 	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
   3321 	u32 i, ret;
   3322 
   3323 	memset(levels, 0, level_array_size);
   3324 
   3325 	for (i = 0; i < dpm_table->mclk_table.count; i++) {
   3326 		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
   3327 			return -EINVAL;
   3328 		ret = ci_populate_single_memory_level(rdev,
   3329 						      dpm_table->mclk_table.dpm_levels[i].value,
   3330 						      &pi->smc_state_table.MemoryLevel[i]);
   3331 		if (ret)
   3332 			return ret;
   3333 	}
   3334 
   3335 	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
   3336 
   3337 	if ((dpm_table->mclk_table.count >= 2) &&
   3338 	    ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
   3339 		pi->smc_state_table.MemoryLevel[1].MinVddc =
   3340 			pi->smc_state_table.MemoryLevel[0].MinVddc;
   3341 		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
   3342 			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
   3343 	}
   3344 
   3345 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
   3346 
   3347 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
   3348 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
   3349 		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
   3350 
   3351 	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
   3352 		PPSMC_DISPLAY_WATERMARK_HIGH;
   3353 
   3354 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
   3355 				   (u8 *)levels, level_array_size,
   3356 				   pi->sram_end);
   3357 	if (ret)
   3358 		return ret;
   3359 
   3360 	return 0;
   3361 }
   3362 
   3363 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
   3364 				      struct ci_single_dpm_table* dpm_table,
   3365 				      u32 count)
   3366 {
   3367 	u32 i;
   3368 
   3369 	dpm_table->count = count;
   3370 	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
   3371 		dpm_table->dpm_levels[i].enabled = false;
   3372 }
   3373 
   3374 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
   3375 				      u32 index, u32 pcie_gen, u32 pcie_lanes)
   3376 {
   3377 	dpm_table->dpm_levels[index].value = pcie_gen;
   3378 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
   3379 	dpm_table->dpm_levels[index].enabled = true;
   3380 }
   3381 
/*
 * Build the default six-entry PCIE speed dpm table from the performance
 * and powersaving gen/lane ranges.  If only one of the two level sets is
 * in use, the other is mirrored from it.  Levels alternate powersaving/
 * performance from min to max.
 *
 * Note the Bonaire quirk: entry 0 uses the maximum powersaving lane
 * count instead of the minimum.
 *
 * Returns 0 on success, -EINVAL if neither level set is enabled.
 */
static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
		return -EINVAL;

	/* mirror whichever set is missing from the one that is present */
	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
	}

	ci_reset_single_dpm_table(rdev,
				  &pi->dpm_table.pcie_speed_table,
				  SMU7_MAX_LEVELS_LINK);

	if (rdev->family == CHIP_BONAIRE)
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.max);
	else
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
				  pi->pcie_gen_powersaving.min,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
				  pi->pcie_gen_powersaving.max,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
				  pi->pcie_gen_performance.max,
				  pi->pcie_lane_performance.max);

	pi->dpm_table.pcie_speed_table.count = 6;

	return 0;
}
   3429 
   3430 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
   3431 {
   3432 	struct ci_power_info *pi = ci_get_pi(rdev);
   3433 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
   3434 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
   3435 	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
   3436 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
   3437 	struct radeon_cac_leakage_table *std_voltage_table =
   3438 		&rdev->pm.dpm.dyn_state.cac_leakage_table;
   3439 	u32 i;
   3440 
   3441 	if (allowed_sclk_vddc_table == NULL)
   3442 		return -EINVAL;
   3443 	if (allowed_sclk_vddc_table->count < 1)
   3444 		return -EINVAL;
   3445 	if (allowed_mclk_table == NULL)
   3446 		return -EINVAL;
   3447 	if (allowed_mclk_table->count < 1)
   3448 		return -EINVAL;
   3449 
   3450 	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
   3451 
   3452 	ci_reset_single_dpm_table(rdev,
   3453 				  &pi->dpm_table.sclk_table,
   3454 				  SMU7_MAX_LEVELS_GRAPHICS);
   3455 	ci_reset_single_dpm_table(rdev,
   3456 				  &pi->dpm_table.mclk_table,
   3457 				  SMU7_MAX_LEVELS_MEMORY);
   3458 	ci_reset_single_dpm_table(rdev,
   3459 				  &pi->dpm_table.vddc_table,
   3460 				  SMU7_MAX_LEVELS_VDDC);
   3461 	ci_reset_single_dpm_table(rdev,
   3462 				  &pi->dpm_table.vddci_table,
   3463 				  SMU7_MAX_LEVELS_VDDCI);
   3464 	ci_reset_single_dpm_table(rdev,
   3465 				  &pi->dpm_table.mvdd_table,
   3466 				  SMU7_MAX_LEVELS_MVDD);
   3467 
   3468 	pi->dpm_table.sclk_table.count = 0;
   3469 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
   3470 		if ((i == 0) ||
   3471 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
   3472 		     allowed_sclk_vddc_table->entries[i].clk)) {
   3473 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
   3474 				allowed_sclk_vddc_table->entries[i].clk;
   3475 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
   3476 				(i == 0) ? true : false;
   3477 			pi->dpm_table.sclk_table.count++;
   3478 		}
   3479 	}
   3480 
   3481 	pi->dpm_table.mclk_table.count = 0;
   3482 	for (i = 0; i < allowed_mclk_table->count; i++) {
   3483 		if ((i == 0) ||
   3484 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
   3485 		     allowed_mclk_table->entries[i].clk)) {
   3486 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
   3487 				allowed_mclk_table->entries[i].clk;
   3488 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
   3489 				(i == 0) ? true : false;
   3490 			pi->dpm_table.mclk_table.count++;
   3491 		}
   3492 	}
   3493 
   3494 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
   3495 		pi->dpm_table.vddc_table.dpm_levels[i].value =
   3496 			allowed_sclk_vddc_table->entries[i].v;
   3497 		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
   3498 			std_voltage_table->entries[i].leakage;
   3499 		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
   3500 	}
   3501 	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
   3502 
   3503 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
   3504 	if (allowed_mclk_table) {
   3505 		for (i = 0; i < allowed_mclk_table->count; i++) {
   3506 			pi->dpm_table.vddci_table.dpm_levels[i].value =
   3507 				allowed_mclk_table->entries[i].v;
   3508 			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
   3509 		}
   3510 		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
   3511 	}
   3512 
   3513 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
   3514 	if (allowed_mclk_table) {
   3515 		for (i = 0; i < allowed_mclk_table->count; i++) {
   3516 			pi->dpm_table.mvdd_table.dpm_levels[i].value =
   3517 				allowed_mclk_table->entries[i].v;
   3518 			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
   3519 		}
   3520 		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
   3521 	}
   3522 
   3523 	ci_setup_default_pcie_tables(rdev);
   3524 
   3525 	return 0;
   3526 }
   3527 
   3528 static int ci_find_boot_level(struct ci_single_dpm_table *table,
   3529 			      u32 value, u32 *boot_level)
   3530 {
   3531 	u32 i;
   3532 	int ret = -EINVAL;
   3533 
   3534 	for(i = 0; i < table->count; i++) {
   3535 		if (value == table->dpm_levels[i].value) {
   3536 			*boot_level = i;
   3537 			ret = 0;
   3538 		}
   3539 	}
   3540 
   3541 	return ret;
   3542 }
   3543 
/*
 * ci_init_smc_table - build the SMU7 discrete DPM table and upload it
 * to the SMC.
 *
 * Builds the default DPM tables, fills every level/bootup/interval
 * field of pi->smc_state_table, byte-swaps the multi-byte members to
 * the SMC's expected (big-endian) layout, then copies the table body
 * (everything from SystemFlags onward, minus the trailing three PID
 * controller blocks) into SMC RAM.
 *
 * Returns 0 on success or a negative error code from any populate or
 * copy step.
 */
static int ci_init_smc_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ulv_parm *ulv = &pi->ulv;
	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
	int ret;

	ret = ci_setup_default_dpm_tables(rdev);
	if (ret)
		return ret;

	/* Voltage tables only apply when some voltage control is in use. */
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
		ci_populate_smc_voltage_tables(rdev, table);

	ci_init_fps_limits(rdev);

	/* Platform capability flags forwarded to the SMC. */
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	/* Ultra-low-voltage state, when the board supports it. */
	if (ulv->supported) {
		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
		if (ret)
			return ret;
		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	ret = ci_populate_all_graphic_levels(rdev);
	if (ret)
		return ret;

	ret = ci_populate_all_memory_levels(rdev);
	if (ret)
		return ret;

	ci_populate_smc_link_level(rdev, table);

	ret = ci_populate_smc_acpi_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(rdev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(rdev, table);
	if (ret)
		return ret;

	table->UvdBootLevel  = 0;
	table->VceBootLevel  = 0;
	table->AcpBootLevel  = 0;
	table->SamuBootLevel  = 0;
	table->GraphicsBootLevel  = 0;
	table->MemoryBootLevel  = 0;

	/* NOTE(review): both ci_find_boot_level() returns are ignored;
	 * on a miss the boot level simply stays 0 — presumably
	 * intentional fallback, confirm before adding error handling. */
	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(rdev, radeon_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
	if (ret)
		return ret;

	/* Fixed intervals / thresholds for the SMC's internal controllers. */
	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	/* Thermal limits are converted to Q8.8 fixed point (degrees). */
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable  = 1;
	else
		table->SVI2Enable  = 0;

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* Swap multi-byte fields to the byte order the SMC expects. */
	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	/* Upload everything from SystemFlags to the end of the table,
	 * excluding the three trailing SMU7_PIDController blocks. */
	ret = ci_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
				   (u8 *)&table->SystemFlags,
				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
				   pi->sram_end);
	if (ret)
		return ret;

	return 0;
}
   3691 
   3692 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
   3693 				      struct ci_single_dpm_table *dpm_table,
   3694 				      u32 low_limit, u32 high_limit)
   3695 {
   3696 	u32 i;
   3697 
   3698 	for (i = 0; i < dpm_table->count; i++) {
   3699 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
   3700 		    (dpm_table->dpm_levels[i].value > high_limit))
   3701 			dpm_table->dpm_levels[i].enabled = false;
   3702 		else
   3703 			dpm_table->dpm_levels[i].enabled = true;
   3704 	}
   3705 }
   3706 
   3707 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
   3708 				    u32 speed_low, u32 lanes_low,
   3709 				    u32 speed_high, u32 lanes_high)
   3710 {
   3711 	struct ci_power_info *pi = ci_get_pi(rdev);
   3712 	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
   3713 	u32 i, j;
   3714 
   3715 	for (i = 0; i < pcie_table->count; i++) {
   3716 		if ((pcie_table->dpm_levels[i].value < speed_low) ||
   3717 		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
   3718 		    (pcie_table->dpm_levels[i].value > speed_high) ||
   3719 		    (pcie_table->dpm_levels[i].param1 > lanes_high))
   3720 			pcie_table->dpm_levels[i].enabled = false;
   3721 		else
   3722 			pcie_table->dpm_levels[i].enabled = true;
   3723 	}
   3724 
   3725 	for (i = 0; i < pcie_table->count; i++) {
   3726 		if (pcie_table->dpm_levels[i].enabled) {
   3727 			for (j = i + 1; j < pcie_table->count; j++) {
   3728 				if (pcie_table->dpm_levels[j].enabled) {
   3729 					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
   3730 					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
   3731 						pcie_table->dpm_levels[j].enabled = false;
   3732 				}
   3733 			}
   3734 		}
   3735 	}
   3736 }
   3737 
   3738 static int ci_trim_dpm_states(struct radeon_device *rdev,
   3739 			      struct radeon_ps *radeon_state)
   3740 {
   3741 	struct ci_ps *state = ci_get_ps(radeon_state);
   3742 	struct ci_power_info *pi = ci_get_pi(rdev);
   3743 	u32 high_limit_count;
   3744 
   3745 	if (state->performance_level_count < 1)
   3746 		return -EINVAL;
   3747 
   3748 	if (state->performance_level_count == 1)
   3749 		high_limit_count = 0;
   3750 	else
   3751 		high_limit_count = 1;
   3752 
   3753 	ci_trim_single_dpm_states(rdev,
   3754 				  &pi->dpm_table.sclk_table,
   3755 				  state->performance_levels[0].sclk,
   3756 				  state->performance_levels[high_limit_count].sclk);
   3757 
   3758 	ci_trim_single_dpm_states(rdev,
   3759 				  &pi->dpm_table.mclk_table,
   3760 				  state->performance_levels[0].mclk,
   3761 				  state->performance_levels[high_limit_count].mclk);
   3762 
   3763 	ci_trim_pcie_dpm_states(rdev,
   3764 				state->performance_levels[0].pcie_gen,
   3765 				state->performance_levels[0].pcie_lane,
   3766 				state->performance_levels[high_limit_count].pcie_gen,
   3767 				state->performance_levels[high_limit_count].pcie_lane);
   3768 
   3769 	return 0;
   3770 }
   3771 
   3772 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
   3773 {
   3774 	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
   3775 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
   3776 	struct radeon_clock_voltage_dependency_table *vddc_table =
   3777 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
   3778 	u32 requested_voltage = 0;
   3779 	u32 i;
   3780 
   3781 	if (disp_voltage_table == NULL)
   3782 		return -EINVAL;
   3783 	if (!disp_voltage_table->count)
   3784 		return -EINVAL;
   3785 
   3786 	for (i = 0; i < disp_voltage_table->count; i++) {
   3787 		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
   3788 			requested_voltage = disp_voltage_table->entries[i].v;
   3789 	}
   3790 
   3791 	for (i = 0; i < vddc_table->count; i++) {
   3792 		if (requested_voltage <= vddc_table->entries[i].v) {
   3793 			requested_voltage = vddc_table->entries[i].v;
   3794 			return (ci_send_msg_to_smc_with_parameter(rdev,
   3795 								  PPSMC_MSG_VddC_Request,
   3796 								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
   3797 				0 : -EINVAL;
   3798 		}
   3799 	}
   3800 
   3801 	return -EINVAL;
   3802 }
   3803 
/*
 * ci_upload_dpm_level_enable_mask - push the current sclk/mclk DPM
 * level enable masks to the SMC.
 *
 * The display's minimum-voltage request is forwarded first.
 * NOTE(review): its return value is ignored here — presumably a
 * best-effort request; confirm before turning it into a hard error.
 *
 * Returns 0 on success, -EINVAL if the SMC rejects either mask.
 */
static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result result;

	ci_apply_disp_minimum_voltage_request(rdev);

	/* An all-zero mask is skipped rather than sent. */
	if (!pi->sclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->mclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}
/* PCIe DPM mask upload is deliberately compiled out. */
#if 0
	if (!pi->pcie_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}
#endif
	return 0;
}
   3843 
/*
 * ci_find_dpm_states_clocks_in_dpm_table - decide which SMU7 DPM tables
 * need updating for the requested state.
 *
 * Checks whether the state's highest sclk/mclk already exist in the
 * current tables; sets DPMTABLE_OD_UPDATE_* flags when they do not
 * (overdrive clocks), and DPMTABLE_UPDATE_MCLK when the active CRTC
 * count changed.
 */
static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
						   struct radeon_ps *radeon_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *state = ci_get_ps(radeon_state);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	u32 i;

	pi->need_update_smu7_dpm_table = 0;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else {
		/* XXX check display min clock requirements */
		/* NOTE(review): this comparison is intentionally always
		 * false — a placeholder for a real display-min-clock
		 * check; DPMTABLE_UPDATE_SCLK is currently never set. */
		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	/* A CRTC count change alters memory bandwidth needs. */
	if (rdev->pm.dpm.current_active_crtc_count !=
	    rdev->pm.dpm.new_active_crtc_count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}
   3882 
   3883 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
   3884 						       struct radeon_ps *radeon_state)
   3885 {
   3886 	struct ci_power_info *pi = ci_get_pi(rdev);
   3887 	struct ci_ps *state = ci_get_ps(radeon_state);
   3888 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
   3889 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
   3890 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
   3891 	int ret;
   3892 
   3893 	if (!pi->need_update_smu7_dpm_table)
   3894 		return 0;
   3895 
   3896 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
   3897 		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
   3898 
   3899 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
   3900 		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
   3901 
   3902 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
   3903 		ret = ci_populate_all_graphic_levels(rdev);
   3904 		if (ret)
   3905 			return ret;
   3906 	}
   3907 
   3908 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
   3909 		ret = ci_populate_all_memory_levels(rdev);
   3910 		if (ret)
   3911 			return ret;
   3912 	}
   3913 
   3914 	return 0;
   3915 }
   3916 
/*
 * ci_enable_uvd_dpm - enable or disable UVD DPM in the SMC.
 *
 * On enable, builds the UVD level mask from the highest table entries
 * whose voltage fits the current (AC or DC) limit — only the top
 * fitting level when the board lacks UVD DPM support — and sends it to
 * the SMC.  MCLK DPM level 0 is masked off while UVD runs and restored
 * on disable (both keyed off last_mclk_dpm_enable_mask bit 0).
 *
 * Returns 0 if the final enable/disable message succeeds, else -EINVAL.
 */
static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

		/* Top-down scan: highest levels first; stop after one
		 * level when UVD DPM is unsupported. */
		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

				if (!pi->caps_uvd_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

		/* Keep MCLK out of level 0 while UVD is active. */
		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = true;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
			ci_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	} else {
		/* Restore MCLK level 0 if it was enabled before. */
		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = false;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
			ci_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
   3965 
   3966 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
   3967 {
   3968 	struct ci_power_info *pi = ci_get_pi(rdev);
   3969 	const struct radeon_clock_and_voltage_limits *max_limits;
   3970 	int i;
   3971 
   3972 	if (rdev->pm.dpm.ac_power)
   3973 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
   3974 	else
   3975 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
   3976 
   3977 	if (enable) {
   3978 		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
   3979 		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
   3980 			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
   3981 				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
   3982 
   3983 				if (!pi->caps_vce_dpm)
   3984 					break;
   3985 			}
   3986 		}
   3987 
   3988 		ci_send_msg_to_smc_with_parameter(rdev,
   3989 						  PPSMC_MSG_VCEDPM_SetEnabledMask,
   3990 						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
   3991 	}
   3992 
   3993 	return (ci_send_msg_to_smc(rdev, enable ?
   3994 				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
   3995 		0 : -EINVAL;
   3996 }
   3997 
/*
 * SAMU and ACP DPM enable helpers — compiled out; neither engine's DPM
 * is wired up in this driver.  Both mirror ci_enable_vce_dpm(): build a
 * level mask from the highest voltage-fitting table entries and send it
 * to the SMC before the enable/disable message.
 */
#if 0
static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

				/* Without SAMU DPM, keep only one level. */
				if (!pi->caps_samu_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
	}
	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				/* Without ACP DPM, keep only one level. */
				if (!pi->caps_acp_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_ACPDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif
   4062 
   4063 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
   4064 {
   4065 	struct ci_power_info *pi = ci_get_pi(rdev);
   4066 	u32 tmp;
   4067 
   4068 	if (!gate) {
   4069 		if (pi->caps_uvd_dpm ||
   4070 		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
   4071 			pi->smc_state_table.UvdBootLevel = 0;
   4072 		else
   4073 			pi->smc_state_table.UvdBootLevel =
   4074 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
   4075 
   4076 		tmp = RREG32_SMC(DPM_TABLE_475);
   4077 		tmp &= ~UvdBootLevel_MASK;
   4078 		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
   4079 		WREG32_SMC(DPM_TABLE_475, tmp);
   4080 	}
   4081 
   4082 	return ci_enable_uvd_dpm(rdev, !gate);
   4083 }
   4084 
   4085 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
   4086 {
   4087 	u8 i;
   4088 	u32 min_evclk = 30000; /* ??? */
   4089 	struct radeon_vce_clock_voltage_dependency_table *table =
   4090 		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
   4091 
   4092 	for (i = 0; i < table->count; i++) {
   4093 		if (table->entries[i].evclk >= min_evclk)
   4094 			return i;
   4095 	}
   4096 
   4097 	return table->count - 1;
   4098 }
   4099 
/*
 * ci_update_vce_dpm - react to a VCE clock change between power states.
 *
 * Only acts when the new state's evclk differs from the current one:
 * a non-zero evclk (encoding starting) disables VCE clockgating, writes
 * the VCE boot level into DPM_TABLE_475 and enables VCE DPM; a zero
 * evclk re-enables clockgating and disables VCE DPM.
 *
 * Returns 0, or the error from ci_enable_vce_dpm().
 */
static int ci_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 tmp;

	if (radeon_current_state->evclk != radeon_new_state->evclk) {
		if (radeon_new_state->evclk) {
			/* turn the clocks on when encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);

			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
			tmp = RREG32_SMC(DPM_TABLE_475);
			tmp &= ~VceBootLevel_MASK;
			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
			WREG32_SMC(DPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(rdev, true);
		} else {
			/* turn the clocks off when not encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);

			ret = ci_enable_vce_dpm(rdev, false);
		}
	}
	return ret;
}
   4129 
/*
 * SAMU/ACP DPM update helpers — compiled out along with their enable
 * counterparts above.  ci_update_acp_dpm() mirrors ci_update_uvd_dpm():
 * it sets boot level 0 in DPM_TABLE_475 when ungating.
 */
#if 0
static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	return ci_enable_samu_dpm(rdev, gate);
}

static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(DPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(DPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(rdev, !gate);
}
#endif
   4153 
   4154 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
   4155 					     struct radeon_ps *radeon_state)
   4156 {
   4157 	struct ci_power_info *pi = ci_get_pi(rdev);
   4158 	int ret;
   4159 
   4160 	ret = ci_trim_dpm_states(rdev, radeon_state);
   4161 	if (ret)
   4162 		return ret;
   4163 
   4164 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
   4165 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
   4166 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
   4167 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
   4168 	pi->last_mclk_dpm_enable_mask =
   4169 		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
   4170 	if (pi->uvd_enabled) {
   4171 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
   4172 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
   4173 	}
   4174 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
   4175 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
   4176 
   4177 	return 0;
   4178 }
   4179 
   4180 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
   4181 				       u32 level_mask)
   4182 {
   4183 	u32 level = 0;
   4184 
   4185 	while ((level_mask & (1 << level)) == 0)
   4186 		level++;
   4187 
   4188 	return level;
   4189 }
   4190 
   4191 
   4192 int ci_dpm_force_performance_level(struct radeon_device *rdev,
   4193 				   enum radeon_dpm_forced_level level)
   4194 {
   4195 	struct ci_power_info *pi = ci_get_pi(rdev);
   4196 	u32 tmp, levels, i;
   4197 	int ret;
   4198 
   4199 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
   4200 		if ((!pi->pcie_dpm_key_disabled) &&
   4201 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
   4202 			levels = 0;
   4203 			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
   4204 			while (tmp >>= 1)
   4205 				levels++;
   4206 			if (levels) {
   4207 				ret = ci_dpm_force_state_pcie(rdev, level);
   4208 				if (ret)
   4209 					return ret;
   4210 				for (i = 0; i < rdev->usec_timeout; i++) {
   4211 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
   4212 					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
   4213 					if (tmp == levels)
   4214 						break;
   4215 					udelay(1);
   4216 				}
   4217 			}
   4218 		}
   4219 		if ((!pi->sclk_dpm_key_disabled) &&
   4220 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
   4221 			levels = 0;
   4222 			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
   4223 			while (tmp >>= 1)
   4224 				levels++;
   4225 			if (levels) {
   4226 				ret = ci_dpm_force_state_sclk(rdev, levels);
   4227 				if (ret)
   4228 					return ret;
   4229 				for (i = 0; i < rdev->usec_timeout; i++) {
   4230 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
   4231 					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
   4232 					if (tmp == levels)
   4233 						break;
   4234 					udelay(1);
   4235 				}
   4236 			}
   4237 		}
   4238 		if ((!pi->mclk_dpm_key_disabled) &&
   4239 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
   4240 			levels = 0;
   4241 			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
   4242 			while (tmp >>= 1)
   4243 				levels++;
   4244 			if (levels) {
   4245 				ret = ci_dpm_force_state_mclk(rdev, levels);
   4246 				if (ret)
   4247 					return ret;
   4248 				for (i = 0; i < rdev->usec_timeout; i++) {
   4249 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
   4250 					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
   4251 					if (tmp == levels)
   4252 						break;
   4253 					udelay(1);
   4254 				}
   4255 			}
   4256 		}
   4257 	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
   4258 		if ((!pi->sclk_dpm_key_disabled) &&
   4259 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
   4260 			levels = ci_get_lowest_enabled_level(rdev,
   4261 							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
   4262 			ret = ci_dpm_force_state_sclk(rdev, levels);
   4263 			if (ret)
   4264 				return ret;
   4265 			for (i = 0; i < rdev->usec_timeout; i++) {
   4266 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
   4267 				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
   4268 				if (tmp == levels)
   4269 					break;
   4270 				udelay(1);
   4271 			}
   4272 		}
   4273 		if ((!pi->mclk_dpm_key_disabled) &&
   4274 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
   4275 			levels = ci_get_lowest_enabled_level(rdev,
   4276 							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
   4277 			ret = ci_dpm_force_state_mclk(rdev, levels);
   4278 			if (ret)
   4279 				return ret;
   4280 			for (i = 0; i < rdev->usec_timeout; i++) {
   4281 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
   4282 				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
   4283 				if (tmp == levels)
   4284 					break;
   4285 				udelay(1);
   4286 			}
   4287 		}
   4288 		if ((!pi->pcie_dpm_key_disabled) &&
   4289 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
   4290 			levels = ci_get_lowest_enabled_level(rdev,
   4291 							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
   4292 			ret = ci_dpm_force_state_pcie(rdev, levels);
   4293 			if (ret)
   4294 				return ret;
   4295 			for (i = 0; i < rdev->usec_timeout; i++) {
   4296 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
   4297 				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
   4298 				if (tmp == levels)
   4299 					break;
   4300 				udelay(1);
   4301 			}
   4302 		}
   4303 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
   4304 		if (!pi->pcie_dpm_key_disabled) {
   4305 			PPSMC_Result smc_result;
   4306 
   4307 			smc_result = ci_send_msg_to_smc(rdev,
   4308 							PPSMC_MSG_PCIeDPM_UnForceLevel);
   4309 			if (smc_result != PPSMC_Result_OK)
   4310 				return -EINVAL;
   4311 		}
   4312 		ret = ci_upload_dpm_level_enable_mask(rdev);
   4313 		if (ret)
   4314 			return ret;
   4315 	}
   4316 
   4317 	rdev->pm.dpm.forced_level = level;
   4318 
   4319 	return 0;
   4320 }
   4321 
   4322 static int ci_set_mc_special_registers(struct radeon_device *rdev,
   4323 				       struct ci_mc_reg_table *table)
   4324 {
   4325 	struct ci_power_info *pi = ci_get_pi(rdev);
   4326 	u8 i, j, k;
   4327 	u32 temp_reg;
   4328 
   4329 	for (i = 0, j = table->last; i < table->last; i++) {
   4330 		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
   4331 			return -EINVAL;
   4332 		switch(table->mc_reg_address[i].s1 << 2) {
   4333 		case MC_SEQ_MISC1:
   4334 			temp_reg = RREG32(MC_PMG_CMD_EMRS);
   4335 			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
   4336 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
   4337 			for (k = 0; k < table->num_entries; k++) {
   4338 				table->mc_reg_table_entry[k].mc_data[j] =
   4339 					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
   4340 			}
   4341 			j++;
   4342 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
   4343 				return -EINVAL;
   4344 
   4345 			temp_reg = RREG32(MC_PMG_CMD_MRS);
   4346 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
   4347 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
   4348 			for (k = 0; k < table->num_entries; k++) {
   4349 				table->mc_reg_table_entry[k].mc_data[j] =
   4350 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
   4351 				if (!pi->mem_gddr5)
   4352 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
   4353 			}
   4354 			j++;
   4355 			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
   4356 				return -EINVAL;
   4357 
   4358 			if (!pi->mem_gddr5) {
   4359 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
   4360 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
   4361 				for (k = 0; k < table->num_entries; k++) {
   4362 					table->mc_reg_table_entry[k].mc_data[j] =
   4363 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
   4364 				}
   4365 				j++;
   4366 				if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
   4367 					return -EINVAL;
   4368 			}
   4369 			break;
   4370 		case MC_SEQ_RESERVE_M:
   4371 			temp_reg = RREG32(MC_PMG_CMD_MRS1);
   4372 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
   4373 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
   4374 			for (k = 0; k < table->num_entries; k++) {
   4375 				table->mc_reg_table_entry[k].mc_data[j] =
   4376 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
   4377 			}
   4378 			j++;
   4379 			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
   4380 				return -EINVAL;
   4381 			break;
   4382 		default:
   4383 			break;
   4384 		}
   4385 
   4386 	}
   4387 
   4388 	table->last = j;
   4389 
   4390 	return 0;
   4391 }
   4392 
   4393 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
   4394 {
   4395 	bool result = true;
   4396 
   4397 	switch(in_reg) {
   4398 	case MC_SEQ_RAS_TIMING >> 2:
   4399 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
   4400 		break;
   4401 	case MC_SEQ_DLL_STBY >> 2:
   4402 		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
   4403 		break;
   4404 	case MC_SEQ_G5PDX_CMD0 >> 2:
   4405 		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
   4406 		break;
   4407 	case MC_SEQ_G5PDX_CMD1 >> 2:
   4408 		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
   4409 		break;
   4410 	case MC_SEQ_G5PDX_CTRL >> 2:
   4411 		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
   4412 		break;
   4413 	case MC_SEQ_CAS_TIMING >> 2:
   4414 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
   4415             break;
   4416 	case MC_SEQ_MISC_TIMING >> 2:
   4417 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
   4418 		break;
   4419 	case MC_SEQ_MISC_TIMING2 >> 2:
   4420 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
   4421 		break;
   4422 	case MC_SEQ_PMG_DVS_CMD >> 2:
   4423 		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
   4424 		break;
   4425 	case MC_SEQ_PMG_DVS_CTL >> 2:
   4426 		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
   4427 		break;
   4428 	case MC_SEQ_RD_CTL_D0 >> 2:
   4429 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
   4430 		break;
   4431 	case MC_SEQ_RD_CTL_D1 >> 2:
   4432 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
   4433 		break;
   4434 	case MC_SEQ_WR_CTL_D0 >> 2:
   4435 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
   4436 		break;
   4437 	case MC_SEQ_WR_CTL_D1 >> 2:
   4438 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
   4439 		break;
   4440 	case MC_PMG_CMD_EMRS >> 2:
   4441 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
   4442 		break;
   4443 	case MC_PMG_CMD_MRS >> 2:
   4444 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
   4445 		break;
   4446 	case MC_PMG_CMD_MRS1 >> 2:
   4447 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
   4448 		break;
   4449 	case MC_SEQ_PMG_TIMING >> 2:
   4450 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
   4451 		break;
   4452 	case MC_PMG_CMD_MRS2 >> 2:
   4453 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
   4454 		break;
   4455 	case MC_SEQ_WR_CTL_2 >> 2:
   4456 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
   4457 		break;
   4458 	default:
   4459 		result = false;
   4460 		break;
   4461 	}
   4462 
   4463 	return result;
   4464 }
   4465 
   4466 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
   4467 {
   4468 	u8 i, j;
   4469 
   4470 	for (i = 0; i < table->last; i++) {
   4471 		for (j = 1; j < table->num_entries; j++) {
   4472 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
   4473 			    table->mc_reg_table_entry[j].mc_data[i]) {
   4474 				table->valid_flag |= 1 << i;
   4475 				break;
   4476 			}
   4477 		}
   4478 	}
   4479 }
   4480 
   4481 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
   4482 {
   4483 	u32 i;
   4484 	u16 address;
   4485 
   4486 	for (i = 0; i < table->last; i++) {
   4487 		table->mc_reg_address[i].s0 =
   4488 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
   4489 			address : table->mc_reg_address[i].s1;
   4490 	}
   4491 }
   4492 
   4493 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
   4494 				      struct ci_mc_reg_table *ci_table)
   4495 {
   4496 	u8 i, j;
   4497 
   4498 	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
   4499 		return -EINVAL;
   4500 	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
   4501 		return -EINVAL;
   4502 
   4503 	for (i = 0; i < table->last; i++)
   4504 		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
   4505 
   4506 	ci_table->last = table->last;
   4507 
   4508 	for (i = 0; i < table->num_entries; i++) {
   4509 		ci_table->mc_reg_table_entry[i].mclk_max =
   4510 			table->mc_reg_table_entry[i].mclk_max;
   4511 		for (j = 0; j < table->last; j++)
   4512 			ci_table->mc_reg_table_entry[i].mc_data[j] =
   4513 				table->mc_reg_table_entry[i].mc_data[j];
   4514 	}
   4515 	ci_table->num_entries = table->num_entries;
   4516 
   4517 	return 0;
   4518 }
   4519 
/*
 * Apply board-specific MC sequence overrides for certain Hawaii boards
 * (PCI device ids 0x67B0/0x67B1) when MC_SEQ_MISC0 indicates the affected
 * memory revision.  Table entries whose mclk_max is exactly 125000 or
 * 137500 get patched timing/control values, then one MC_SEQ_IO_DEBUG
 * word (index 3) is rewritten with bit 16 set.
 * Returns 0 on success, -EINVAL if the table is over-sized.
 */
static int ci_register_patching_mc_seq(struct radeon_device *rdev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	tmp = RREG32(MC_SEQ_MISC0);
	/* only patch parts whose MISC0[11:8] field reads 3 */
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((rdev->pdev->device == 0x67B0) ||
	     (rdev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			/* NOTE(review): this bounds-checks table->last rather
			 * than i, and the switch key uses (s1 >> 2) while
			 * ci_set_mc_special_registers keys on (s1 << 2) —
			 * looks inconsistent; confirm against the upstream
			 * driver before changing. */
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			switch(table->mc_reg_address[i].s1 >> 2) {
			case MC_SEQ_MISC1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case MC_SEQ_WR_CTL_D0:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_D1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_2:
				/* write control 2 is cleared entirely */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case MC_SEQ_CAS_TIMING:
				/* different CAS values for the two clocks */
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case MC_SEQ_MISC_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000035;
				}
				break;
			default:
				break;
			}
		}

		/* read-modify-write MC_SEQ_IO_DEBUG word 3: set bit 16 */
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}
   4609 
/*
 * Build the driver's MC register table: seed every _LP shadow register
 * from its live counterpart, pull the AC timing table from the vbios via
 * atom, copy it into pi->mc_reg_table, then apply the s0 index mapping,
 * board-specific patching, special-register expansion and validity flags.
 * Returns 0 on success or a negative error code from any step.
 */
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);
	int ret;

	/* scratch table for the atom parser; freed before returning */
	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* initialize each low-power shadow from the live register */
	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
	if (ret)
		goto init_mc_done;

        ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_register_patching_mc_seq(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ret = ci_set_mc_special_registers(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	kfree(table);

	return ret;
}
   4668 
   4669 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
   4670 					SMU7_Discrete_MCRegisters *mc_reg_table)
   4671 {
   4672 	struct ci_power_info *pi = ci_get_pi(rdev);
   4673 	u32 i, j;
   4674 
   4675 	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
   4676 		if (pi->mc_reg_table.valid_flag & (1 << j)) {
   4677 			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
   4678 				return -EINVAL;
   4679 			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
   4680 			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
   4681 			i++;
   4682 		}
   4683 	}
   4684 
   4685 	mc_reg_table->last = (u8)i;
   4686 
   4687 	return 0;
   4688 }
   4689 
   4690 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
   4691 				    SMU7_Discrete_MCRegisterSet *data,
   4692 				    u32 num_entries, u32 valid_flag)
   4693 {
   4694 	u32 i, j;
   4695 
   4696 	for (i = 0, j = 0; j < num_entries; j++) {
   4697 		if (valid_flag & (1 << j)) {
   4698 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
   4699 			i++;
   4700 		}
   4701 	}
   4702 }
   4703 
   4704 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
   4705 						 const u32 memory_clock,
   4706 						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
   4707 {
   4708 	struct ci_power_info *pi = ci_get_pi(rdev);
   4709 	u32 i = 0;
   4710 
   4711 	for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
   4712 		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
   4713 			break;
   4714 	}
   4715 
   4716 	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
   4717 		--i;
   4718 
   4719 	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
   4720 				mc_reg_table_data, pi->mc_reg_table.last,
   4721 				pi->mc_reg_table.valid_flag);
   4722 }
   4723 
   4724 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
   4725 					   SMU7_Discrete_MCRegisters *mc_reg_table)
   4726 {
   4727 	struct ci_power_info *pi = ci_get_pi(rdev);
   4728 	u32 i;
   4729 
   4730 	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
   4731 		ci_convert_mc_reg_table_entry_to_smc(rdev,
   4732 						     pi->dpm_table.mclk_table.dpm_levels[i].value,
   4733 						     &mc_reg_table->data[i]);
   4734 }
   4735 
   4736 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
   4737 {
   4738 	struct ci_power_info *pi = ci_get_pi(rdev);
   4739 	int ret;
   4740 
   4741 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
   4742 
   4743 	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
   4744 	if (ret)
   4745 		return ret;
   4746 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
   4747 
   4748 	return ci_copy_bytes_to_smc(rdev,
   4749 				    pi->mc_reg_table_start,
   4750 				    (u8 *)&pi->smc_mc_reg_table,
   4751 				    sizeof(SMU7_Discrete_MCRegisters),
   4752 				    pi->sram_end);
   4753 }
   4754 
   4755 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
   4756 {
   4757 	struct ci_power_info *pi = ci_get_pi(rdev);
   4758 
   4759 	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
   4760 		return 0;
   4761 
   4762 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
   4763 
   4764 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
   4765 
   4766 	return ci_copy_bytes_to_smc(rdev,
   4767 				    pi->mc_reg_table_start +
   4768 				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
   4769 				    (u8 *)&pi->smc_mc_reg_table.data[0],
   4770 				    sizeof(SMU7_Discrete_MCRegisterSet) *
   4771 				    pi->dpm_table.mclk_table.count,
   4772 				    pi->sram_end);
   4773 }
   4774 
   4775 static void ci_enable_voltage_control(struct radeon_device *rdev)
   4776 {
   4777 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
   4778 
   4779 	tmp |= VOLT_PWRMGT_EN;
   4780 	WREG32_SMC(GENERAL_PWRMGT, tmp);
   4781 }
   4782 
   4783 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
   4784 						      struct radeon_ps *radeon_state)
   4785 {
   4786 	struct ci_ps *state = ci_get_ps(radeon_state);
   4787 	int i;
   4788 	u16 pcie_speed, max_speed = 0;
   4789 
   4790 	for (i = 0; i < state->performance_level_count; i++) {
   4791 		pcie_speed = state->performance_levels[i].pcie_gen;
   4792 		if (max_speed < pcie_speed)
   4793 			max_speed = pcie_speed;
   4794 	}
   4795 
   4796 	return max_speed;
   4797 }
   4798 
   4799 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
   4800 {
   4801 	u32 speed_cntl = 0;
   4802 
   4803 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
   4804 	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
   4805 
   4806 	return (u16)speed_cntl;
   4807 }
   4808 
   4809 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
   4810 {
   4811 	u32 link_width = 0;
   4812 
   4813 	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
   4814 	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
   4815 
   4816 	switch (link_width) {
   4817 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
   4818 		return 1;
   4819 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
   4820 		return 2;
   4821 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
   4822 		return 4;
   4823 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
   4824 		return 8;
   4825 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
   4826 		/* not actually supported */
   4827 		return 12;
   4828 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
   4829 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
   4830 	default:
   4831 		return 16;
   4832 	}
   4833 }
   4834 
/*
 * Before a state change: if the new state wants a faster pcie link, try to
 * raise it now via an ACPI performance request; remember any level we had
 * to force (pi->force_pcie_gen) so the switch can be retried.  If the new
 * state wants a slower link, just set pi->pspp_notify_required so the
 * request is issued after the state change instead.
 */
static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
							     struct radeon_ps *radeon_new_state,
							     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	enum radeon_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case RADEON_PCIE_GEN3:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			/* gen3 request failed: fall back to forcing gen2 */
			pi->force_pcie_gen = RADEON_PCIE_GEN2;
			if (current_link_speed == RADEON_PCIE_GEN2)
				break;
			/* fall through - try a gen2 request instead */
		case RADEON_PCIE_GEN2:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
#endif
		/* fall through - keep whatever speed we currently have */
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}
   4873 
   4874 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
   4875 							   struct radeon_ps *radeon_new_state,
   4876 							   struct radeon_ps *radeon_current_state)
   4877 {
   4878 	struct ci_power_info *pi = ci_get_pi(rdev);
   4879 	enum radeon_pcie_gen target_link_speed =
   4880 		ci_get_maximum_link_speed(rdev, radeon_new_state);
   4881 	u8 request;
   4882 
   4883 	if (pi->pspp_notify_required) {
   4884 		if (target_link_speed == RADEON_PCIE_GEN3)
   4885 			request = PCIE_PERF_REQ_PECI_GEN3;
   4886 		else if (target_link_speed == RADEON_PCIE_GEN2)
   4887 			request = PCIE_PERF_REQ_PECI_GEN2;
   4888 		else
   4889 			request = PCIE_PERF_REQ_PECI_GEN1;
   4890 
   4891 		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
   4892 		    (ci_get_current_pcie_speed(rdev) > 0))
   4893 			return;
   4894 
   4895 #ifdef CONFIG_ACPI
   4896 		radeon_acpi_pcie_performance_request(rdev, request, false);
   4897 #endif
   4898 	}
   4899 }
   4900 
   4901 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
   4902 {
   4903 	struct ci_power_info *pi = ci_get_pi(rdev);
   4904 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
   4905 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
   4906 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
   4907 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
   4908 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
   4909 		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
   4910 
   4911 	if (allowed_sclk_vddc_table == NULL)
   4912 		return -EINVAL;
   4913 	if (allowed_sclk_vddc_table->count < 1)
   4914 		return -EINVAL;
   4915 	if (allowed_mclk_vddc_table == NULL)
   4916 		return -EINVAL;
   4917 	if (allowed_mclk_vddc_table->count < 1)
   4918 		return -EINVAL;
   4919 	if (allowed_mclk_vddci_table == NULL)
   4920 		return -EINVAL;
   4921 	if (allowed_mclk_vddci_table->count < 1)
   4922 		return -EINVAL;
   4923 
   4924 	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
   4925 	pi->max_vddc_in_pp_table =
   4926 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
   4927 
   4928 	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
   4929 	pi->max_vddci_in_pp_table =
   4930 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
   4931 
   4932 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
   4933 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
   4934 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
   4935 		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
   4936 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
   4937 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
   4938         rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
   4939 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
   4940 
   4941 	return 0;
   4942 }
   4943 
   4944 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
   4945 {
   4946 	struct ci_power_info *pi = ci_get_pi(rdev);
   4947 	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
   4948 	u32 leakage_index;
   4949 
   4950 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
   4951 		if (leakage_table->leakage_id[leakage_index] == *vddc) {
   4952 			*vddc = leakage_table->actual_voltage[leakage_index];
   4953 			break;
   4954 		}
   4955 	}
   4956 }
   4957 
   4958 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
   4959 {
   4960 	struct ci_power_info *pi = ci_get_pi(rdev);
   4961 	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
   4962 	u32 leakage_index;
   4963 
   4964 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
   4965 		if (leakage_table->leakage_id[leakage_index] == *vddci) {
   4966 			*vddci = leakage_table->actual_voltage[leakage_index];
   4967 			break;
   4968 		}
   4969 	}
   4970 }
   4971 
   4972 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
   4973 								      struct radeon_clock_voltage_dependency_table *table)
   4974 {
   4975 	u32 i;
   4976 
   4977 	if (table) {
   4978 		for (i = 0; i < table->count; i++)
   4979 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
   4980 	}
   4981 }
   4982 
   4983 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
   4984 								       struct radeon_clock_voltage_dependency_table *table)
   4985 {
   4986 	u32 i;
   4987 
   4988 	if (table) {
   4989 		for (i = 0; i < table->count; i++)
   4990 			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
   4991 	}
   4992 }
   4993 
   4994 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
   4995 									  struct radeon_vce_clock_voltage_dependency_table *table)
   4996 {
   4997 	u32 i;
   4998 
   4999 	if (table) {
   5000 		for (i = 0; i < table->count; i++)
   5001 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
   5002 	}
   5003 }
   5004 
   5005 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
   5006 									  struct radeon_uvd_clock_voltage_dependency_table *table)
   5007 {
   5008 	u32 i;
   5009 
   5010 	if (table) {
   5011 		for (i = 0; i < table->count; i++)
   5012 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
   5013 	}
   5014 }
   5015 
   5016 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
   5017 								   struct radeon_phase_shedding_limits_table *table)
   5018 {
   5019 	u32 i;
   5020 
   5021 	if (table) {
   5022 		for (i = 0; i < table->count; i++)
   5023 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
   5024 	}
   5025 }
   5026 
   5027 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
   5028 							    struct radeon_clock_and_voltage_limits *table)
   5029 {
   5030 	if (table) {
   5031 		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
   5032 		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
   5033 	}
   5034 }
   5035 
   5036 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
   5037 							 struct radeon_cac_leakage_table *table)
   5038 {
   5039 	u32 i;
   5040 
   5041 	if (table) {
   5042 		for (i = 0; i < table->count; i++)
   5043 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
   5044 	}
   5045 }
   5046 
   5047 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
   5048 {
   5049 
   5050 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
   5051 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
   5052 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
   5053 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
   5054 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
   5055 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
   5056 	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
   5057 								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
   5058 	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
   5059 								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
   5060 	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
   5061 								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
   5062 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
   5063 								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
   5064 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
   5065 								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
   5066 	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
   5067 							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
   5068 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
   5069 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
   5070 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
   5071 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
   5072 	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
   5073 						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
   5074 
   5075 }
   5076 
   5077 static void ci_get_memory_type(struct radeon_device *rdev)
   5078 {
   5079 	struct ci_power_info *pi = ci_get_pi(rdev);
   5080 	u32 tmp;
   5081 
   5082 	tmp = RREG32(MC_SEQ_MISC0);
   5083 
   5084 	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
   5085 	    MC_SEQ_MISC0_GDDR5_VALUE)
   5086 		pi->mem_gddr5 = true;
   5087 	else
   5088 		pi->mem_gddr5 = false;
   5089 
   5090 }
   5091 
   5092 static void ci_update_current_ps(struct radeon_device *rdev,
   5093 				 struct radeon_ps *rps)
   5094 {
   5095 	struct ci_ps *new_ps = ci_get_ps(rps);
   5096 	struct ci_power_info *pi = ci_get_pi(rdev);
   5097 
   5098 	pi->current_rps = *rps;
   5099 	pi->current_ps = *new_ps;
   5100 	pi->current_rps.ps_priv = &pi->current_ps;
   5101 }
   5102 
   5103 static void ci_update_requested_ps(struct radeon_device *rdev,
   5104 				   struct radeon_ps *rps)
   5105 {
   5106 	struct ci_ps *new_ps = ci_get_ps(rps);
   5107 	struct ci_power_info *pi = ci_get_pi(rdev);
   5108 
   5109 	pi->requested_rps = *rps;
   5110 	pi->requested_ps = *new_ps;
   5111 	pi->requested_rps.ps_priv = &pi->requested_ps;
   5112 }
   5113 
   5114 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
   5115 {
   5116 	struct ci_power_info *pi = ci_get_pi(rdev);
   5117 	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
   5118 	struct radeon_ps *new_ps = &requested_ps;
   5119 
   5120 	ci_update_requested_ps(rdev, new_ps);
   5121 
   5122 	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
   5123 
   5124 	return 0;
   5125 }
   5126 
   5127 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
   5128 {
   5129 	struct ci_power_info *pi = ci_get_pi(rdev);
   5130 	struct radeon_ps *new_ps = &pi->requested_rps;
   5131 
   5132 	ci_update_current_ps(rdev, new_ps);
   5133 }
   5134 
   5135 
/*
 * One-time ASIC setup for DPM: load the MC microcode (failure is logged
 * but not fatal here), snapshot the clock registers, detect the memory
 * type, and enable ACPI power management plus the initial sclk_t value.
 */
void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	if (ci_mc_load_microcode(rdev) != 0)
		DRM_ERROR("Failed to load MC firmware!\n");
	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}
   5148 
/*
 * Full DPM bring-up for CI parts.  Builds the voltage tables, uploads and
 * starts the SMC firmware, then enables the individual DPM features in
 * the order the rest of this driver assumes.  Returns 0 on success or a
 * negative error code from the first failing step; on failure DPM is
 * left partially configured and the caller is expected to disable it.
 */
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	/* Refuse to re-enable if the SMC is already running. */
	if (ci_is_smc_running(rdev))
		return -EINVAL;
	/* Voltage control tables, only when some control method is in use. */
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	/* Dynamic AC timing is optional: fall back quietly if the MC reg
	 * table cannot be initialized. */
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(rdev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	/* Upload and configure the SMC firmware state. */
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	/* Start the SMC, then switch on the individual DPM features. */
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(rdev);

	/* DPM starts out in the boot state. */
	ci_update_current_ps(rdev, boot_ps);

	return 0;
}
   5273 
   5274 static int ci_set_temperature_range(struct radeon_device *rdev)
   5275 {
   5276 	int ret;
   5277 
   5278 	ret = ci_thermal_enable_alert(rdev, false);
   5279 	if (ret)
   5280 		return ret;
   5281 	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
   5282 	if (ret)
   5283 		return ret;
   5284 	ret = ci_thermal_enable_alert(rdev, true);
   5285 	if (ret)
   5286 		return ret;
   5287 
   5288 	return ret;
   5289 }
   5290 
   5291 int ci_dpm_late_enable(struct radeon_device *rdev)
   5292 {
   5293 	int ret;
   5294 
   5295 	ret = ci_set_temperature_range(rdev);
   5296 	if (ret)
   5297 		return ret;
   5298 
   5299 	ci_dpm_powergate_uvd(rdev, true);
   5300 
   5301 	return 0;
   5302 }
   5303 
/*
 * Tear DPM back down: un-gate UVD, then, if the SMC is still running,
 * disable each feature (roughly the reverse of ci_dpm_enable()), stop the
 * SMC, and restore the boot power state as the cached current state.
 */
void ci_dpm_disable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	ci_dpm_powergate_uvd(rdev, false);

	/* Nothing further to do if the SMC was never started. */
	if (!ci_is_smc_running(rdev))
		return;

	ci_thermal_stop_thermal_controller(rdev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, false);
	ci_enable_power_containment(rdev, false);
	ci_enable_smc_cac(rdev, false);
	ci_enable_didt(rdev, false);
	ci_enable_spread_spectrum(rdev, false);
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(rdev);
	ci_enable_ds_master_switch(rdev, false);
	ci_enable_ulv(rdev, false);
	ci_clear_vc(rdev);
	ci_reset_to_default(rdev);
	ci_dpm_stop_smc(rdev);
	ci_force_switch_to_arb_f0(rdev);
	ci_enable_thermal_based_sclk_dpm(rdev, false);

	ci_update_current_ps(rdev, boot_ps);
}
   5334 
/*
 * Transition from the current power state to the cached requested state.
 * DPM levels are frozen while the new sclk/mclk level tables are uploaded
 * to the SMC, then unfrozen and the enable mask applied.  Returns 0 on
 * success or the first failing step's error code.
 */
int ci_dpm_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
	/* PCIe speed increases must be requested before the state change. */
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(rdev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(rdev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	/* PCIe speed decreases are notified after the state change. */
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	return 0;
}
   5399 
#if 0
/* Unused: force the ASIC back to the boot power state. */
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
	ci_set_boot_state(rdev);
}
#endif
   5406 
/* Reprogram the display gap after a display configuration change. */
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	ci_program_display_gap(rdev);
}
   5411 
/* Overlay of every PowerPlay info table revision found in the ATOM BIOS;
 * the parser selects the member matching the table's revision. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-ASIC-family clock info layouts; CI parts use the .ci member. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

/* PPLIB power state record, v1 and v2 layouts. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
   5434 
   5435 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
   5436 					  struct radeon_ps *rps,
   5437 					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
   5438 					  u8 table_rev)
   5439 {
   5440 	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
   5441 	rps->class = le16_to_cpu(non_clock_info->usClassification);
   5442 	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
   5443 
   5444 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
   5445 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
   5446 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
   5447 	} else {
   5448 		rps->vclk = 0;
   5449 		rps->dclk = 0;
   5450 	}
   5451 
   5452 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
   5453 		rdev->pm.dpm.boot_ps = rps;
   5454 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
   5455 		rdev->pm.dpm.uvd_ps = rps;
   5456 }
   5457 
/*
 * Fill performance level @index of the CI state attached to @rps from a
 * PPLIB clock info record, and update the device-wide PCIe min/max
 * tracking and ULV/ACPI bookkeeping derived from the state's class bits.
 */
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	/* Clocks are split into a 16-bit low word and an 8-bit high byte. */
	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	/* Clamp the requested PCIe gen/lanes to what the system supports. */
	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		/* Remember this level for ultra-low-voltage operation. */
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	/* Track PCIe gen/lane extremes per UI class (battery/performance). */
	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}
   5526 
   5527 static int ci_parse_power_table(struct radeon_device *rdev)
   5528 {
   5529 	struct radeon_mode_info *mode_info = &rdev->mode_info;
   5530 	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
   5531 	union pplib_power_state *power_state;
   5532 	int i, j, k, non_clock_array_index, clock_array_index;
   5533 	union pplib_clock_info *clock_info;
   5534 	struct _StateArray *state_array;
   5535 	struct _ClockInfoArray *clock_info_array;
   5536 	struct _NonClockInfoArray *non_clock_info_array;
   5537 	union power_info *power_info;
   5538 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
   5539         u16 data_offset;
   5540 	u8 frev, crev;
   5541 	u8 *power_state_offset;
   5542 	struct ci_ps *ps;
   5543 
   5544 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
   5545 				   &frev, &crev, &data_offset))
   5546 		return -EINVAL;
   5547 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
   5548 
   5549 	state_array = (struct _StateArray *)
   5550 		(mode_info->atom_context->bios + data_offset +
   5551 		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
   5552 	clock_info_array = (struct _ClockInfoArray *)
   5553 		(mode_info->atom_context->bios + data_offset +
   5554 		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
   5555 	non_clock_info_array = (struct _NonClockInfoArray *)
   5556 		(mode_info->atom_context->bios + data_offset +
   5557 		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
   5558 
   5559 	rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
   5560 				  state_array->ucNumEntries, GFP_KERNEL);
   5561 	if (!rdev->pm.dpm.ps)
   5562 		return -ENOMEM;
   5563 	power_state_offset = (u8 *)state_array->states;
   5564 	for (i = 0; i < state_array->ucNumEntries; i++) {
   5565 		u8 *idx;
   5566 		power_state = (union pplib_power_state *)power_state_offset;
   5567 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
   5568 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
   5569 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
   5570 		if (!rdev->pm.power_state[i].clock_info)
   5571 			return -EINVAL;
   5572 		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
   5573 		if (ps == NULL) {
   5574 			kfree(rdev->pm.dpm.ps);
   5575 			return -ENOMEM;
   5576 		}
   5577 		rdev->pm.dpm.ps[i].ps_priv = ps;
   5578 		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
   5579 					      non_clock_info,
   5580 					      non_clock_info_array->ucEntrySize);
   5581 		k = 0;
   5582 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
   5583 		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
   5584 			clock_array_index = idx[j];
   5585 			if (clock_array_index >= clock_info_array->ucNumEntries)
   5586 				continue;
   5587 			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
   5588 				break;
   5589 			clock_info = (union pplib_clock_info *)
   5590 				((u8 *)&clock_info_array->clockInfo[0] +
   5591 				 (clock_array_index * clock_info_array->ucEntrySize));
   5592 			ci_parse_pplib_clock_info(rdev,
   5593 						  &rdev->pm.dpm.ps[i], k,
   5594 						  clock_info);
   5595 			k++;
   5596 		}
   5597 		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
   5598 	}
   5599 	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
   5600 
   5601 	/* fill in the vce power states */
   5602 	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
   5603 		u32 sclk, mclk;
   5604 		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
   5605 		clock_info = (union pplib_clock_info *)
   5606 			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
   5607 		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
   5608 		sclk |= clock_info->ci.ucEngineClockHigh << 16;
   5609 		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
   5610 		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
   5611 		rdev->pm.dpm.vce_states[i].sclk = sclk;
   5612 		rdev->pm.dpm.vce_states[i].mclk = mclk;
   5613 	}
   5614 
   5615 	return 0;
   5616 }
   5617 
   5618 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
   5619 				    struct ci_vbios_boot_state *boot_state)
   5620 {
   5621 	struct radeon_mode_info *mode_info = &rdev->mode_info;
   5622 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
   5623 	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
   5624 	u8 frev, crev;
   5625 	u16 data_offset;
   5626 
   5627 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
   5628 				   &frev, &crev, &data_offset)) {
   5629 		firmware_info =
   5630 			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
   5631 						    data_offset);
   5632 		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
   5633 		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
   5634 		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
   5635 		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
   5636 		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
   5637 		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
   5638 		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
   5639 
   5640 		return 0;
   5641 	}
   5642 	return -EINVAL;
   5643 }
   5644 
   5645 void ci_dpm_fini(struct radeon_device *rdev)
   5646 {
   5647 	int i;
   5648 
   5649 	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
   5650 		kfree(rdev->pm.dpm.ps[i].ps_priv);
   5651 	}
   5652 	kfree(rdev->pm.dpm.ps);
   5653 	kfree(rdev->pm.dpm.priv);
   5654 	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
   5655 	r600_free_extended_power_table(rdev);
   5656 }
   5657 
/*
 * One-time DPM initialization for CI parts: allocate the ci_power_info,
 * read the vbios boot values and power tables, fill in driver defaults,
 * and probe the GPIO/voltage-control capabilities.  On any failure the
 * partially built state is torn down via ci_dpm_fini() and the error is
 * returned.
 */
int ci_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable  *dpm_table;
	struct radeon_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;
#ifndef __NetBSD__
	u32 mask;
#endif

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	/* NOTE(review): on NetBSD the PCIe speed-cap query is compiled out,
	 * leaving sys_pcie_mask at its zeroed default from kzalloc. */
#ifndef __NetBSD__
	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret)
		pi->sys_pcie_mask = 0;
	else
		pi->sys_pcie_mask = mask;
#endif
	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;

	/* Min/max trackers start inverted; ci_parse_pplib_clock_info()
	 * widens them as states are parsed. */
	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_get_platform_caps(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = ci_parse_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

        pi->dll_default_on = false;
        pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
	if ((rdev->pdev->device == 0x6658) &&
	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
		pi->mclk_dpm_key_disabled = 1;
	}

	pi->caps_sclk_ds = true;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(rdev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	/* Fold measured leakage into the parsed voltage tables before the
	 * private-data variables are derived from them. */
        ci_get_leakage_voltages(rdev);
        ci_patch_dependency_tables_with_leakage(rdev);
        ci_set_private_data_variables_based_on_pptable(rdev);

	/* Synthesized dispclk/voltage dependency table (4 fixed points). */
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	/* Thermal trip points differ between Hawaii and the other CI parts. */
	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;

	/* Probe the board GPIOs the SMC needs to know about. */
	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(1);
			break;
		case 1:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(2);
			break;
		case 2:
			tmp |= GNB_SLOW;
			break;
		case 3:
			tmp |= FORCE_NB_PS1;
			break;
		case 4:
			tmp |= DPM_ENABLED;
			break;
		default:
			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
	}

	/* Determine the voltage control method for VDDC/VDDCI/MVDD; if a
	 * cap is advertised but no method exists, drop the cap. */
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
        }

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif

	/* Spread spectrum support depends on the SS info table existing. */
	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
                                   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}
   5915 
#ifdef CONFIG_DEBUG_FS
/*
 * debugfs helper: print the UVD/VCE enable state and the average
 * engine/memory clocks reported by the SMC.
 */
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *cur = &pi->current_rps;
	u32 avg_sclk = ci_get_average_sclk_freq(rdev);
	u32 avg_mclk = ci_get_average_mclk_freq(rdev);

	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
	seq_printf(m, "vce    %sabled\n", cur->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
		   avg_sclk, avg_mclk);
}
#endif
   5931 
   5932 void ci_dpm_print_power_state(struct radeon_device *rdev,
   5933 			      struct radeon_ps *rps)
   5934 {
   5935 	struct ci_ps *ps = ci_get_ps(rps);
   5936 	struct ci_pl *pl;
   5937 	int i;
   5938 
   5939 	r600_dpm_print_class_info(rps->class, rps->class2);
   5940 	r600_dpm_print_cap_info(rps->caps);
   5941 	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
   5942 	for (i = 0; i < ps->performance_level_count; i++) {
   5943 		pl = &ps->performance_levels[i];
   5944 		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
   5945 		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
   5946 	}
   5947 	r600_dpm_print_ps_status(rdev, rps);
   5948 }
   5949 
   5950 u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
   5951 {
   5952 	u32 sclk = ci_get_average_sclk_freq(rdev);
   5953 
   5954 	return sclk;
   5955 }
   5956 
   5957 u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
   5958 {
   5959 	u32 mclk = ci_get_average_mclk_freq(rdev);
   5960 
   5961 	return mclk;
   5962 }
   5963 
   5964 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
   5965 {
   5966 	struct ci_power_info *pi = ci_get_pi(rdev);
   5967 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
   5968 
   5969 	if (low)
   5970 		return requested_state->performance_levels[0].sclk;
   5971 	else
   5972 		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
   5973 }
   5974 
   5975 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
   5976 {
   5977 	struct ci_power_info *pi = ci_get_pi(rdev);
   5978 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
   5979 
   5980 	if (low)
   5981 		return requested_state->performance_levels[0].mclk;
   5982 	else
   5983 		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
   5984 }
   5985