Home | History | Annotate | Line # | Download | only in radeon
radeon_pm.c revision 1.3.18.1
      1 /*	$NetBSD: radeon_pm.c,v 1.3.18.1 2019/06/10 22:08:26 christos Exp $	*/
      2 
      3 /*
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8  * and/or sell copies of the Software, and to permit persons to whom the
      9  * Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice shall be included in
     12  * all copies or substantial portions of the Software.
     13  *
     14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20  * OTHER DEALINGS IN THE SOFTWARE.
     21  *
     22  * Authors: Rafa Miecki <zajec5 (at) gmail.com>
     23  *          Alex Deucher <alexdeucher (at) gmail.com>
     24  */
     25 #include <sys/cdefs.h>
     26 __KERNEL_RCSID(0, "$NetBSD: radeon_pm.c,v 1.3.18.1 2019/06/10 22:08:26 christos Exp $");
     27 
     28 #include <drm/drmP.h>
     29 #include "radeon.h"
     30 #include "avivod.h"
     31 #include "atom.h"
     32 #include "r600_dpm.h"
     33 #include <linux/power_supply.h>
     34 #include <linux/hwmon.h>
     35 #include <linux/hwmon-sysfs.h>
     36 
/* Interval between dynpm idle-work polls (per the _MS suffix; used by the
 * dynpm idle work handler declared below). */
#define RADEON_IDLE_LOOP_MS 100
/* Delay before acting on a planned dynpm reclock decision. */
#define RADEON_RECLOCK_DELAY_MS 200
/* Max time (ms) to wait for a vblank before reclocking anyway; passed to
 * msecs_to_jiffies() in radeon_sync_with_vblank(). */
#define RADEON_WAIT_VBLANK_TIMEOUT 200

/* Human-readable names indexed by enum radeon_pm_state_type; index 0 is the
 * unnamed/default type.  Used by radeon_pm_print_states(). */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

/* Forward declarations for helpers defined later in this file. */
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
     55 
     56 int radeon_pm_get_type_index(struct radeon_device *rdev,
     57 			     enum radeon_pm_state_type ps_type,
     58 			     int instance)
     59 {
     60 	int i;
     61 	int found_instance = -1;
     62 
     63 	for (i = 0; i < rdev->pm.num_power_states; i++) {
     64 		if (rdev->pm.power_state[i].type == ps_type) {
     65 			found_instance++;
     66 			if (found_instance == instance)
     67 				return i;
     68 		}
     69 	}
     70 	/* return default if no match */
     71 	return rdev->pm.default_power_state_index;
     72 }
     73 
/*
 * radeon_pm_acpi_event_handler - react to an ACPI AC/DC power-source event.
 *
 * Re-reads the system power-supply state and acts according to the active
 * power-management method:
 *  - DPM (and enabled): cache the AC/DC status in rdev->pm.dpm.ac_power
 *    and, on ARUBA parts providing the hook, toggle BAPM to match.
 *  - PROFILE with the "auto" profile: re-pick the profile and reclock,
 *    since the auto profile depends on whether we are on AC power.
 * All pm state updates are performed under rdev->pm.mutex.
 */
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			rdev->pm.dpm.ac_power = true;
		else
			rdev->pm.dpm.ac_power = false;
		if (rdev->family == CHIP_ARUBA) {
			if (rdev->asic->dpm.enable_bapm)
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		}
		mutex_unlock(&rdev->pm.mutex);
        } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}
     96 
/*
 * radeon_pm_update_profile - translate the user-selected profile into a
 * concrete profile table index, then into requested power-state and
 * clock-mode indices.
 *
 * The AUTO profile resolves to HIGH on AC power and MID on battery; the
 * multi-head (MH) variant is used whenever more than one CRTC is active.
 * With no active CRTCs the profile's dpms_off indices are requested,
 * otherwise its dpms_on indices.  Caller is expected to hold
 * rdev->pm.mutex (all call sites in this file do).
 */
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		/* auto: pick high clocks on AC, mid clocks on battery */
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	/* displays off: use the profile's dpms_off state/clock indices */
	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}
    148 
/*
 * radeon_unmap_vram_bos - tear down CPU virtual mappings of all VRAM BOs.
 *
 * Walks rdev->gem.objects and unmaps the virtual mapping of every buffer
 * object currently placed in VRAM.  Called before a reclock so no CPU
 * access races the memory-clock change.
 */
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}
    161 
/*
 * radeon_sync_with_vblank - wait (bounded) for the next vblank interrupt.
 *
 * If any CRTC is active, clears pm.vblank_sync and sleeps until the vblank
 * IRQ handler sets it again, or until RADEON_WAIT_VBLANK_TIMEOUT ms pass.
 * No-op when no CRTCs are active.  The NetBSD variant waits on the
 * vblank_queue under irq.vblank_lock; the Linux variant uses
 * wait_event_timeout.
 */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
#ifdef __NetBSD__
		int ret __unused;

		spin_lock(&rdev->irq.vblank_lock);
		rdev->pm.vblank_sync = false;
		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &rdev->irq.vblank_queue,
		    &rdev->irq.vblank_lock,
		    msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT),
		    rdev->pm.vblank_sync);
		spin_unlock(&rdev->irq.vblank_lock);
#else
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
#endif
	}
}
    183 
/*
 * radeon_set_power_state - program the requested power state into hardware.
 *
 * Computes the target engine (sclk) and memory (mclk) clocks from the
 * requested state/mode indices (clamped to the defaults), then — only if
 * the GUI is idle — syncs to vblank and applies voltage/lane changes and
 * the new clocks in the safe order: misc (voltage etc.) before raising
 * clocks, after lowering them.  On success the current_* indices are
 * updated to the requested ones.  Bails out early if nothing changed, if
 * dynpm is active but we are not in vblank, or if the GUI is busy.
 */
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	/* nothing to do if the requested state is already current */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		/* never exceed the default (boot-time) engine clock */
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		/* never exceed the default (boot-time) memory clock */
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		/* dynpm only reclocks inside the vblank window; abort otherwise */
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
    263 
/*
 * radeon_pm_set_clocks - quiesce the GPU and apply the requested clocks.
 *
 * Under pm.mclk_lock (write) and ring_lock: drains every ready ring,
 * unmaps VRAM BO virtual mappings, holds a vblank reference on each
 * active CRTC across the actual state change (radeon_set_power_state),
 * then recomputes display watermarks for the new clocks.  Returns early
 * without doing anything if the requested state equals the current one,
 * or aborts (leaving clocks untouched) if a fence wait fails, since that
 * indicates a hung GPU needing a reset.
 */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset; don't reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* pin vblank interrupts on the active CRTCs for the duration */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	/* release the vblank references taken above */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
    323 
    324 static void radeon_pm_print_states(struct radeon_device *rdev)
    325 {
    326 	int i, j;
    327 	struct radeon_power_state *power_state;
    328 	struct radeon_pm_clock_info *clock_info;
    329 
    330 	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
    331 	for (i = 0; i < rdev->pm.num_power_states; i++) {
    332 		power_state = &rdev->pm.power_state[i];
    333 		DRM_DEBUG_DRIVER("State %d: %s\n", i,
    334 			radeon_pm_state_type_name[power_state->type]);
    335 		if (i == rdev->pm.default_power_state_index)
    336 			DRM_DEBUG_DRIVER("\tDefault");
    337 		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
    338 			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
    339 		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
    340 			DRM_DEBUG_DRIVER("\tSingle display only\n");
    341 		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
    342 		for (j = 0; j < power_state->num_clock_modes; j++) {
    343 			clock_info = &(power_state->clock_info[j]);
    344 			if (rdev->flags & RADEON_IS_IGP)
    345 				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
    346 						 j,
    347 						 clock_info->sclk * 10);
    348 			else
    349 				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
    350 						 j,
    351 						 clock_info->sclk * 10,
    352 						 clock_info->mclk * 10,
    353 						 clock_info->voltage.voltage);
    354 		}
    355 	}
    356 }
    357 
    358 #ifndef __NetBSD__		/* XXX radeon power */
    359 static ssize_t radeon_get_pm_profile(struct device *dev,
    360 				     struct device_attribute *attr,
    361 				     char *buf)
    362 {
    363 	struct drm_device *ddev = dev_get_drvdata(dev);
    364 	struct radeon_device *rdev = ddev->dev_private;
    365 	int cp = rdev->pm.profile;
    366 
    367 	return snprintf(buf, PAGE_SIZE, "%s\n",
    368 			(cp == PM_PROFILE_AUTO) ? "auto" :
    369 			(cp == PM_PROFILE_LOW) ? "low" :
    370 			(cp == PM_PROFILE_MID) ? "mid" :
    371 			(cp == PM_PROFILE_HIGH) ? "high" : "default");
    372 }
    373 
/*
 * radeon_set_pm_profile - sysfs store handler for "power_profile".
 *
 * Parses @buf as one of "default", "auto", "low", "mid" or "high"
 * (prefix match), stores the new profile under pm.mutex, and immediately
 * re-resolves the profile and reclocks.  Returns @count on success or
 * -EINVAL if the card is powered down (PX), the pm method is not
 * PROFILE, or the string is unrecognized.
 */
static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set profile when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			/* count is size_t; the negative errno survives the
			 * round-trip back through the ssize_t return */
			count = -EINVAL;
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
    413 
    414 static ssize_t radeon_get_pm_method(struct device *dev,
    415 				    struct device_attribute *attr,
    416 				    char *buf)
    417 {
    418 	struct drm_device *ddev = dev_get_drvdata(dev);
    419 	struct radeon_device *rdev = ddev->dev_private;
    420 	int pm = rdev->pm.pm_method;
    421 
    422 	return snprintf(buf, PAGE_SIZE, "%s\n",
    423 			(pm == PM_METHOD_DYNPM) ? "dynpm" :
    424 			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
    425 }
    426 
/*
 * radeon_set_pm_method - sysfs store handler for "power_method".
 *
 * Switches between the legacy "dynpm" and "profile" methods.  Selecting
 * profile also disables dynpm and cancels its idle work (outside the
 * mutex, so the work handler can take it while being cancelled).
 * Returns @count on success; -EINVAL if the card is powered down (PX),
 * the current method is DPM (legacy methods unsupported then), or the
 * string is unrecognized.
 */
static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set method when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		count = -EINVAL;
		goto fail;
	}

	/* we don't support the legacy modes with dpm */
	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		count = -EINVAL;
		goto fail;
	}

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		mutex_unlock(&rdev->pm.mutex);
		/* must not hold pm.mutex here: the idle work takes it */
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
    470 
    471 static ssize_t radeon_get_dpm_state(struct device *dev,
    472 				    struct device_attribute *attr,
    473 				    char *buf)
    474 {
    475 	struct drm_device *ddev = dev_get_drvdata(dev);
    476 	struct radeon_device *rdev = ddev->dev_private;
    477 	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
    478 
    479 	return snprintf(buf, PAGE_SIZE, "%s\n",
    480 			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
    481 			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
    482 }
    483 
/*
 * radeon_set_dpm_state - sysfs store handler for "power_dpm_state".
 *
 * Parses @buf as "battery", "balanced" or "performance" (prefix match)
 * and records it as the user dpm state under pm.mutex.  The clocks are
 * recomputed afterwards unless the card is a powered-down PX device.
 * Returns @count on success, -EINVAL on an unrecognized string.
 */
static ssize_t radeon_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		mutex_unlock(&rdev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&rdev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(rdev->flags & RADEON_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		radeon_pm_compute_clocks(rdev);

fail:
	return count;
}
    514 
/*
 * radeon_get_dpm_forced_performance_level - sysfs show handler for
 * "power_dpm_force_performance_level".
 *
 * Writes "off" when the card is a powered-down PX device, otherwise the
 * current forced level as "auto", "low" or "high", newline-terminated.
 */
static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;

	/* report "off" while the PX card is powered down */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}
    531 
/*
 * radeon_set_dpm_forced_performance_level - sysfs store handler for
 * "power_dpm_force_performance_level".
 *
 * Parses @buf as "low", "high" or "auto" (prefix match) and asks the
 * asic to force that level, under pm.mutex.  Returns @count on success;
 * -EINVAL if the card is powered down (PX), the string is unrecognized,
 * thermal throttling is active, or the asic call fails.
 */
static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		/* don't fight the thermal protection code */
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
    572 
/*
 * radeon_hwmon_get_pwm1_enable - hwmon show handler for "pwm1_enable".
 *
 * Reports the fan-control mode: 1 = manual (static PWM), 2 = automatic.
 * Mode 0 (full speed) is never reported — the fan is always either fuse-
 * or SMC-controlled.
 */
static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (rdev->asic->dpm.fan_ctrl_get_mode)
		pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}
    586 
/*
 * radeon_hwmon_set_pwm1_enable - hwmon store handler for "pwm1_enable".
 *
 * Accepts a base-10 integer: 1 selects manual (static PWM, percent-based)
 * fan control; any other value selects automatic control (mode 0 passed
 * to the asic hook).  Returns @count on success, -EINVAL when the asic
 * provides no fan_ctrl_set_mode hook, or the kstrtoint error.
 */
static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int err;
	int value;

	if(!rdev->asic->dpm.fan_ctrl_set_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
		break;
	}

	return count;
}
    614 
/* hwmon show handler for "pwm1_min": minimum PWM duty value (always 0). */
static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}
    621 
/* hwmon show handler for "pwm1_max": maximum PWM duty value (always 255). */
static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}
    628 
/*
 * radeon_hwmon_set_pwm1 - hwmon store handler for "pwm1".
 *
 * Parses a 0-255 PWM duty value, converts it to a 0-100 percentage, and
 * programs it via the asic's set_fan_speed_percent hook.
 * NOTE(review): the hook is called without a NULL check; this appears to
 * rely on hwmon_attributes_visible() masking S_IWUSR when the hook is
 * absent — confirm no other write path exists.
 */
static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	/* sysfs exposes 0-255; hardware hook takes 0-100 percent */
	value = (value * 100) / 255;

	err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
	if (err)
		return err;

	return count;
}
    649 
/*
 * radeon_hwmon_get_pwm1 - hwmon show handler for "pwm1".
 *
 * Queries the fan speed as a 0-100 percentage from the asic and scales
 * it to the hwmon 0-255 PWM range before printing it.
 */
static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
	if (err)
		return err;

	/* hardware reports percent; hwmon wants 0-255 */
	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}
    666 
/* sysfs device attributes wiring the show/store handlers above to the
 * power_profile, power_method, power_dpm_state and
 * power_dpm_force_performance_level files (root-writable, world-readable). */
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   radeon_get_dpm_forced_performance_level,
		   radeon_set_dpm_forced_performance_level);
    673 #endif
    674 
    675 #ifndef __NetBSD__		/* XXX radeon hwmon */
/*
 * radeon_hwmon_show_temp - hwmon show handler for "temp1_input".
 *
 * Prints the GPU temperature reported by the asic (0 if the asic has no
 * temperature sensor hook).  Fails with -EINVAL while a PX card is
 * powered down, since the sensor cannot be read then.
 */
static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	struct drm_device *ddev = rdev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (rdev->asic->pm.get_temperature)
		temp = radeon_get_temperature(rdev);
	else
		temp = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
    696 
/*
 * radeon_hwmon_show_temp_thresh - hwmon show handler shared by
 * "temp1_crit" (attr index 0 -> dpm thermal max_temp) and
 * "temp1_crit_hyst" (attr index 1 -> dpm thermal min_temp).
 */
static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = rdev->pm.dpm.thermal.min_temp;
	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
    712 
/* hwmon sensor attributes; the trailing index distinguishes temp1_crit
 * (0, max_temp) from temp1_crit_hyst (1, min_temp) in the shared handler. */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);


/* NULL-terminated attribute list; visibility is filtered at registration
 * time by hwmon_attributes_visible() below. */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};
    732 
/*
 * hwmon_attributes_visible - per-attribute visibility callback for the
 * hwmon attribute group.
 *
 * Returns 0 to hide an attribute entirely, or the attribute's mode with
 * read and/or write permission masked off when the asic lacks the
 * corresponding fan query/control hooks.  Thermal-threshold and fan
 * attributes require DPM; fan attributes additionally require a fan to
 * be present.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct radeon_device *rdev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip attributes if DPM is not enabled */
	if (rdev->pm.pm_method != PM_METHOD_DPM &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (rdev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!rdev->asic->dpm.get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!rdev->asic->dpm.fan_ctrl_get_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!rdev->asic->dpm.fan_ctrl_set_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     !rdev->asic->dpm.get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}
    780 
/* Attribute group (with visibility filtering) and the NULL-terminated
 * group list passed to hwmon_device_register_with_groups(). */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
    790 #endif
    791 
/*
 * radeon_hwmon_init - register the hwmon device for supported thermal
 * sensor types.
 *
 * Only registers when the asic has an internal thermal sensor of a known
 * type AND provides a get_temperature hook; otherwise returns 0 without
 * registering.  Registration failure is logged and its errno returned.
 * On NetBSD hwmon is not wired up, so this is a no-op returning 0.
 */
static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

#ifndef __NetBSD__		/* XXX radeon hwmon */
	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		if (rdev->asic->pm.get_temperature == NULL)
			return err;
		rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
									   "radeon", rdev,
									   hwmon_groups);
		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
			err = PTR_ERR(rdev->pm.int_hwmon_dev);
			dev_err(rdev->dev,
				"Unable to register hwmon device: %d\n", err);
		}
		break;
	default:
		break;
	}
#endif

	return err;
}
    824 
/* radeon_hwmon_fini - unregister the hwmon device registered by
 * radeon_hwmon_init(), if any.  No-op on NetBSD. */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
#ifndef __NetBSD__		/* XXX radeon hwmon */
	if (rdev->pm.int_hwmon_dev)
		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
#endif
}
    832 
/*
 * radeon_dpm_thermal_work_handler - work handler run on a thermal
 * interrupt.
 *
 * Decides between the internal thermal state and the user state: if the
 * temperature (or, lacking a sensor, the high_to_low interrupt flag)
 * indicates we have cooled below min_temp, the user state is restored;
 * otherwise the thermal state is entered.  Updates thermal_active and
 * dpm.state under pm.mutex, then recomputes clocks.  Does nothing when
 * dpm is disabled.
 */
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		/* no sensor: rely on the direction of the thermal interrupt */
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}
    865 
    866 static bool radeon_dpm_single_display(struct radeon_device *rdev)
    867 {
    868 	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
    869 		true : false;
    870 
    871 	/* check if the vblank period is too short to adjust the mclk */
    872 	if (single_display && rdev->asic->dpm.vblank_too_short) {
    873 		if (radeon_dpm_vblank_too_short(rdev))
    874 			single_display = false;
    875 	}
    876 
    877 	/* 120hz tends to be problematic even if they are under the
    878 	 * vblank limit.
    879 	 */
    880 	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
    881 		single_display = false;
    882 
    883 	return single_display;
    884 }
    885 
/*
 * Select the best matching power state for the requested dpm state.
 *
 * Scans rdev->pm.dpm.ps[] for a state whose ATOM classification matches
 * dpm_state, honoring ATOM_PPLIB_SINGLE_DISPLAY_ONLY restrictions, and
 * falls back through progressively more generic state types when no
 * exact match exists.  Returns NULL only if even the fallbacks fail.
 */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = radeon_dpm_single_display(rdev);

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	/* each fallback either re-runs the search with a weaker state
	 * type or returns a known-good state directly */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
   1012 
/*
 * Re-evaluate and, if needed, program a new dpm power state.
 *
 * NOTE(review): the "_locked" suffix suggests rdev->pm.mutex is held
 * by the caller (this function never takes it itself) — confirm at the
 * call sites.  Picks a state for the current rdev->pm.dpm.state,
 * short-circuits when nothing relevant changed, and otherwise
 * reprograms the hardware under mclk_lock/ring_lock after draining
 * all active rings.
 */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;
	bool single_display = radeon_dpm_single_display(rdev);

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		/* user has made a display change (such as timing) */
		if (rdev->pm.dpm.single_display != single_display)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
	rdev->pm.dpm.single_display = single_display;

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
   1142 
/*
 * Tell the power manager whether UVD (video decode) is in use.
 *
 * On ASICs with UVD powergating this gates the block directly, keeping
 * it powered while any active-but-paused streams exist.  Otherwise it
 * records UVD activity, selects the UVD dpm state, and recomputes
 * clocks.
 */
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but pause streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now */
			/* sd/hd-count based state selection intentionally
			 * compiled out; always use the generic UVD state */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}
   1184 
   1185 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
   1186 {
   1187 	if (enable) {
   1188 		mutex_lock(&rdev->pm.mutex);
   1189 		rdev->pm.dpm.vce_active = true;
   1190 		/* XXX select vce level based on ring/task */
   1191 		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
   1192 		mutex_unlock(&rdev->pm.mutex);
   1193 	} else {
   1194 		mutex_lock(&rdev->pm.mutex);
   1195 		rdev->pm.dpm.vce_active = false;
   1196 		mutex_unlock(&rdev->pm.mutex);
   1197 	}
   1198 
   1199 	radeon_pm_compute_clocks(rdev);
   1200 }
   1201 
   1202 static void radeon_pm_suspend_old(struct radeon_device *rdev)
   1203 {
   1204 	mutex_lock(&rdev->pm.mutex);
   1205 	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
   1206 		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
   1207 			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
   1208 	}
   1209 	mutex_unlock(&rdev->pm.mutex);
   1210 
   1211 	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
   1212 }
   1213 
   1214 static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
   1215 {
   1216 	mutex_lock(&rdev->pm.mutex);
   1217 	/* disable dpm */
   1218 	radeon_dpm_disable(rdev);
   1219 	/* reset the power state */
   1220 	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
   1221 	rdev->pm.dpm_enabled = false;
   1222 	mutex_unlock(&rdev->pm.mutex);
   1223 }
   1224 
   1225 void radeon_pm_suspend(struct radeon_device *rdev)
   1226 {
   1227 	if (rdev->pm.pm_method == PM_METHOD_DPM)
   1228 		radeon_pm_suspend_dpm(rdev);
   1229 	else
   1230 		radeon_pm_suspend_old(rdev);
   1231 }
   1232 
/*
 * Resume hook for the old power management path.
 *
 * Restores default voltages/clocks on BTC..Cayman parts with MC ucode
 * loaded, resets the cached power state (asic init clobbered it),
 * restarts a previously suspended dynpm state machine, and recomputes
 * clocks.
 */
static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
   1269 
/*
 * Resume hook for the dpm path.
 *
 * Re-enables dpm from the boot state; on failure, dpm stays disabled
 * and the default voltages/clocks are restored on BTC..Cayman parts
 * with MC ucode loaded so the card is left in a sane state.
 */
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}
   1302 
   1303 void radeon_pm_resume(struct radeon_device *rdev)
   1304 {
   1305 	if (rdev->pm.pm_method == PM_METHOD_DPM)
   1306 		radeon_pm_resume_dpm(rdev);
   1307 	else
   1308 		radeon_pm_resume_old(rdev);
   1309 }
   1310 
/*
 * Initialize the old (profile/dynpm) power management path.
 *
 * Seeds default clock/state bookkeeping, reads power modes from the
 * BIOS (ATOM or COMBIOS), restores default voltages/clocks on
 * BTC..Cayman parts with MC ucode, registers the thermal sensor, and
 * arms the dynpm idle worker.  Returns 0 or the hwmon init error.
 */
static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

#ifndef __NetBSD__		/* XXX radeon power */
	if (rdev->pm.num_power_states > 1) {
		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}
#endif

	return 0;
}
   1369 
   1370 static void radeon_dpm_print_power_states(struct radeon_device *rdev)
   1371 {
   1372 	int i;
   1373 
   1374 	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
   1375 		printk("== power state %d ==\n", i);
   1376 		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
   1377 	}
   1378 }
   1379 
/*
 * Initialize the dpm power management path.
 *
 * Requires an ATOM BIOS (returns -EINVAL otherwise).  Seeds defaults,
 * registers the thermal sensor and thermal worklet, then enables dpm
 * from the boot state.  On failure the default voltages/clocks are
 * restored on BTC..Cayman parts with MC ucode loaded.
 *
 * NOTE(review): unlike radeon_pm_init_old(), the radeon_debugfs_pm_init()
 * call here is not wrapped in #ifndef __NetBSD__ — confirm this is
 * intentional for the NetBSD build.
 */
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
	}

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}
   1444 
/* PCI identity of a card whose dpm implementation is known to be
 * unstable; matched on vendor/device and subsystem vendor/device. */
struct radeon_dpm_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
};
   1451 
/* cards with dpm stability problems */
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
	/* all-zero sentinel terminates the scan in radeon_pm_init() */
	{ 0, 0, 0, 0 },
};
   1460 
/*
 * Top-level power management init: choose between the dpm and the old
 * profile method based on ASIC family, available firmware (RLC, and
 * SMC on RV770+ dGPUs), quirk-list matches, and the radeon_dpm module
 * parameter, then delegate to the matching init routine.
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
	bool disable_dpm = false;

	/* Apply dpm quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			disable_dpm = true;
			break;
		}
		++p;
	}

	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (disable_dpm && (radeon_dpm == -1))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}
   1551 
/*
 * Late power management init, called once the device is fully up.
 *
 * On the dpm path: creates the sysfs files (non-NetBSD only) and runs
 * the late-enable step; on failure dpm is disabled.  On the old path:
 * creates the profile/method sysfs files when more than one power
 * state exists.  Returns the last error encountered, else 0.
 */
int radeon_pm_late_init(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		if (rdev->pm.dpm_enabled) {
#ifndef __NetBSD__		/* XXX radeon sysfs */
			if (!rdev->pm.sysfs_initialized) {
				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
				if (ret)
					DRM_ERROR("failed to create device file for dpm state\n");
				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
				if (ret)
					DRM_ERROR("failed to create device file for dpm state\n");
				/* XXX: these are noops for dpm but are here for backwards compat */
				ret = device_create_file(rdev->dev, &dev_attr_power_profile);
				if (ret)
					DRM_ERROR("failed to create device file for power profile\n");
				ret = device_create_file(rdev->dev, &dev_attr_power_method);
				if (ret)
					DRM_ERROR("failed to create device file for power method\n");
				rdev->pm.sysfs_initialized = true;
			}
#endif

			mutex_lock(&rdev->pm.mutex);
			ret = radeon_dpm_late_enable(rdev);
			mutex_unlock(&rdev->pm.mutex);
			if (ret) {
				rdev->pm.dpm_enabled = false;
				DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
			} else {
				/* set the dpm state for PX since there won't be
				 * a modeset to call this.
				 */
				radeon_pm_compute_clocks(rdev);
			}
		}
	} else {
		if ((rdev->pm.num_power_states > 1) &&
		    (!rdev->pm.sysfs_initialized)) {
#ifndef __NetBSD__	     /* XXX radeon sysfs */
			/* where's the best place to put these? */
			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
			if (ret)
				DRM_ERROR("failed to create device file for power profile\n");
			ret = device_create_file(rdev->dev, &dev_attr_power_method);
			if (ret)
				DRM_ERROR("failed to create device file for power method\n");
			if (!ret)
				rdev->pm.sysfs_initialized = true;
#endif
		}
	}
	return ret;
}
   1608 
/*
 * Tear down the old power management path: restore default clocks,
 * stop the dynpm idle worker, remove sysfs files (non-NetBSD), then
 * release the hwmon device and the power state array.
 */
static void radeon_pm_fini_old(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

#ifndef __NetBSD__		/* XXX radeon power */
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
	}

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}
   1636 
/*
 * Tear down the dpm power management path: disable dpm and remove the
 * sysfs files (non-NetBSD) when more than one power state exists, then
 * always run the dpm/hwmon teardown and free the power state array.
 */
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		mutex_unlock(&rdev->pm.mutex);

#ifndef __NetBSD__		/* XXX radeon power */
		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
		/* XXX backwards compat */
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
	}
	radeon_dpm_fini(rdev);

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}
   1657 
   1658 void radeon_pm_fini(struct radeon_device *rdev)
   1659 {
   1660 	if (rdev->pm.pm_method == PM_METHOD_DPM)
   1661 		radeon_pm_fini_dpm(rdev);
   1662 	else
   1663 		radeon_pm_fini_old(rdev);
   1664 }
   1665 
/*
 * Recompute clocks for the old power management path.
 *
 * Counts active CRTCs, then either re-applies the current profile or
 * drives the dynpm state machine: pause it for multi-head, run it with
 * the idle worker for a single head, and drop to minimum clocks when
 * no head is active.  No-op with fewer than two power states.
 */
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
   1738 
   1739 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
   1740 {
   1741 	struct drm_device *ddev = rdev->ddev;
   1742 	struct drm_crtc *crtc;
   1743 	struct radeon_crtc *radeon_crtc;
   1744 
   1745 	if (!rdev->pm.dpm_enabled)
   1746 		return;
   1747 
   1748 	mutex_lock(&rdev->pm.mutex);
   1749 
   1750 	/* update active crtc counts */
   1751 	rdev->pm.dpm.new_active_crtcs = 0;
   1752 	rdev->pm.dpm.new_active_crtc_count = 0;
   1753 	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
   1754 		list_for_each_entry(crtc,
   1755 				    &ddev->mode_config.crtc_list, head) {
   1756 			radeon_crtc = to_radeon_crtc(crtc);
   1757 			if (crtc->enabled) {
   1758 				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
   1759 				rdev->pm.dpm.new_active_crtc_count++;
   1760 			}
   1761 		}
   1762 	}
   1763 
   1764 	/* update battery/ac status */
   1765 	if (power_supply_is_system_supplied() > 0)
   1766 		rdev->pm.dpm.ac_power = true;
   1767 	else
   1768 		rdev->pm.dpm.ac_power = false;
   1769 
   1770 	radeon_dpm_change_power_state_locked(rdev);
   1771 
   1772 	mutex_unlock(&rdev->pm.mutex);
   1773 
   1774 }
   1775 
   1776 void radeon_pm_compute_clocks(struct radeon_device *rdev)
   1777 {
   1778 	if (rdev->pm.pm_method == PM_METHOD_DPM)
   1779 		radeon_pm_compute_clocks_dpm(rdev);
   1780 	else
   1781 		radeon_pm_compute_clocks_old(rdev);
   1782 }
   1783 
   1784 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
   1785 {
   1786 	int  crtc, vpos, hpos, vbl_status;
   1787 	bool in_vbl = true;
   1788 
   1789 	/* Iterate over all active crtc's. All crtc's must be in vblank,
   1790 	 * otherwise return in_vbl == false.
   1791 	 */
   1792 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
   1793 		if (rdev->pm.active_crtcs & (1 << crtc)) {
   1794 			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
   1795 								crtc,
   1796 								USE_REAL_VBLANKSTART,
   1797 								&vpos, &hpos, NULL, NULL,
   1798 								&rdev->mode_info.crtcs[crtc]->base.hwmode);
   1799 			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
   1800 			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
   1801 				in_vbl = false;
   1802 		}
   1803 	}
   1804 
   1805 	return in_vbl;
   1806 }
   1807 
   1808 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
   1809 {
   1810 	u32 stat_crtc = 0;
   1811 	bool in_vbl = radeon_pm_in_vbl(rdev);
   1812 
   1813 	if (in_vbl == false)
   1814 		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
   1815 			 finish ? "exit" : "entry");
   1816 	return in_vbl;
   1817 }
   1818 
/*
 * radeon_dynpm_idle_work_handler - periodic dynpm reclocking worker
 *
 * Runs every RADEON_IDLE_LOOP_MS while dynpm is ACTIVE.  Estimates GPU
 * busyness from the number of emitted-but-unprocessed fences, plans an
 * up- or downclock accordingly, and applies the planned action once its
 * RADEON_RECLOCK_DELAY_MS timeout has expired.  Re-arms itself while
 * the state remains ACTIVE.
 */
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
				pm.dynpm_idle_work.work);

	/* Hold off TTM's delayed work while clocks may change. */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		/* Count outstanding fences on all ready rings; 3 or more
		 * is treated as "busy", so stop counting early. */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (ring->ready) {
				not_processed += radeon_fence_count_emitted(rdev, i);
				if (not_processed >= 3)
					break;
			}
		}

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				/* Cancel a pending downclock instead of
				 * scheduling a competing upclock. */
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				/* Symmetric case: cancel a pending upclock. */
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		/* NOTE(review): the raw "jiffies >" comparison is not
		 * wrap-around safe; time_after() would be the canonical
		 * form — confirm the compat layer provides it before
		 * changing. */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    jiffies > rdev->pm.dynpm_action_timeout) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		/* Re-arm ourselves while dynpm stays active. */
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
   1879 
   1880 /*
   1881  * Debugfs info
   1882  */
   1883 #if defined(CONFIG_DEBUG_FS)
   1884 
   1885 static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
   1886 {
   1887 	struct drm_info_node *node = (struct drm_info_node *) m->private;
   1888 	struct drm_device *dev = node->minor->dev;
   1889 	struct radeon_device *rdev = dev->dev_private;
   1890 	struct drm_device *ddev = rdev->ddev;
   1891 
   1892 	if  ((rdev->flags & RADEON_IS_PX) &&
   1893 	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
   1894 		seq_printf(m, "PX asic powered off\n");
   1895 	} else if (rdev->pm.dpm_enabled) {
   1896 		mutex_lock(&rdev->pm.mutex);
   1897 		if (rdev->asic->dpm.debugfs_print_current_performance_level)
   1898 			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
   1899 		else
   1900 			seq_printf(m, "Debugfs support not implemented for this asic\n");
   1901 		mutex_unlock(&rdev->pm.mutex);
   1902 	} else {
   1903 		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
   1904 		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
   1905 		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
   1906 			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
   1907 		else
   1908 			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
   1909 		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
   1910 		if (rdev->asic->pm.get_memory_clock)
   1911 			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
   1912 		if (rdev->pm.current_vddc)
   1913 			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
   1914 		if (rdev->asic->pm.get_pcie_lanes)
   1915 			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
   1916 	}
   1917 
   1918 	return 0;
   1919 }
   1920 
/* Table of pm debugfs files registered by radeon_debugfs_pm_init(). */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
   1924 #endif
   1925 
/*
 * radeon_debugfs_pm_init - register the pm debugfs files
 *
 * Returns the result of radeon_debugfs_add_files(), or 0 when the
 * kernel is built without CONFIG_DEBUG_FS (no-op in that case).
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}
   1934