/*	$NetBSD: amdgpu_pm.c,v 1.1.1.1 2018/08/27 01:34:44 riastradh Exp $	*/

/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_pm.c,v 1.1.1.1 2018/08/27 01:34:44 riastradh Exp $");

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

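/*
 * Called on ACPI events: re-read whether the system is on AC or battery
 * power and, where the asic supports it, let the SMC adjust BAPM
 * (bidirectional application power management) accordingly.
 */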
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

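/*
 * sysfs show callback for power_dpm_state: reports the user-requested
 * power state as "battery", "balanced" or "performance".
 */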
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_pm_state_type pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

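/*
 * sysfs store callback for power_dpm_state: parses the requested state
 * and recomputes clocks unless the card is currently powered off (PX).
 */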
static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	mutex_lock(&adev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		adev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		adev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		mutex_unlock(&adev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&adev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(adev->flags & AMD_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		amdgpu_pm_compute_clocks(adev);
fail:
	return count;
}

static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}

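/*
 * sysfs store callback for power_dpm_force_performance_level: forces the
 * "low", "high" or "auto" level; rejected while a thermal event is active.
 */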
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	mutex_lock(&adev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&adev->pm.mutex);

	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);

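/*
 * hwmon temp1_input: current GPU temperature as reported by the dpm
 * backend (millidegrees Celsius, per hwmon convention; 0 if unsupported).
 */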
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int temp;

	if (adev->pm.funcs->get_temperature)
		temp = amdgpu_dpm_get_temperature(adev);
	else
		temp = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

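/*
 * hwmon pwm1_enable: 1 = manual (static PWM) fan control, 2 = automatic
 * (fuse/smc-controlled); 0 (full speed) is never reported.
 */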
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (adev->pm.funcs->get_fan_control_mode)
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

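/*
 * Decide at registration time which hwmon attributes to expose, based on
 * whether DPM is enabled, whether a fan is present, and which fan
 * callbacks this asic implements.
 */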
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

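/*
 * Worker scheduled from the thermal interrupt: enter the internal thermal
 * state while the GPU is too hot, and drop back to the user state once
 * the temperature falls below the minimum threshold.
 */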
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amdgpu_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

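/*
 * Select the best matching power state for the requested state type,
 * preferring single-display-only states when at most one crtc is active
 * and falling back through progressively safer states if nothing matches.
 */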
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amdgpu_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

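/*
 * Core state switch, called with adev->pm.mutex held: pick the requested
 * power state, skip the reprogram when nothing relevant changed, and
 * otherwise update watermarks, drain the rings and program the new state.
 */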
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amdgpu_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	mutex_lock(&adev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&adev->ring_lock);
}

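/*
 * Power-manage the UVD block around decode sessions: use powergating when
 * the asic supports it, otherwise switch to the internal UVD power state.
 */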
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pm.funcs->powergate_uvd) {
		mutex_lock(&adev->pm.mutex);
		/* enable/disable UVD */
		amdgpu_dpm_powergate_uvd(adev, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = false;
			mutex_unlock(&adev->pm.mutex);
		}

		amdgpu_pm_compute_clocks(adev);
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pm.funcs->powergate_vce) {
		mutex_lock(&adev->pm.mutex);
		/* enable/disable VCE */
		amdgpu_dpm_powergate_vce(adev, !enable);

		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = false;
			mutex_unlock(&adev->pm.mutex);
		}

		amdgpu_pm_compute_clocks(adev);
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		printk("== power state %d ==\n", i);
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
	}
}

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (adev->pm.funcs->get_temperature == NULL)
		return 0;
	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm force performance level\n");
		return ret;
	}
	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
}

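/*
 * Recompute clocks for the current display/power conditions: refresh the
 * active crtc mask and AC/DC status, then re-run the state selection.
 */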
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	mutex_lock(&adev->pm.mutex);

	/* update active crtc counts */
	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		adev->pm.dpm.ac_power = true;
	else
		adev->pm.dpm.ac_power = false;

	amdgpu_dpm_change_power_state_locked(adev);

	mutex_unlock(&adev->pm.mutex);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}