/*	$NetBSD: amdgpu_pm.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $	*/

/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_pm.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $");

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#ifndef __NetBSD__		/* XXX sysfs */
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
#endif

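/*
 * React to an ACPI AC/DC power event: re-read the AC adapter state, cache
 * it in adev->pm.dpm.ac_power, and, on asics that support it, re-tune BAPM
 * for the new power source.
 */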
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

#ifndef __NetBSD__		/* XXX sysfs */

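/* Sysfs read handler for power_dpm_state: report the user-selected state. */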
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_pm_state_type pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

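/*
 * Sysfs write handler for power_dpm_state: accept "battery", "balanced",
 * or "performance" and recompute clocks unless the card is powered off.
 */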
static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	mutex_lock(&adev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		adev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		adev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		mutex_unlock(&adev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&adev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(adev->flags & AMD_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		amdgpu_pm_compute_clocks(adev);
fail:
	return count;
}

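/* Sysfs read handler for power_dpm_force_performance_level. */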
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}

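/*
 * Sysfs write handler for power_dpm_force_performance_level: accept "low",
 * "high", or "auto".  Forcing a level is refused while a thermal event is
 * active, since thermal handling forces the low level itself.
 */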
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	mutex_lock(&adev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&adev->pm.mutex);

	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);

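/*
 * hwmon temperature interface: amdgpu_hwmon_show_temp reports the current
 * GPU temperature (temp1_input); amdgpu_hwmon_show_temp_thresh reports the
 * critical (index 0, max_temp) and hysteresis (index 1, min_temp) trip
 * points.  hwmon expects these values in millidegrees Celsius.
 */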
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int temp;

	if (adev->pm.funcs->get_temperature)
		temp = amdgpu_dpm_get_temperature(adev);
	else
		temp = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

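/*
 * hwmon: report the fan control mode.  1 = manual (static PWM),
 * 2 = automatic; 0 (full speed) is never reported because the fan is
 * always fuse- or SMC-controlled.
 */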
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (adev->pm.funcs->get_fan_control_mode)
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

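/* hwmon: set the fan control mode (1 = manual/static PWM, else automatic). */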
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

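/*
 * hwmon: set the fan speed.  The hwmon pwm1 range 0..255 is rescaled to
 * the 0..100 percent range the dpm fan interface expects.
 */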
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

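/* hwmon: read back the fan speed, rescaled from percent to 0..255. */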
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

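/*
 * Decide per-attribute visibility for the hwmon group: hide everything
 * DPM-dependent when DPM is disabled, hide fan nodes when there is no fan,
 * and drop read or write permission for callbacks the asic lacks.
 */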
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

#endif	/* __NetBSD__ */

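/*
 * Worker for the thermal interrupt: enter the internal thermal state when
 * the GPU runs hot, return to the user state once it has cooled back below
 * the low threshold, then recompute clocks.
 */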
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amdgpu_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

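/*
 * Select the best matching power state for the requested state type,
 * preferring single-display-only states when only one crtc is active
 * (and the vblank period is long enough to retune mclk), and falling
 * back through progressively more generic state types on a miss.
 */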
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amdgpu_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

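/*
 * Core state-switch routine, called with adev->pm.mutex held: pick the
 * requested power state, skip the reprogram when nothing relevant changed,
 * otherwise drain the rings, program the new state, and reapply any forced
 * performance level (low is forced while a thermal event is active).
 */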
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amdgpu_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	mutex_lock(&adev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&adev->ring_lock);
}

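/*
 * Power-gate UVD, or, on asics without UVD powergating, park dpm in the
 * internal UVD state while decode is active.
 */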
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pm.funcs->powergate_uvd) {
		mutex_lock(&adev->pm.mutex);
		/* enable/disable UVD */
		amdgpu_dpm_powergate_uvd(adev, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = false;
			mutex_unlock(&adev->pm.mutex);
		}

		amdgpu_pm_compute_clocks(adev);
	}
}

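/*
 * Power-gate VCE, or, on asics without VCE powergating, flag VCE activity
 * (and a VCE level) so the next state switch accounts for it.
 */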
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pm.funcs->powergate_vce) {
		mutex_lock(&adev->pm.mutex);
		/* enable/disable VCE */
		amdgpu_dpm_powergate_vce(adev, !enable);

		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = false;
			mutex_unlock(&adev->pm.mutex);
		}

		amdgpu_pm_compute_clocks(adev);
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		printk("== power state %d ==\n", i);
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
	}
}

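/*
 * Register (and, in amdgpu_pm_sysfs_fini, tear down) the dpm sysfs files,
 * the hwmon device, and the debugfs entry.  Stubbed out on NetBSD until
 * there is a sysfs equivalent.
 */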
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
#ifdef __NetBSD__		/* XXX sysfs */
	return 0;
#else
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (adev->pm.funcs->get_temperature == NULL)
		return 0;
	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm force performance level\n");
		return ret;
	}
	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
#endif
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
#ifndef __NetBSD__
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
#endif
}

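/*
 * Recount the active crtcs, refresh the AC/battery status, and run a
 * dpm state switch.  This is the main entry point whenever anything
 * that influences power management changes.
 */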
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	mutex_lock(&adev->pm.mutex);

	/* update active crtc counts */
	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		adev->pm.dpm.ac_power = true;
	else
		adev->pm.dpm.ac_power = false;

	amdgpu_dpm_change_power_state_locked(adev);

	mutex_unlock(&adev->pm.mutex);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

#ifndef __NetBSD__		/* XXX sysfs */
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}
#endif
835