xref: /openbsd-src/sys/dev/pci/drm/radeon/radeon_pm.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: radeon_pm.c,v 1.7 2014/02/15 12:43:38 jsg Exp $	*/
2 /*
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19  * OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * Authors: Rafał Miłecki <zajec5@gmail.com>
22  *          Alex Deucher <alexdeucher@gmail.com>
23  */
24 #include <dev/pci/drm/drmP.h>
25 #include "radeon.h"
26 #include "avivod.h"
27 #include "atom.h"
28 
29 #define RADEON_IDLE_LOOP_MS 100
30 #define RADEON_RECLOCK_DELAY_MS 200
31 #define RADEON_WAIT_VBLANK_TIMEOUT 200
32 
#ifdef DRMDEBUG
/* Human-readable names indexed by enum radeon_pm_state_type; used by
 * radeon_pm_print_states() when debug output is compiled in. */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
#endif
42 
43 #ifdef notyet
44 static void radeon_dynpm_idle_work_handler(struct work_struct *work);
45 #endif
46 int radeon_debugfs_pm_init(struct radeon_device *rdev);
47 static bool radeon_pm_in_vbl(struct radeon_device *rdev);
48 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
49 static void radeon_pm_update_profile(struct radeon_device *rdev);
50 static void radeon_pm_set_clocks(struct radeon_device *rdev);
51 
52 void	 radeon_pm_acpi_event_handler(struct radeon_device *);
53 ssize_t	 radeon_get_pm_profile(struct device *, struct device_attribute *, char *);
54 ssize_t	 radeon_get_pm_method(struct device *, struct device_attribute *, char *);
55 ssize_t	 radeon_set_pm_profile(struct device *, struct device_attribute *,
56 	     const char *, size_t);
57 ssize_t	 radeon_set_pm_method(struct device *, struct device_attribute *,
58 	     const char *, size_t);
59 void	 radeon_dynpm_idle_tick(void *);
60 void	 radeon_dynpm_idle_work_handler(void *, void *);
61 
62 extern int ticks;
63 
64 int radeon_pm_get_type_index(struct radeon_device *rdev,
65 			     enum radeon_pm_state_type ps_type,
66 			     int instance)
67 {
68 	int i;
69 	int found_instance = -1;
70 
71 	for (i = 0; i < rdev->pm.num_power_states; i++) {
72 		if (rdev->pm.power_state[i].type == ps_type) {
73 			found_instance++;
74 			if (found_instance == instance)
75 				return i;
76 		}
77 	}
78 	/* return default if no match */
79 	return rdev->pm.default_power_state_index;
80 }
81 
82 void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
83 {
84 	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
85 		if (rdev->pm.profile == PM_PROFILE_AUTO) {
86 			rw_enter_write(&rdev->pm.rwlock);
87 			radeon_pm_update_profile(rdev);
88 			radeon_pm_set_clocks(rdev);
89 			rw_exit_write(&rdev->pm.rwlock);
90 		}
91 	}
92 }
93 
int	power_supply_is_system_supplied(void);

/*
 * Stand-in for the Linux power-supply API: report whether the system
 * runs on external (AC) power.  Always claims AC for now.
 */
int
power_supply_is_system_supplied(void)
{
	/* XXX return 0 if on battery */
	return 1;
}
102 
103 static void radeon_pm_update_profile(struct radeon_device *rdev)
104 {
105 	switch (rdev->pm.profile) {
106 	case PM_PROFILE_DEFAULT:
107 		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
108 		break;
109 	case PM_PROFILE_AUTO:
110 		if (power_supply_is_system_supplied() > 0) {
111 			if (rdev->pm.active_crtc_count > 1)
112 				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
113 			else
114 				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
115 		} else {
116 			if (rdev->pm.active_crtc_count > 1)
117 				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
118 			else
119 				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
120 		}
121 		break;
122 	case PM_PROFILE_LOW:
123 		if (rdev->pm.active_crtc_count > 1)
124 			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
125 		else
126 			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
127 		break;
128 	case PM_PROFILE_MID:
129 		if (rdev->pm.active_crtc_count > 1)
130 			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
131 		else
132 			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
133 		break;
134 	case PM_PROFILE_HIGH:
135 		if (rdev->pm.active_crtc_count > 1)
136 			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
137 		else
138 			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
139 		break;
140 	}
141 
142 	if (rdev->pm.active_crtc_count == 0) {
143 		rdev->pm.requested_power_state_index =
144 			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
145 		rdev->pm.requested_clock_mode_index =
146 			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
147 	} else {
148 		rdev->pm.requested_power_state_index =
149 			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
150 		rdev->pm.requested_clock_mode_index =
151 			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
152 	}
153 }
154 
155 static void radeon_unmap_vram_bos(struct radeon_device *rdev)
156 {
157 	struct radeon_bo *bo, *n;
158 
159 	if (list_empty(&rdev->gem.objects))
160 		return;
161 
162 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
163 		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
164 			ttm_bo_unmap_virtual(&bo->tbo);
165 	}
166 }
167 
/*
 * Sleep up to RADEON_WAIT_VBLANK_TIMEOUT ms on the vblank wait channel
 * so a following clock change lands near a blanking period.  Skipped
 * when no CRTC is active.  NOTE(review): vblank_sync is cleared here
 * but never re-checked after the tsleep, so this is a single bounded
 * sleep rather than a wait-for-condition loop — confirm the vblank
 * interrupt path wakes &rdev->irq.vblank_queue.
 */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		tsleep(&rdev->irq.vblank_queue, PZERO, "rdnsvb",
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}
176 
/*
 * Program the requested engine (sclk) and memory (mclk) clocks and
 * associated voltage/pcie-lane settings.  Sequencing matters:
 * radeon_pm_misc() runs before raising clocks and after lowering them
 * (upvolt-before / downvolt-after), and each clock write is bracketed
 * by vblank checks.  Called with the pm locks held by
 * radeon_pm_set_clocks().  No-op when the requested state already
 * matches the current one, or when the GUI engine is not idle.
 */
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	/* nothing to do if the requested state is already programmed */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		/* clamp the engine clock to the default maximum */
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		/* clamp the memory clock to the default maximum */
		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		/* dynpm reclocks only inside vblank to avoid artifacts */
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
256 
/*
 * Quiesce the GPU and switch to the requested power state.
 * Lock order: DRM_LOCK -> pm.mclk_lock -> ring_lock.  With the locks
 * held, all rings are drained, VRAM CPU mappings are torn down, and a
 * vblank reference is taken on each active CRTC around the actual
 * state change so vblank interrupts keep arriving.  Finally display
 * watermarks are recomputed for the new clocks.
 */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	DRM_LOCK();
	rw_enter_write(&rdev->pm.mclk_lock);
	rw_enter_write(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* needs a GPU reset dont reset here */
			rw_exit_write(&rdev->ring_lock);
			rw_exit_write(&rdev->pm.mclk_lock);
			DRM_UNLOCK();
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* hold a vblank reference on each active crtc during the switch */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	/* drop the vblank references taken above */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	rw_exit_write(&rdev->ring_lock);
	rw_exit_write(&rdev->pm.mclk_lock);
	DRM_UNLOCK();
}
320 
/*
 * Dump every known power state and its clock modes via
 * DRM_DEBUG_DRIVER.  Purely informational; IGP parts have no separate
 * memory clock/voltage so only the engine clock is printed for them.
 */
static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
			radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}
354 
355 #ifdef notyet
/*
 * sysfs "power_profile" show handler: print the currently selected
 * profile name.  Linux sysfs glue, currently compiled out (notyet).
 */
ssize_t
radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(cp == PM_PROFILE_AUTO) ? "auto" :
			(cp == PM_PROFILE_LOW) ? "low" :
			(cp == PM_PROFILE_MID) ? "mid" :
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
}
371 
/*
 * sysfs "power_profile" store handler: parse the profile name, update
 * the profile, and reprogram clocks under the pm lock.  Rejects input
 * (-EINVAL) unless the driver is in profile mode.  Compiled out
 * (notyet).  Note: count is size_t, so -EINVAL relies on the implicit
 * conversion through the ssize_t return — the usual Linux sysfs idiom.
 */
ssize_t
radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	rw_enter_write(&rdev->pm.rwlock);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	rw_exit_write(&rdev->pm.rwlock);

	return count;
}
407 
/*
 * sysfs "power_method" show handler: print "dynpm" or "profile".
 * Compiled out (notyet).
 */
ssize_t
radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}
420 
/*
 * sysfs "power_method" store handler: switch between dynpm and profile
 * modes.  Switching to profile mode disables dynpm and cancels its idle
 * timeout/task (done after dropping the lock, since the task itself
 * takes pm.rwlock).  Compiled out (notyet).
 */
ssize_t
radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;


	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		rw_enter_write(&rdev->pm.rwlock);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		rw_exit_write(&rdev->pm.rwlock);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		rw_enter_write(&rdev->pm.rwlock);
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		rw_exit_write(&rdev->pm.rwlock);
		timeout_del(&rdev->pm.dynpm_idle_to);
		task_del(systq, &rdev->pm.dynpm_idle_task);
	} else {
		count = -EINVAL;
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}
454 
455 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
456 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
457 #endif /* notyet */
458 
459 #ifdef notyet
/*
 * hwmon temp1_input show handler: read the internal thermal sensor
 * appropriate for the ASIC family and print it.  Families without a
 * supported sensor report 0.  Compiled out (notyet).
 */
static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int temp;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
		temp = rv6xx_get_temp(rdev);
		break;
	case THERMAL_TYPE_RV770:
		temp = rv770_get_temp(rdev);
		break;
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
		temp = evergreen_get_temp(rdev);
		break;
	case THERMAL_TYPE_SUMO:
		temp = sumo_get_temp(rdev);
		break;
	case THERMAL_TYPE_SI:
		temp = si_get_temp(rdev);
		break;
	default:
		temp = 0;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
492 
/* hwmon "name" attribute: identifies the sensor provider as "radeon". */
static ssize_t radeon_hwmon_show_name(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "radeon\n");
}
499 
/* sysfs attribute wiring for the hwmon device (temp1_input + name). */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_name.dev_attr.attr,
	NULL
};

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
};
512 #endif
513 
/*
 * Register the internal thermal sensor as a hwmon device when the ASIC
 * has one.  With the Linux hwmon glue compiled out (notyet) the
 * supported-sensor cases intentionally fall through into default and
 * the function returns 0 without registering anything.
 */
static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	rdev->pm.int_hwmon_dev = NULL;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
		/* No support for TN yet */
		if (rdev->family == CHIP_ARUBA)
			return err;
#ifdef notyet
		rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
			err = PTR_ERR(rdev->pm.int_hwmon_dev);
			dev_err(rdev->dev,
				"Unable to register hwmon device: %d\n", err);
			break;
		}
		dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
		err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
					 &hwmon_attrgroup);
		if (err) {
			dev_err(rdev->dev,
				"Unable to create hwmon sysfs file: %d\n", err);
			hwmon_device_unregister(rdev->dev);
		}
		break;
#endif
		/* fallthrough (when notyet is not defined) */
	default:
		break;
	}

	return err;
}
554 
/*
 * Unregister the hwmon device.  Currently a stub on OpenBSD (prints a
 * marker); the real teardown is behind notyet.
 */
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
	printf("%s stub\n", __func__);
#ifdef notyet
	if (rdev->pm.int_hwmon_dev) {
		sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
	}
#endif
}
565 
/*
 * Suspend power management: mark an active dynpm state machine as
 * suspended (so resume can restart it) and cancel the pending idle
 * timeout/task after dropping the lock.
 */
void radeon_pm_suspend(struct radeon_device *rdev)
{
	rw_enter_write(&rdev->pm.rwlock);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	rw_exit_write(&rdev->pm.rwlock);

	timeout_del(&rdev->pm.dynpm_idle_to);
	task_del(systq, &rdev->pm.dynpm_idle_task);
}
578 
/*
 * Resume power management after ASIC re-init.  Restores the default
 * voltages/clocks on BTC..Cayman parts when MC microcode is loaded,
 * resets the tracked current state to the defaults (asic init reset
 * the hardware), restarts a suspended dynpm state machine, and finally
 * recomputes clocks for the current display configuration.
 */
void radeon_pm_resume(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	rw_enter_write(&rdev->pm.rwlock);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		timeout_add_msec(&rdev->pm.dynpm_idle_to, RADEON_IDLE_LOOP_MS);
	}
	rw_exit_write(&rdev->pm.rwlock);
	radeon_pm_compute_clocks(rdev);
}
614 
/*
 * One-time PM setup: default to profile mode, seed the clock state
 * from the VBIOS defaults, parse the power tables (atom or combios),
 * bring up the thermal sensor, and wire the dynpm idle task/timeout.
 * Returns 0 on success or the hwmon init error.
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	int ret;

	/* default to profile method */
	rdev->pm.pm_method = PM_METHOD_PROFILE;
	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		/* power tables come from the video BIOS */
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	/* dynpm: timer kicks a task since reclocking may sleep */
	task_set(&rdev->pm.dynpm_idle_task, radeon_dynpm_idle_work_handler,
	    rdev, NULL);
	timeout_set(&rdev->pm.dynpm_idle_to, radeon_dynpm_idle_tick, rdev);

	if (rdev->pm.num_power_states > 1) {
#ifdef notyet
		/* where's the best place to put these? */
		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
		if (ret)
			DRM_ERROR("failed to create device file for power profile\n");
		ret = device_create_file(rdev->dev, &dev_attr_power_method);
		if (ret)
			DRM_ERROR("failed to create device file for power method\n");

		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}
#endif

#ifdef DRMDEBUG
		DRM_INFO("radeon: power management initialized\n");
#endif
	}

	return 0;
}
687 
/*
 * Tear down power management: restore the default power state for the
 * active method, cancel the dynpm timeout/task, free the power state
 * table and unregister the thermal sensor.
 */
void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		rw_enter_write(&rdev->pm.rwlock);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		rw_exit_write(&rdev->pm.rwlock);

		timeout_del(&rdev->pm.dynpm_idle_to);
		task_del(systq, &rdev->pm.dynpm_idle_task);

#ifdef notyet
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
	}

	if (rdev->pm.power_state)
		kfree(rdev->pm.power_state);

	radeon_hwmon_fini(rdev);
}
718 
/*
 * Re-evaluate clocks after a display configuration change.  Recounts
 * the active CRTCs, then either refreshes the profile-mode clocks or
 * drives the dynpm state machine: >1 head pauses dynpm at default
 * clocks, exactly 1 head (re)activates it, 0 heads drops to minimum
 * clocks.  No-op with fewer than two power states.
 */
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	rw_enter_write(&rdev->pm.rwlock);

	/* recount active crtcs */
	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
		&ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					timeout_del(&rdev->pm.dynpm_idle_to);
					task_del(systq, &rdev->pm.dynpm_idle_task);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					timeout_add_msec(&rdev->pm.dynpm_idle_to, RADEON_IDLE_LOOP_MS);
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					timeout_add_msec(&rdev->pm.dynpm_idle_to, RADEON_IDLE_LOOP_MS);
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					timeout_del(&rdev->pm.dynpm_idle_to);
					task_del(systq, &rdev->pm.dynpm_idle_task);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	rw_exit_write(&rdev->pm.rwlock);
}
789 
790 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
791 {
792 	int  crtc, vpos, hpos, vbl_status;
793 	bool in_vbl = true;
794 
795 	/* Iterate over all active crtc's. All crtc's must be in vblank,
796 	 * otherwise return in_vbl == false.
797 	 */
798 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
799 		if (rdev->pm.active_crtcs & (1 << crtc)) {
800 			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
801 			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
802 			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
803 				in_vbl = false;
804 		}
805 	}
806 
807 	return in_vbl;
808 }
809 
/*
 * Debug wrapper around radeon_pm_in_vbl(): logs when a clock change is
 * entered/exited outside vblank.  stat_crtc exists only under DRMDEBUG
 * because DRM_DEBUG_DRIVER discards its arguments otherwise.
 */
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
#ifdef DRMDEBUG
	u32 stat_crtc = 0;
#endif
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");
	return in_vbl;
}
822 
/*
 * dynpm idle timeout callback: defer the real work to the system task
 * queue, since reclocking (radeon_pm_set_clocks) takes locks and sleeps.
 */
void
radeon_dynpm_idle_tick(void *arg)
{
	struct radeon_device *rdev = arg;

	task_add(systq, &rdev->pm.dynpm_idle_task);
}
830 
/*
 * dynpm worker (runs from systq).  While the state machine is ACTIVE:
 * count fences emitted but not processed across all ready rings; >= 3
 * pending plans an upclock, 0 pending plans a downclock, and a planned
 * action is executed once RADEON_RECLOCK_DELAY_MS has elapsed.  The
 * timer is re-armed each pass so this runs every RADEON_IDLE_LOOP_MS.
 * TTM's delayed-delete workqueue is parked around the whole pass.
 */
void
radeon_dynpm_idle_work_handler(void *arg1, void *arg2)
{
	struct radeon_device *rdev = arg1;
	int resched;

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	rw_enter_write(&rdev->pm.rwlock);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (ring->ready) {
				not_processed += radeon_fence_count_emitted(rdev, i);
				if (not_processed >= 3)
					break;
			}
		}

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = ticks +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = ticks +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    ticks - rdev->pm.dynpm_action_timeout > 0) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		timeout_add_msec(&rdev->pm.dynpm_idle_to, RADEON_IDLE_LOOP_MS);
	}
	rw_exit_write(&rdev->pm.rwlock);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
889 
890 /*
891  * Debugfs info
892  */
893 #if defined(CONFIG_DEBUG_FS)
894 
/*
 * debugfs "radeon_pm_info": dump current/default engine and memory
 * clocks, voltage and PCIE lane count.  Clocks are stored in 10 kHz
 * units, hence the "%u0 kHz" format trick.
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
	/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
	if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
		seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
	else
		seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
	if (rdev->asic->pm.get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->pm.current_vddc)
		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
	if (rdev->asic->pm.get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}
917 
/* debugfs entries registered by radeon_debugfs_pm_init(). */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
921 #endif
922 
/*
 * Register the PM debugfs files; a successful no-op when debugfs
 * support is not compiled in.
 */
int
radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}
932