/*	$NetBSD: intel_runtime_pm.h,v 1.2 2021/12/18 23:45:29 riastradh Exp $	*/

/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_RUNTIME_PM_H__
#define __INTEL_RUNTIME_PM_H__

#include <linux/types.h>

#include "display/intel_display.h"

#include "intel_wakeref.h"

#include "i915_utils.h"

struct device;
struct drm_i915_private;
struct drm_printer;

enum i915_drm_suspend_mode {
	I915_DRM_SUSPEND_IDLE,
	I915_DRM_SUSPEND_MEM,
	I915_DRM_SUSPEND_HIBERNATE,
};

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, not even register access, so we don't get
 * interrupts nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.rst.
 */
struct intel_runtime_pm {
	atomic_t wakeref_count;
	struct device *kdev; /* points to i915->drm.pdev->dev */
	bool available;
	bool suspended;
	bool irqs_enabled;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove the corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct intel_runtime_pm_debug {
		spinlock_t lock;

		depot_stack_handle_t last_acquire;
		depot_stack_handle_t last_release;

		depot_stack_handle_t *owners;
		unsigned long count;
	} debug;
#endif
};
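
/*
 * A minimal sketch of the rule described above: any path that touches the
 * hardware first takes a runtime PM wakeref and releases it when done.  The
 * function below is hypothetical and for illustration only (assuming the
 * struct above is embedded as i915->runtime_pm); the get/put helpers are the
 * real API declared later in this header.
 *
 *	static void example_touch_hw(struct drm_i915_private *i915)
 *	{
 *		intel_wakeref_t wakeref;
 *
 *		wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *		// device is out of D3 here, register access is safe
 *		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 *	}
 */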

#define BITS_PER_WAKEREF	\
	BITS_PER_TYPE(struct_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT	(BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS		(1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK	(INTEL_RPM_WAKELOCK_BIAS - 1)

static inline int
intel_rpm_raw_wakeref_count(int wakeref_count)
{
	return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
}

static inline int
intel_rpm_wakelock_count(int wakeref_count)
{
	return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}
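
/*
 * Worked example of the packed counter handled by the two helpers above,
 * assuming a 32-bit wakeref_count (so INTEL_RPM_WAKELOCK_SHIFT is 16 and
 * INTEL_RPM_WAKELOCK_BIAS is 0x10000):
 *
 *	wakeref_count = 0x00020003
 *	intel_rpm_wakelock_count(0x00020003)    == 2  (high half)
 *	intel_rpm_raw_wakeref_count(0x00020003) == 3  (low half)
 *
 * Raw wakerefs live in the low half and full wakelock references in the
 * high half of the same atomic counter.
 */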

static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
	WARN_ONCE(rpm->suspended,
		  "Device suspended during HW access\n");
}

static inline void
__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	assert_rpm_device_not_suspended(rpm);
	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
		  "RPM raw-wakeref not held\n");
}

static inline void
__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
		  "RPM wakelock ref not held during HW access\n");
}

static inline void
assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}

static inline void
assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}

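/*
 * The assert helpers above are meant to be called at the start of low-level
 * HW accessors to catch accesses done without a wakeref.  A hedged sketch;
 * the accessor below and its register mapping are hypothetical:
 *
 *	static u32 example_read_reg(struct drm_i915_private *i915, u32 offset)
 *	{
 *		assert_rpm_wakelock_held(&i915->runtime_pm);
 *		return readl(i915->regs + offset);
 *	}
 */
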
/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function disables asserts that check if we hold an RPM wakelock
 * reference, while keeping the device-not-suspended checks still enabled.
 * It's meant to be used only in special circumstances where our rule about
 * the wakelock refcount wrt. the device power state doesn't hold. According
 * to this rule at any point where we access the HW or want to keep the HW in
 * an active state we must hold an RPM wakelock reference acquired via one of
 * the intel_runtime_pm_get() helpers. Currently there are a few special spots
 * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
 * forcewake release timer, and the GPU RPS and hangcheck work items. All
 * other users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances; otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}
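
/*
 * A sketch of the symmetric pattern required by the two helpers above; the
 * surrounding context is illustrative only:
 *
 *	disable_rpm_wakeref_asserts(rpm);
 *	... HW access that is legitimately done without holding a wakelock,
 *	    e.g. from the suspend/resume path ...
 *	enable_rpm_wakeref_asserts(rpm);
 *
 * Internally, disabling adds INTEL_RPM_WAKELOCK_BIAS + 1 to wakeref_count,
 * so both intel_rpm_raw_wakeref_count() and intel_rpm_wakelock_count() read
 * as non-zero while the asserts are disabled; assert_rpm_device_not_suspended()
 * is unaffected and still fires if the device is actually suspended.
 */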

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);

intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);

#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_in_use(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
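
/*
 * Example use of the scoped helpers above (a sketch; the loop bodies are
 * illustrative):
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_intel_runtime_pm(rpm, wakeref) {
 *		... HW access; the wakeref is held for the whole block ...
 *	}
 *
 *	with_intel_runtime_pm_if_in_use(rpm, wakeref) {
 *		... runs only if the device was already awake; if
 *		    intel_runtime_pm_get_if_in_use() returns 0 the block
 *		    is skipped entirely ...
 *	}
 */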

void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	intel_runtime_pm_put_unchecked(rpm);
}
#endif
void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p);
#else
static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
						  struct drm_printer *p)
{
}
#endif

#endif /* __INTEL_RUNTIME_PM_H__ */