/*	$NetBSD: intel_wakeref.h,v 1.4 2021/12/19 12:33:57 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <drm/drm_wait_netbsd.h>	/* XXX */

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};
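
/*
 * Usage sketch (the "foo" names here are hypothetical, not part of this
 * header): callers supply get/put callbacks that run underneath the
 * wakeref mutex on the first acquire and the last release; either may
 * fail with a negative errno.
 *
 *	static int foo_wakeref_get(struct intel_wakeref *wf)
 *	{
 *		return foo_power_up(wf_to_foo(wf));
 *	}
 *
 *	static int foo_wakeref_put(struct intel_wakeref *wf)
 *	{
 *		return foo_power_down(wf_to_foo(wf));
 *	}
 *
 *	static const struct intel_wakeref_ops foo_wakeref_ops = {
 *		.get = foo_wakeref_get,
 *		.put = foo_wakeref_put,
 *	};
 */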

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;
	drm_waitqueue_t wq;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct work_struct work;
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
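
/*
 * Usage sketch (continuing the hypothetical "foo" example above): each
 * invocation of intel_wakeref_init() declares its own static lock class,
 * so lockdep can distinguish otherwise identical wakerefs.
 *
 *	intel_wakeref_init(&foo->wakeref, foo->rpm, &foo_wakeref_ops);
 */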

void intel_wakeref_fini(struct intel_wakeref *wf);

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the ops->get() callback underneath
 * the wakeref mutex.
 *
 * Note that ops->get() is allowed to fail, in which case the runtime-pm
 * wakeref will be released and the acquisition unwound, and an error
 * reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}
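
/*
 * Usage sketch (hypothetical "foo"): a typical caller brackets hardware
 * access with a get/put pair and checks the first acquire for failure.
 *
 *	err = intel_wakeref_get(&foo->wakeref);
 *	if (err)
 *		return err;
 *	... access the hardware ...
 *	intel_wakeref_put(&foo->wakeref);
 */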

/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter; only valid if the caller already holds
 * a reference.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
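
/*
 * Usage sketch (hypothetical "foo"): do opportunistic work only while
 * the device is already awake, without forcing a wakeup.
 *
 *	if (intel_wakeref_get_if_active(&foo->wakeref)) {
 *		... flush state while still awake ...
 *		intel_wakeref_put(&foo->wakeref);
 *	}
 */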

#define INTEL_WAKEREF_PUT_ASYNC BIT(0)

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags (INTEL_WAKEREF_PUT_ASYNC defers the final release
 * to the work item)
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the ops->put() callback is
 * called underneath the wakeref mutex.
 *
 * Note that ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
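
/*
 * Usage sketch (hypothetical "foo"): from a context that may not sleep,
 * use the async variant; a final release, which must take the mutex and
 * call ops->put(), is then deferred to the work item.
 *
 *	intel_wakeref_put_async(&foo->wakeref);
 */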

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the underlying wakeref itself (and
 * the get/put callbacks) cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}
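
/*
 * Usage sketch (hypothetical "foo"): hold the mutex to inspect state
 * that must not race with the get/put callbacks.
 *
 *	intel_wakeref_lock(&foo->wakeref);
 *	if (intel_wakeref_is_active(&foo->wakeref))
 *		... the wakeref cannot be released here ...
 *	intel_wakeref_unlock(&foo->wakeref);
 */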

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running underneath @wf->mutex on
 * another CPU) has completed.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_work(&wf->work);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Returns: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
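
/*
 * Usage sketch (hypothetical "foo"): typically called during suspend or
 * teardown, once no other thread can take a new reference; a negative
 * return means the wait was killed by a fatal signal.
 *
 *	err = intel_wakeref_wait_for_idle(&foo->wakeref);
 *	if (err)
 *		return err;
 */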

struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
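
/*
 * Usage sketch (hypothetical "foo"; the 250ms figure is arbitrary):
 * keep the device awake for a while after a user-visible access, then
 * let autosuspend proceed as usual.
 *
 *	intel_wakeref_auto(&foo->userfault_wakeref, msecs_to_jiffies(250));
 */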

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif	/* INTEL_WAKEREF_H */