/*	$NetBSD: intel_wakeref.c,v 1.5 2021/12/19 12:33:57 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_wakeref.c,v 1.5 2021/12/19 12:33:57 riastradh Exp $");

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

#include <linux/nbsd-namespace.h>

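/*
 * Acquire a runtime PM reference for the device and stash the wakeref
 * in wf->wakeref for the matching rpm_put().
 */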
static void rpm_get(struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(wf->rpm);
}

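/*
 * Release the runtime PM wakeref stashed by rpm_get() and wake anyone
 * sleeping in intel_wakeref_wait_for_idle().
 */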
static void rpm_put(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(wf->rpm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);

	DRM_WAKEUP_ALL(&wf->wq, &wf->mutex);
}

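/*
 * Slow path for taking a reference when the count may be zero: under
 * wf->mutex, acquire the runtime PM wakeref and run the ops->get()
 * callback before the count becomes visible as non-zero.
 */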
int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);

		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}

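/*
 * Drop a reference with wf->mutex held.  If it was the last one, run the
 * ops->put() callback and, unless that callback deferred the release,
 * drop the runtime PM wakeref as well.
 */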
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_dec_and_test(&wf->count))) {
		mutex_unlock(&wf->mutex);
		return;
	}

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
	}

	mutex_unlock(&wf->mutex);
}

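/*
 * Slow path for dropping what may be the final reference.  If the caller
 * asked for an asynchronous put (INTEL_WAKEREF_PUT_ASYNC) or the mutex
 * cannot be taken without blocking, defer the release to the worker.
 */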
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
		schedule_work(&wf->work);
		return;
	}

	____intel_wakeref_put_last(wf);
}

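/*
 * Worker that completes a deferred put.  If ours is not the last
 * reference, just drop it; otherwise take the mutex and release the
 * wakeref for real.
 */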
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);

	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

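/*
 * Set up a wakeref: no references held, no runtime PM wakeref, and a
 * worker ready to handle deferred puts.
 */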
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key)
{
	wf->rpm = rpm;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;
	DRM_INIT_WAITQUEUE(&wf->wq, "i915wake");

	INIT_WORK(&wf->work, __intel_wakeref_put_work);
	lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
}

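/*
 * Tear down the resources allocated by __intel_wakeref_init().
 */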
void
intel_wakeref_fini(struct intel_wakeref *wf)
{

	DRM_DESTROY_WAITQUEUE(&wf->wq);
	mutex_destroy(&wf->mutex);
}

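/*
 * Sleep until the wakeref is idle, i.e. intel_wakeref_is_active()
 * reports no outstanding wakeref.
 */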
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	int err;

	might_sleep();

	mutex_lock(&wf->mutex);
	DRM_WAIT_UNTIL(err, &wf->wq, &wf->mutex, !intel_wakeref_is_active(wf));
	mutex_unlock(&wf->mutex);
	if (err)
		return err;

	intel_wakeref_unlock_wait(wf);
	return 0;
}

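/*
 * Timer callback for intel_wakeref_auto(): drop the timer's reference
 * and, if it was the last one, release the runtime PM wakeref.
 */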
static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}

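/*
 * Set up an auto wakeref: no references, no wakeref, and a timer ready
 * to fire wakeref_auto_timeout().
 */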
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}

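/*
 * Keep the device awake for at least the next @timeout jiffies, taking a
 * runtime PM wakeref if we do not already hold one.  A @timeout of 0
 * cancels any pending timeout, running the elided callback so its
 * reference (and wakeref) is dropped immediately.
 */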
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* Our mission is that we only extend an already active wakeref */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}

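/*
 * Cancel any pending timeout and make sure the auto wakeref has been
 * released before tearing down the lock.
 */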
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
	spin_lock_destroy(&wf->lock);
}