/*	$NetBSD: drm_lock.c,v 1.13 2021/12/19 12:30:05 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DRM lock.  Each drm master has a heavy-weight lock to provide
 * mutual exclusion for access to the hardware.  The lock can be held
 * by the kernel or by a drm file; the kernel takes the lock only for
 * unusual purposes, with drm_legacy_idlelock_take, mainly for idling
 * the GPU when closing down.
 *
 * The physical memory storing the lock state is shared between
 * userland and kernel: the lock word that dev->master->lock.hw_lock
 * points to is mapped into both userland and kernel address spaces.
 * This way, userland can try to take the hardware lock without a
 * system call; if that fails, it falls back to the DRM_LOCK ioctl to
 * block atomically until the lock is available.  All this means that
 * the kernel must use atomic_ops to manage the lock state.
 */
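/*
 * For illustration only (the real code lives in userland, e.g. in
 * libdrm, not in this file): with hw_lock mapped into its address
 * space, a client's fast path might look roughly like the following,
 * using whatever userland compare-and-swap primitive is at hand and
 * falling back to the DRM_LOCK ioctl on contention.  The names cas,
 * fd, and req here are hypothetical.
 *
 *	volatile unsigned int *lock = &hw_lock->lock;
 *	unsigned int old = *lock;
 *
 *	if (!_DRM_LOCK_IS_HELD(old) &&
 *	    cas(lock, old, context | _DRM_LOCK_HELD) == old)
 *		return 0;			// took it, no system call
 *	return ioctl(fd, DRM_IOCTL_LOCK, &req);	// block in the kernel
 */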

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_lock.c,v 1.13 2021/12/19 12:30:05 riastradh Exp $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/systm.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "../dist/drm/drm_internal.h"
#include "../dist/drm/drm_legacy.h"

static bool drm_lock_acquire(struct drm_lock_data *, int);
static void drm_lock_release(struct drm_lock_data *, int);

#if IS_ENABLED(CONFIG_DRM_LEGACY)
static int drm_lock_block_signals(struct drm_device *, struct drm_lock *,
    struct drm_file *);
static void drm_lock_unblock_signals(struct drm_device *,
    struct drm_lock *, struct drm_file *);

/*
 * Take the lock on behalf of userland.
 */
int
drm_legacy_lock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it. */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);
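	/*
	 * (The ioctl path enters with drm_global_mutex held, hence
	 * the assertion above; we drop it so that sleeping for the
	 * lock below does not stall every other ioctl.)
	 */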

	/* Refuse to lock on behalf of the kernel. */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Refuse to set the magic bits. */
	if (lock_request->context !=
	    _DRM_LOCKING_CONTEXT(lock_request->context)) {
		error = -EINVAL;
		goto out0;
	}

	/* Count it in the file and device statistics (XXX why here?). */
	file->lock_count++;

	/* Wait until the hardware lock is gone or we can acquire it. */
	spin_lock(&master->lock.spinlock);

	if (master->lock.user_waiters == UINT32_MAX) {
		error = -EBUSY;
		goto out1;
	}

	master->lock.user_waiters++;
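	/*
	 * DRM_SPIN_WAIT_UNTIL sleeps on lock_queue, releasing the
	 * spin lock while asleep and retaking it to re-test the
	 * condition; it sets error nonzero (e.g. -EINTR) if the wait
	 * is interrupted by a signal.
	 */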
	DRM_SPIN_WAIT_UNTIL(error, &master->lock.lock_queue,
	    &master->lock.spinlock,
	    ((master->lock.hw_lock == NULL) ||
		drm_lock_acquire(&master->lock, lock_request->context)));
	KASSERT(0 < master->lock.user_waiters);
	master->lock.user_waiters--;
	if (error)
		goto out1;

	/* If the lock is gone, give up. */
	if (master->lock.hw_lock == NULL) {
#if 0		/* XXX Linux sends SIGTERM, but why? */
		mutex_enter(&proc_lock);
		psignal(curproc, SIGTERM);
		mutex_exit(&proc_lock);
		error = -EINTR;
#else
		error = -ENXIO;
#endif
		goto out1;
	}

	/* Mark the lock as owned by file. */
	master->lock.file_priv = file;
	master->lock.lock_time = jiffies;	/* XXX Unused? */

	/* Block signals while the lock is held. */
	error = drm_lock_block_signals(dev, lock_request, file);
	if (error)
		goto fail2;

	/* Enter the DMA quiescent state if requested and available. */
	/* XXX Drop the spin lock first... */
	if (ISSET(lock_request->flags, _DRM_LOCK_QUIESCENT) &&
	    (dev->driver->dma_quiescent != NULL)) {
		error = (*dev->driver->dma_quiescent)(dev);
		if (error)
			goto fail3;
	}

	/* Success! */
	error = 0;
	goto out1;

fail3:	drm_lock_unblock_signals(dev, lock_request, file);
fail2:	drm_lock_release(&master->lock, lock_request->context);
	master->lock.file_priv = NULL;
out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}

/*
 * Try to relinquish a lock that userland thinks it holds, per
 * userland's request.  Fail if it doesn't actually hold the lock.
 */
int
drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it. */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to unlock on behalf of the kernel. */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Lock the internal spin lock to make changes. */
	spin_lock(&master->lock.spinlock);

	/* Make sure it's actually locked. */
	if (!_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock)) {
		error = -EINVAL;	/* XXX Right error? */
		goto out1;
	}

	/* Make sure it's locked in the right context. */
	if (_DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock) !=
	    lock_request->context) {
		error = -EACCES;	/* XXX Right error? */
		goto out1;
	}

	/* Make sure it's locked by us. */
	if (master->lock.file_priv != file) {
		error = -EACCES;	/* XXX Right error? */
		goto out1;
	}

	/* Actually release the lock. */
	drm_lock_release(&master->lock, lock_request->context);

	/* Clear the lock's file pointer, just in case. */
	master->lock.file_priv = NULL;

	/* Unblock the signals we blocked in drm_legacy_lock. */
	drm_lock_unblock_signals(dev, lock_request, file);

	/* Success! */
	error = 0;

out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}

void
drm_legacy_lock_master_cleanup(struct drm_device *dev,
    struct drm_master *master)
{

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	/*
	 * XXX Synchronize with _DRM_SHM case of
	 * drm_legacy_rmmap_locked in drm_bufs.c.
	 */
	spin_lock(&master->lock.spinlock);
	if (master->lock.hw_lock) {
		if (dev->sigdata.lock == master->lock.hw_lock)
			dev->sigdata.lock = NULL;
		master->lock.hw_lock = NULL;
		master->lock.file_priv = NULL;
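		/*
		 * Wake everyone sleeping in drm_legacy_lock so they
		 * see that hw_lock is now NULL and give up.
		 */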
		DRM_SPIN_WAKEUP_ALL(&master->lock.lock_queue,
		    &master->lock.spinlock);
	}
	spin_unlock(&master->lock.spinlock);
}
#endif /* CONFIG_DRM_LEGACY */

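/*
 * Illustrative pairing (the caller shown here is hypothetical), e.g.
 * to idle the GPU before teardown:
 *
 *	drm_legacy_idlelock_take(&master->lock);
 *	...quiesce the hardware...
 *	drm_legacy_idlelock_release(&master->lock);
 */
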
/*
 * Try to acquire the lock.  Whether or not we acquire it, guarantee
 * that whoever next releases it relinquishes it to the kernel, not to
 * anyone else.
 */
void
drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{

	spin_lock(&lock_data->spinlock);
	KASSERT(!lock_data->idle_has_lock);
	KASSERT(lock_data->kernel_waiters < UINT32_MAX);
	lock_data->kernel_waiters++;
	/* Try to acquire the lock. */
	if (drm_lock_acquire(lock_data, DRM_KERNEL_CONTEXT)) {
		lock_data->idle_has_lock = 1;
	} else {
		/*
		 * Recording that there are kernel waiters will prevent
		 * userland from acquiring the lock again when it is
		 * next released.
		 */
	}
	spin_unlock(&lock_data->spinlock);
}

/*
 * Release whatever drm_legacy_idlelock_take managed to acquire.
 */
void
drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{

	spin_lock(&lock_data->spinlock);
	KASSERT(0 < lock_data->kernel_waiters);
	if (--lock_data->kernel_waiters == 0) {
		if (lock_data->idle_has_lock) {
			/* We did acquire it.  Release it. */
			drm_lock_release(lock_data, DRM_KERNEL_CONTEXT);
		}
	}
	spin_unlock(&lock_data->spinlock);
}

#if IS_ENABLED(CONFIG_DRM_LEGACY)
/*
 * Release the lock and free it on closing of a drm file.
 */
void
drm_legacy_lock_release(struct drm_device *dev, struct file *fp)
{
	struct drm_file *const file = fp->f_data;
	struct drm_lock_data *const lock_data = &file->master->lock;

	/* If this file has never locked anything, nothing to do. */
	if (file->lock_count == 0)
		return;

	spin_lock(&lock_data->spinlock);

	/* If there is no lock, nothing to do. */
	if (lock_data->hw_lock == NULL)
		goto out;

	/* If this lock is not held, nothing to do. */
	if (!_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock))
		goto out;

	/*
	 * Otherwise, it boils down to whether this file is the owner
	 * or someone else.
	 *
	 * XXX This is not reliable!  Userland doesn't update this when
	 * it takes the lock...
	 */
	if (file == lock_data->file_priv)
		drm_lock_release(lock_data,
		    _DRM_LOCKING_CONTEXT(file->master->lock.hw_lock->lock));

out:	spin_unlock(&lock_data->spinlock);
}
#endif

/*
 * Try to acquire the lock.  Return true if successful, false if not.
 *
 * This is hairy because it races with userland, and if userland
 * already holds the lock, we must tell it, by marking it
 * _DRM_LOCK_CONT (contended), that it must call ioctl(DRM_UNLOCK) to
 * release the lock so that we can wake waiters.
 *
 * XXX What happens if the process is interrupted?
 */
static bool
drm_lock_acquire(struct drm_lock_data *lock_data, int context)
{
	volatile unsigned int *const lock = &lock_data->hw_lock->lock;
	unsigned int old, new;

	KASSERT(spin_is_locked(&lock_data->spinlock));

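	/*
	 * CAS loop shared with userland: reread the lock word until
	 * we either install ourselves as holder (marking the word
	 * contended if anyone else is waiting) or mark the current
	 * holder contended so that its DRM_UNLOCK will wake us.  We
	 * acquired the lock iff the word was unheld at the successful
	 * swap.
	 */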
	do {
		old = *lock;
		if (!_DRM_LOCK_IS_HELD(old)) {
			new = (context | _DRM_LOCK_HELD);
			if ((0 < lock_data->user_waiters) ||
			    (0 < lock_data->kernel_waiters))
				new |= _DRM_LOCK_CONT;
		} else if (_DRM_LOCKING_CONTEXT(old) != context) {
			new = (old | _DRM_LOCK_CONT);
		} else {
			DRM_ERROR("%d already holds heavyweight lock\n",
			    context);
			return false;
		}
	} while (atomic_cas_uint(lock, old, new) != old);

	return !_DRM_LOCK_IS_HELD(old);
}

/*
 * Release the lock held in the given context.  Wake any waiters,
 * preferring kernel waiters over userland waiters.
 *
 * Lock's spinlock must be held and lock must be held in this context.
 */
static void
drm_lock_release(struct drm_lock_data *lock_data, int context)
{

	(void)context;	/* ignore */
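	/*
	 * context is otherwise referenced only by the KASSERTs below,
	 * so the cast above presumably keeps unused-parameter
	 * warnings quiet in kernels built without DIAGNOSTIC.
	 */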
	KASSERT(spin_is_locked(&lock_data->spinlock));
	KASSERT(_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock));
	KASSERT(_DRM_LOCKING_CONTEXT(lock_data->hw_lock->lock) == context);

	lock_data->hw_lock->lock = 0;
	DRM_SPIN_WAKEUP_ONE(&lock_data->lock_queue, &lock_data->spinlock);
}

#if IS_ENABLED(CONFIG_DRM_LEGACY)
/*
 * Block signals for a process that holds a drm lock.
 *
 * XXX It's not processes but files that hold drm locks, so blocking
 * signals in a process seems wrong, and it's not clear that blocking
 * signals automatically is remotely sensible anyway.
 */
static int
drm_lock_block_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
	return 0;
}

/*
 * Unblock the signals that drm_lock_block_signals blocked.
 */
static void
drm_lock_unblock_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
}
#endif