/*	$NetBSD: drm_lock.c,v 1.4 2016/04/02 22:40:43 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DRM lock.  Each drm master has a heavy-weight lock to provide mutual
 * exclusion for access to the hardware.  The lock can be held by the
 * kernel or by a drm file; the kernel takes access only for unusual
 * purposes, with drm_idlelock_take, mainly for idling the GPU when
 * closing down.
 *
 * The physical memory storing the lock state is shared between
 * userland and kernel: the object at dev->master->lock.hw_lock is
 * mapped into both userland and kernel address spaces.  This way,
 * userland can try to take the hardware lock without a system call,
 * although if it fails then it will use the DRM_LOCK ioctl to block
 * atomically until the lock is available.  All this means that the
 * kernel must use atomic_ops to manage the lock state.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_lock.c,v 1.4 2016/04/02 22:40:43 riastradh Exp $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <drm/drmP.h>

static bool drm_lock_acquire(struct drm_lock_data *, int);
static void drm_lock_release(struct drm_lock_data *, int);
static int drm_lock_block_signals(struct drm_device *, struct drm_lock *,
    struct drm_file *);
static void drm_lock_unblock_signals(struct drm_device *,
    struct drm_lock *, struct drm_file *);

/*
 * Take the lock on behalf of userland.
 */
int
drm_lock(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_lock *lock_request = data;
        struct drm_master *master = file->master;
        int error;

        /* Sanitize the drm global mutex bollocks until we get rid of it. */
        KASSERT(mutex_is_locked(&drm_global_mutex));
        mutex_unlock(&drm_global_mutex);

        /* Refuse to lock on behalf of the kernel. */
        if (lock_request->context == DRM_KERNEL_CONTEXT) {
                error = -EINVAL;
                goto out0;
        }

        /* Refuse to set the magic bits. */
        if (lock_request->context !=
            _DRM_LOCKING_CONTEXT(lock_request->context)) {
                error = -EINVAL;
                goto out0;
        }

        /* Count it in the file and device statistics (XXX why here?). */
        file->lock_count++;

        /* Wait until the hardware lock is gone or we can acquire it. */
        spin_lock(&master->lock.spinlock);

        if (master->lock.user_waiters == UINT32_MAX) {
                error = -EBUSY;
                goto out1;
        }

        master->lock.user_waiters++;
        DRM_SPIN_WAIT_UNTIL(error, &master->lock.lock_queue,
            &master->lock.spinlock,
            ((master->lock.hw_lock == NULL) ||
                drm_lock_acquire(&master->lock, lock_request->context)));
        KASSERT(0 < master->lock.user_waiters);
        master->lock.user_waiters--;
        if (error)
                goto out1;

        /* If the lock is gone, give up. */
        if (master->lock.hw_lock == NULL) {
#if 0                           /* XXX Linux sends SIGTERM, but why? */
                mutex_enter(proc_lock);
                psignal(curproc, SIGTERM);
                mutex_exit(proc_lock);
                error = -EINTR;
#else
                error = -ENXIO;
#endif
                goto out1;
        }

        /* Mark the lock as owned by file. */
        master->lock.file_priv = file;
        master->lock.lock_time = jiffies;       /* XXX Unused? */

        /* Block signals while the lock is held. */
        error = drm_lock_block_signals(dev, lock_request, file);
        if (error)
                goto fail2;

        /* Enter the DMA quiescent state if requested and available. */
        /* XXX Drop the spin lock first... */
        if (ISSET(lock_request->flags, _DRM_LOCK_QUIESCENT) &&
            (dev->driver->dma_quiescent != NULL)) {
                error = (*dev->driver->dma_quiescent)(dev);
                if (error)
                        goto fail3;
        }

        /* Success! */
        error = 0;
        goto out1;

fail3:  drm_lock_unblock_signals(dev, lock_request, file);
fail2:  drm_lock_release(&master->lock, lock_request->context);
        master->lock.file_priv = NULL;
out1:   spin_unlock(&master->lock.spinlock);
out0:   mutex_lock(&drm_global_mutex);
        return error;
}
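/*
 * Example (sketch, not compiled): the userland side of the protocol
 * described at the top of this file.  A client first tries one
 * compare-and-swap on the shared lock word and falls back to the
 * DRM_LOCK ioctl above only if that fails.  The helper name
 * drm_client_take_lock and the GCC __sync builtin are illustrative
 * assumptions; a real client would use libdrm's drmGetLock.
 */
#if 0
static int
drm_client_take_lock(int drmfd, volatile unsigned int *lock, int ctx)
{
        unsigned int old = *lock;

        /* Fast path: take the free lock with a single CAS, no syscall. */
        if (!_DRM_LOCK_IS_HELD(old) &&
            __sync_bool_compare_and_swap(lock, old, ctx | _DRM_LOCK_HELD))
                return 0;

        /* Slow path: block in drm_lock until the lock is available. */
        struct drm_lock lock_request = { .context = ctx, .flags = 0 };
        return ioctl(drmfd, DRM_IOCTL_LOCK, &lock_request);
}
#endif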
/*
 * Try to relinquish a lock that userland thinks it holds, per
 * userland's request.  Fail if it doesn't actually hold the lock.
 */
int
drm_unlock(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_lock *lock_request = data;
        struct drm_master *master = file->master;
        int error;

        /* Sanitize the drm global mutex bollocks until we get rid of it. */
        KASSERT(mutex_is_locked(&drm_global_mutex));
        mutex_unlock(&drm_global_mutex);

        /* Refuse to unlock on behalf of the kernel. */
        if (lock_request->context == DRM_KERNEL_CONTEXT) {
                error = -EINVAL;
                goto out0;
        }

        /* Lock the internal spin lock to make changes. */
        spin_lock(&master->lock.spinlock);

        /* Make sure it's actually locked. */
        if (!_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock)) {
                error = -EINVAL;        /* XXX Right error? */
                goto out1;
        }

        /* Make sure it's locked in the right context. */
        if (_DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock) !=
            lock_request->context) {
                error = -EACCES;        /* XXX Right error? */
                goto out1;
        }

        /* Make sure it's locked by us. */
        if (master->lock.file_priv != file) {
                error = -EACCES;        /* XXX Right error? */
                goto out1;
        }

        /* Actually release the lock. */
        drm_lock_release(&master->lock, lock_request->context);

        /* Clear the lock's file pointer, just in case. */
        master->lock.file_priv = NULL;

        /* Unblock the signals we blocked in drm_lock. */
        drm_lock_unblock_signals(dev, lock_request, file);

        /* Success! */
        error = 0;

out1:   spin_unlock(&master->lock.spinlock);
out0:   mutex_lock(&drm_global_mutex);
        return error;
}
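/*
 * Example (sketch, not compiled): the userland release protocol that
 * drm_unlock above completes.  If the kernel has marked the lock word
 * _DRM_LOCK_CONT (see drm_lock_acquire below), the holder must not
 * simply clear the word: it must issue the DRM_UNLOCK ioctl so the
 * kernel can wake waiters.  The helper name drm_client_drop_lock is an
 * illustrative assumption; a real client would use libdrm's drmUnlock.
 */
#if 0
static int
drm_client_drop_lock(int drmfd, volatile unsigned int *lock, int ctx)
{
        unsigned int held = ((unsigned int)ctx | _DRM_LOCK_HELD);

        /* Fast path: uncontended, so clear the lock word with one CAS. */
        if (__sync_bool_compare_and_swap(lock, held, 0))
                return 0;

        /* Slow path: contended, so let the kernel wake the waiters. */
        struct drm_lock lock_request = { .context = ctx, .flags = 0 };
        return ioctl(drmfd, DRM_IOCTL_UNLOCK, &lock_request);
}
#endif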
/*
 * Drop the lock.
 *
 * Return value is an artefact of Linux.  Caller must guarantee
 * preconditions; failure is fatal.
 *
 * XXX Should we also unblock signals like drm_unlock does?
 */
int
drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{

        spin_lock(&lock_data->spinlock);
        drm_lock_release(lock_data, context);
        spin_unlock(&lock_data->spinlock);

        return 0;
}

/*
 * Try to acquire the lock.  Whether or not we acquire it, guarantee
 * that whoever next releases it relinquishes it to the kernel, not to
 * anyone else.
 */
void
drm_idlelock_take(struct drm_lock_data *lock_data)
{

        spin_lock(&lock_data->spinlock);
        KASSERT(!lock_data->idle_has_lock);
        KASSERT(lock_data->kernel_waiters < UINT32_MAX);
        lock_data->kernel_waiters++;
        /* Try to acquire the lock. */
        if (drm_lock_acquire(lock_data, DRM_KERNEL_CONTEXT)) {
                lock_data->idle_has_lock = 1;
        } else {
                /*
                 * Recording that there are kernel waiters will prevent
                 * userland from acquiring the lock again when it is
                 * next released.
                 */
        }
        spin_unlock(&lock_data->spinlock);
}

/*
 * Release whatever drm_idlelock_take managed to acquire.
 */
void
drm_idlelock_release(struct drm_lock_data *lock_data)
{

        spin_lock(&lock_data->spinlock);
        KASSERT(0 < lock_data->kernel_waiters);
        if (--lock_data->kernel_waiters == 0) {
                if (lock_data->idle_has_lock) {
                        /* We did acquire it.  Release it. */
                        drm_lock_release(lock_data, DRM_KERNEL_CONTEXT);
                        /*
                         * Clear the flag so a later drm_idlelock_take
                         * doesn't trip over its KASSERT.
                         */
                        lock_data->idle_has_lock = 0;
                }
        }
        spin_unlock(&lock_data->spinlock);
}

/*
 * Does this file hold this drm device's hardware lock?
 *
 * Used to decide whether to release the lock when the file is being
 * closed.
 *
 * XXX I don't think this answers correctly in the case that userland
 * has taken the lock and it is uncontended.  But I don't think we can
 * know what the correct answer is in that case.
 */
int
drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file)
{
        struct drm_lock_data *const lock_data = &file->master->lock;
        int answer = 0;

        /* If this file has never locked anything, then no. */
        if (file->lock_count == 0)
                return 0;

        spin_lock(&lock_data->spinlock);

        /* If there is no lock, then this file doesn't hold it. */
        if (lock_data->hw_lock == NULL)
                goto out;

        /* If the lock is not held, then this file doesn't hold it. */
        if (!_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock))
                goto out;

        /*
         * Otherwise, it boils down to whether this file is the owner
         * or someone else.
         *
         * XXX This is not reliable!  Userland doesn't update this when
         * it takes the lock...
         */
        answer = (file == lock_data->file_priv);

out:    spin_unlock(&lock_data->spinlock);
        return answer;
}
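/*
 * Example (sketch, not compiled): the intended kernel-side use of the
 * idle lock, per drm_idlelock_take/release above.  The function name
 * and the quiescing step are illustrative assumptions; in practice a
 * driver does this around idling the GPU when closing down.
 */
#if 0
static void
drm_example_idle(struct drm_device *dev)
{
        struct drm_lock_data *const lock_data = &dev->master->lock;

        /* Exclude new userland lock holders. */
        drm_idlelock_take(lock_data);

        /* ... idle the GPU here ... */

        /* Let userland take the lock again. */
        drm_idlelock_release(lock_data);
}
#endif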
/*
 * Try to acquire the lock.  Return true if successful, false if not.
 *
 * This is hairy because it races with userland, and if userland
 * already holds the lock, we must tell it, by marking it
 * _DRM_LOCK_CONT (contended), that it must call ioctl(DRM_UNLOCK) to
 * release the lock so that we can wake waiters.
 *
 * XXX What happens if the process is interrupted?
 */
static bool
drm_lock_acquire(struct drm_lock_data *lock_data, int context)
{
        volatile unsigned int *const lock = &lock_data->hw_lock->lock;
        unsigned int old, new;

        KASSERT(spin_is_locked(&lock_data->spinlock));

        do {
                old = *lock;
                if (!_DRM_LOCK_IS_HELD(old)) {
                        /* Lock is free; take it, noting any waiters. */
                        new = (context | _DRM_LOCK_HELD);
                        if ((0 < lock_data->user_waiters) ||
                            (0 < lock_data->kernel_waiters))
                                new |= _DRM_LOCK_CONT;
                } else if (_DRM_LOCKING_CONTEXT(old) != context) {
                        /* Someone else holds it; mark it contended. */
                        new = (old | _DRM_LOCK_CONT);
                } else {
                        DRM_ERROR("%d already holds heavyweight lock\n",
                            context);
                        return false;
                }
        } while (atomic_cas_uint(lock, old, new) != old);

        return !_DRM_LOCK_IS_HELD(old);
}

/*
 * Release the lock held in the given context.  Wake one waiter,
 * preferring kernel waiters over userland waiters.
 *
 * Lock's spinlock must be held and lock must be held in this context.
 */
static void
drm_lock_release(struct drm_lock_data *lock_data, int context)
{

        (void)context;          /* ignore */
        KASSERT(spin_is_locked(&lock_data->spinlock));
        KASSERT(_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock));
        KASSERT(_DRM_LOCKING_CONTEXT(lock_data->hw_lock->lock) == context);

        lock_data->hw_lock->lock = 0;
        DRM_SPIN_WAKEUP_ONE(&lock_data->lock_queue, &lock_data->spinlock);
}

/*
 * Block signals for a process that holds a drm lock.
 *
 * XXX It's not processes but files that hold drm locks, so blocking
 * signals in a process seems wrong, and it's not clear that blocking
 * signals automatically is remotely sensible anyway.
 */
static int
drm_lock_block_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
        return 0;
}

/*
 * Unblock the signals that drm_lock_block_signals blocked.
 */
static void
drm_lock_unblock_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
}
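/*
 * For reference: the lock-word layout this file manipulates, as
 * defined in <drm/drm.h> (values copied here for illustration; the
 * header is authoritative).  The low bits hold the locking context;
 * the top two bits are flags:
 *
 *	_DRM_LOCK_HELD	0x80000000	lock is held
 *	_DRM_LOCK_CONT	0x40000000	lock is contended; release must
 *					go through the DRM_UNLOCK ioctl
 *	_DRM_LOCKING_CONTEXT(x)	((x) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 */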