/*	$NetBSD: drm_lock.c,v 1.2 2014/03/18 18:20:42 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DRM lock.  Each drm master has a heavy-weight lock to provide mutual
 * exclusion for access to the hardware.  The lock can be held by the
 * kernel or by a drm file; the kernel takes it only for unusual
 * purposes, with drm_idlelock_take, mainly for idling the GPU when
 * closing down.
 *
 * The physical memory storing the lock state is shared between
 * userland and kernel: the pointer at dev->master->lock->hw_lock is
 * mapped into both userland and kernel address spaces.  This way,
 * userland can try to take the hardware lock without a system call,
 * although if it fails then it will use the DRM_LOCK ioctl to block
 * atomically until the lock is available.  All this means that the
 * kernel must use atomic_ops to manage the lock state.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_lock.c,v 1.2 2014/03/18 18:20:42 riastradh Exp $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <drm/drmP.h>

static bool	drm_lock_acquire(struct drm_lock_data *, int);
static void	drm_lock_release(struct drm_lock_data *, int);
static int	drm_lock_block_signals(struct drm_device *, struct drm_lock *,
		    struct drm_file *);
static void	drm_lock_unblock_signals(struct drm_device *,
		    struct drm_lock *, struct drm_file *);
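
/*
 * Illustrative note, not a definition: per the shared <drm/drm.h>
 * header, the lock word packs the holder's context number into the low
 * bits, with two flag bits on top, _DRM_LOCK_HELD (0x80000000) and
 * _DRM_LOCK_CONT (0x40000000); _DRM_LOCKING_CONTEXT masks both flags
 * off.  So, for example, a lock word of 0x80000003 means context 3
 * holds the lock uncontended, and 0xc0000003 means context 3 holds it
 * while someone else is waiting.
 */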

/*
 * Take the lock on behalf of userland.
 */
int
drm_lock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it.  */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to lock on behalf of the kernel.  */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Refuse to set the magic bits.  */
	if (lock_request->context !=
	    _DRM_LOCKING_CONTEXT(lock_request->context)) {
		error = -EINVAL;
		goto out0;
	}

	/* Count it in the file and device statistics (XXX why here?).  */
	file->lock_count++;
	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

	/* Wait until the hardware lock is gone or we can acquire it.  */
	spin_lock(&master->lock.spinlock);

	if (master->lock.user_waiters == UINT32_MAX) {
		error = -EBUSY;
		goto out1;
	}

	master->lock.user_waiters++;
	DRM_SPIN_WAIT_UNTIL(error, &master->lock.lock_queue,
	    &master->lock.spinlock,
	    ((master->lock.hw_lock == NULL) ||
		drm_lock_acquire(&master->lock, lock_request->context)));
	KASSERT(0 < master->lock.user_waiters);
	master->lock.user_waiters--;
	if (error)
		goto out1;

	/* If the lock is gone, give up.  */
	if (master->lock.hw_lock == NULL) {
#if 0				/* XXX Linux sends SIGTERM, but why?  */
		mutex_enter(proc_lock);
		psignal(curproc, SIGTERM);
		mutex_exit(proc_lock);
		error = -EINTR;
#else
		error = -ENXIO;
#endif
		goto out1;
	}

	/* Mark the lock as owned by file.  */
	master->lock.file_priv = file;
	master->lock.lock_time = jiffies;	/* XXX Unused?  */

	/* Block signals while the lock is held.  */
	error = drm_lock_block_signals(dev, lock_request, file);
	if (error)
		goto fail2;

	/* Enter the DMA quiescent state if requested and available.  */
	/* XXX Drop the spin lock first...  */
	if (ISSET(lock_request->flags, _DRM_LOCK_QUIESCENT) &&
	    (dev->driver->dma_quiescent != NULL)) {
		error = (*dev->driver->dma_quiescent)(dev);
		if (error)
			goto fail3;
	}

	/* Success!  */
	error = 0;
	goto out1;

fail3:	drm_lock_unblock_signals(dev, lock_request, file);
fail2:	drm_lock_release(&master->lock, lock_request->context);
	master->lock.file_priv = NULL;
out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}

/*
 * Try to relinquish a lock that userland thinks it holds, per
 * userland's request.  Fail if it doesn't actually hold the lock.
 */
int
drm_unlock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it.  */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to unlock on behalf of the kernel.  */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Count it in the device statistics.  */
	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

	/* Lock the internal spin lock to make changes.  */
	spin_lock(&master->lock.spinlock);

	/*
	 * Make sure the lock hasn't gone away.  drm_lock treats
	 * hw_lock == NULL as possible under this spin lock, so check
	 * before dereferencing it.
	 */
	if (master->lock.hw_lock == NULL) {
		error = -ENXIO;	/* XXX Same as drm_lock.  */
		goto out1;
	}

	/* Make sure it's actually locked.  */
	if (!_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock)) {
		error = -EINVAL;	/* XXX Right error?  */
		goto out1;
	}

	/* Make sure it's locked in the right context.  */
	if (_DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock) !=
	    lock_request->context) {
		error = -EACCES;	/* XXX Right error?  */
		goto out1;
	}

	/* Make sure it's locked by us.  */
	if (master->lock.file_priv != file) {
		error = -EACCES;	/* XXX Right error?  */
		goto out1;
	}

	/* Actually release the lock.  */
	drm_lock_release(&master->lock, lock_request->context);

	/* Clear the lock's file pointer, just in case.  */
	master->lock.file_priv = NULL;

	/* Unblock the signals we blocked in drm_lock.  */
	drm_lock_unblock_signals(dev, lock_request, file);

	/* Success!  */
	error = 0;

out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}
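
/*
 * For illustration only, a sketch of the userland side of the protocol
 * described at the top of this file.  This is not code from libdrm;
 * the names fd, context, and hw_lock here are hypothetical:
 *
 *	volatile unsigned int *lock = &hw_lock->lock;	// shared mapping
 *	unsigned int old = *lock;
 *	if (!_DRM_LOCK_IS_HELD(old) &&
 *	    atomic_cas_uint(lock, old, context | _DRM_LOCK_HELD) == old) {
 *		// Took the lock without a system call.
 *	} else {
 *		struct drm_lock lock_request = { .context = context };
 *		ioctl(fd, DRM_IOCTL_LOCK, &lock_request); // blocks in drm_lock
 *	}
 *
 * On release, if the kernel has set _DRM_LOCK_CONT in the meantime,
 * userland must call DRM_IOCTL_UNLOCK (which lands in drm_unlock
 * above) rather than just clearing the lock word, so that waiters get
 * woken.
 */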

/*
 * Drop the lock.
 *
 * Return value is an artefact of Linux.  Caller must guarantee
 * preconditions; failure is fatal.
 *
 * XXX Should we also unblock signals like drm_unlock does?
 */
int
drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{

	spin_lock(&lock_data->spinlock);
	drm_lock_release(lock_data, context);
	spin_unlock(&lock_data->spinlock);

	return 0;
}

/*
 * Take the lock for the kernel's use.
 *
 * XXX This is unimplemented because it's not clear that the Linux code
 * makes sense at all.  Linux's drm_idlelock_take never blocks, but it
 * doesn't guarantee that the kernel holds the lock on return!  For
 * now, I'll hope that the code paths relying on this don't matter yet.
 */
void
drm_idlelock_take(struct drm_lock_data *lock_data __unused)
{
	KASSERT(mutex_is_locked(&drm_global_mutex));
	panic("drm_idlelock_take is not yet implemented");	/* XXX */
}

/*
 * Release the lock from the kernel.
 */
void
drm_idlelock_release(struct drm_lock_data *lock_data __unused)
{
	KASSERT(mutex_is_locked(&drm_global_mutex));
	panic("drm_idlelock_release is not yet implemented");	/* XXX */
}

/*
 * Does this file hold this drm device's hardware lock?
 *
 * Used to decide whether to release the lock when the file is being
 * closed.
 *
 * XXX I don't think this answers correctly in the case that the
 * userland has taken the lock and it is uncontended.  But I don't
 * think we can know what the correct answer is in that case.
 */
int
drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file)
{
	struct drm_lock_data *const lock_data = &file->master->lock;
	int answer = 0;

	/* If this file has never locked anything, then no.  */
	if (file->lock_count == 0)
		return 0;

	spin_lock(&lock_data->spinlock);

	/* If there is no lock, then this file doesn't hold it.  */
	if (lock_data->hw_lock == NULL)
		goto out;

	/* If this lock is not held, then this file doesn't hold it.  */
	if (!_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock))
		goto out;

	/*
	 * Otherwise, it boils down to whether this file is the owner
	 * or someone else.
	 *
	 * XXX This is not reliable!  Userland doesn't update this when
	 * it takes the lock...
	 */
	answer = (file == lock_data->file_priv);

out:	spin_unlock(&lock_data->spinlock);
	return answer;
}
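
/*
 * Worked example for the CAS loop in drm_lock_acquire below, using the
 * <drm/drm.h> bit layout noted above and an arbitrary context of 3:
 *
 *	old = 0x00000000 (free)		-> new = 0x80000003, i.e. held
 *					   by 3 (plus _DRM_LOCK_CONT if
 *					   any waiters are counted)
 *	old = 0x80000005 (held by 5)	-> new = 0xc0000005: still held
 *					   by 5 but marked contended;
 *					   the CAS succeeds, the
 *					   acquisition fails
 *	old = 0x80000003 (held by 3)	-> recursive acquisition:
 *					   DRM_ERROR and failure
 */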

/*
 * Try to acquire the lock.  Return true if successful, false if not.
 *
 * This is hairy because it races with userland, and if userland
 * already holds the lock, we must tell it, by marking the lock
 * _DRM_LOCK_CONT (contended), that it must call ioctl(DRM_UNLOCK) to
 * release the lock so that we can wake waiters.
 *
 * XXX What happens if the process is interrupted?
 */
static bool
drm_lock_acquire(struct drm_lock_data *lock_data, int context)
{
	volatile unsigned int *const lock = &lock_data->hw_lock->lock;
	unsigned int old, new;

	KASSERT(spin_is_locked(&lock_data->spinlock));

	do {
		old = *lock;
		if (!_DRM_LOCK_IS_HELD(old)) {
			new = (context | _DRM_LOCK_HELD);
			if ((0 < lock_data->user_waiters) ||
			    (0 < lock_data->kernel_waiters))
				new |= _DRM_LOCK_CONT;
		} else if (_DRM_LOCKING_CONTEXT(old) != context) {
			new = (old | _DRM_LOCK_CONT);
		} else {
			DRM_ERROR("%d already holds heavyweight lock\n",
			    context);
			return false;
		}
	} while (atomic_cas_uint(lock, old, new) != old);

	return !_DRM_LOCK_IS_HELD(old);
}

/*
 * Release the lock held in the given context.  Wake any waiters,
 * preferring kernel waiters over userland waiters.
 *
 * Lock's spinlock must be held and lock must be held in this context.
 */
static void
drm_lock_release(struct drm_lock_data *lock_data, int context)
{

	(void)context;		/* ignore */
	KASSERT(spin_is_locked(&lock_data->spinlock));
	KASSERT(_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock));
	KASSERT(_DRM_LOCKING_CONTEXT(lock_data->hw_lock->lock) == context);

	lock_data->hw_lock->lock = 0;
	DRM_SPIN_WAKEUP_ONE(&lock_data->lock_queue, &lock_data->spinlock);
}

/*
 * Block signals for a process that holds a drm lock.
 *
 * XXX It's not processes but files that hold drm locks, so blocking
 * signals in a process seems wrong, and it's not clear that blocking
 * signals automatically is remotely sensible anyway.
 */
static int
drm_lock_block_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
	return 0;
}

/*
 * Unblock the signals that drm_lock_block_signals blocked.
 */
static void
drm_lock_unblock_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
}
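
/*
 * Usage note, for illustration: the intended caller of
 * drm_i_have_hw_lock and drm_lock_free above is the file-close path,
 * which (as in Linux's drm_release) does roughly
 *
 *	if (drm_i_have_hw_lock(dev, file))
 *		drm_lock_free(&file->master->lock,
 *		    _DRM_LOCKING_CONTEXT(file->master->lock.hw_lock->lock));
 *
 * so that a dying file doesn't strand the hardware lock.
 */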