xref: /openbsd-src/sys/dev/pci/drm/drm_file.c (revision 505ee9ea3b177e2387d907a91ca7da069f3f14d8)
/*
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
#include <uapi/asm/mman.h>
#include <drm/drm_vma_manager.h>
#endif

/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

bool drm_dev_needs_global_mutex(struct drm_device *dev)
{
	/*
	 * Legacy drivers rely on all kinds of BKL locking semantics, don't
	 * bother. They also still need BKL locking for their ioctls, so better
	 * safe than sorry.
	 */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		return true;

	/*
	 * The deprecated ->load callback must be called after the driver is
	 * already registered. This means such drivers rely on the BKL to make
	 * sure an open can't proceed until the driver is actually fully set up.
	 * Similar hilarity holds for the unload callback.
	 */
	if (dev->driver->load || dev->driver->unload)
		return true;

	/*
	 * Drivers with the lastclose callback assume that it's synchronized
	 * against concurrent opens, which again needs the BKL. The proper fix
	 * is to use the drm_client infrastructure with proper locking for each
	 * client.
	 */
	if (dev->driver->lastclose)
		return true;

	return false;
}

/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The resulting &struct file_operations must be
 * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
 * Note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n, so there's no
 * need to sprinkle #ifdef into the code. Drivers which implement private ioctls
 * that require 32/64 bit compatibility support must provide their own
 * &file_operations.compat_ioctl handler that processes private ioctls and calls
 * drm_compat_ioctl() for core ioctls.
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used by the KMS API to send
 * vblank and page flip completion events, but drivers can also use them for
 * their own needs, e.g. to signal completion of rendering.
 *
 * For the driver-side event interface see drm_event_reserve_init() and
 * drm_send_event() as the main starting points.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
 * function, modern drivers should use one of the provided memory-manager
 * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and
 * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
 *
 * No other file operations are supported by the DRM userspace API. Overall the
 * following is an example &file_operations structure::
 *
 *     static const struct file_operations example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = no_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 *
 * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
 * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
 * simpler.
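 *
 * With these macros the boilerplate above reduces to a single line (a minimal
 * sketch for a GEM-based driver; "example" is a placeholder driver name)::
 *
 *     DEFINE_DRM_GEM_FOPS(example_drm_fops);
 *
 *     static struct drm_driver example_driver = {
 *             .fops = &example_drm_fops,
 *             // other driver fields elided
 *     };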
 *
 * The driver's &file_operations must be stored in &drm_driver.fops.
 *
 * For driver-private IOCTL handling see the more detailed discussion in
 * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
 */

/**
 * drm_file_alloc - allocate file context
 * @minor: minor to allocate on
 *
 * This allocates a new DRM file context. It is not linked into any context and
 * can be used by the caller freely. Note that the context keeps a pointer to
 * @minor, so it must be freed before @minor is.
 *
 * RETURNS:
 * Pointer to newly allocated context, ERR_PTR on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

#ifdef __linux__
	file = kzalloc(sizeof(*file), GFP_KERNEL);
#else
	file = kzalloc(max(dev->driver->file_priv_size, sizeof(*file)),
	    GFP_KERNEL);
#endif
	if (!file)
		return ERR_PTR(-ENOMEM);

#ifdef __linux__
	file->pid = get_pid(task_pid(current));
#endif
	file->minor = minor;

	/* for compatibility root is always authenticated */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	rw_init(&file->fbs_lock, "fbslk");
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* set aside 4k for event buffer */

	rw_init(&file->event_read_lock, "evread");

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

out_prime_destroy:
	drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}

static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * drm_file_free - free file context
 * @file: context to free, or NULL
 *
 * This destroys and deallocates a DRM file context previously allocated via
 * drm_file_alloc(). The caller must make sure to unlink it from any contexts
 * before calling this.
 *
 * If NULL is passed, this is a no-op.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

#ifdef __linux__
	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  atomic_read(&dev->open_count));
#else
	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    curproc->p_p->ps_pid, (long)&dev->dev,
	    atomic_read(&dev->open_count));
#endif

	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	drm_prime_destroy_file_private(&file->prime);

	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}

#ifdef __linux__

static void drm_close_helper(struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	drm_file_free(file_priv);
}

/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}

#endif /* __linux__ */

/*
 * Called whenever a process opens a drm node
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it to the doubly-linked list in \p dev.
 */
#ifdef __linux__
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
				struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
}
#endif /* __linux__ */

/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open method.
 * It looks up the correct DRM device and instantiates all the per-file
 * resources for it. It also calls the &drm_driver.open driver callback.
 *
 * RETURNS:
 *
 * 0 on success or negative errno value on failure.
 */
#ifdef __linux__
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_legacy_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

err_undo:
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);
#endif

void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}

/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file, and calls the
 * &drm_driver.postclose driver callback. If this is the last open file for the
 * DRM device, it also proceeds to call the &drm_driver.lastclose driver
 * callback.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	drm_close_helper(filp);

	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
#endif
}
EXPORT_SYMBOL(drm_release);

/**
 * drm_release_noglobal - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function may be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file prior to taking
 * the drm_global_mutex, which then calls the &drm_driver.postclose driver
 * callback. If this is the last open file for the DRM device, it also proceeds
 * to call the &drm_driver.lastclose driver callback.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release_noglobal(struct inode *inode, struct file *filp)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	drm_close_helper(filp);

	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
		drm_lastclose(dev);
		mutex_unlock(&drm_global_mutex);
	}

	drm_minor_release(minor);

	return 0;
#endif
}
EXPORT_SYMBOL(drm_release_noglobal);

/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their &file_operations.read
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion, this
 * means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
 * must set the &file_operations.llseek to no_llseek(). Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress. Since
 * the maximum event space is currently 4K, it's recommended to just use that for
 * safety.
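 *
 * From userspace the event stream is consumed by reading into a 4k buffer and
 * walking the returned bytes one event at a time (a minimal sketch;
 * handle_event() is a hypothetical dispatcher)::
 *
 *     char buf[4096];
 *     ssize_t len = read(fd, buf, sizeof(buf));
 *     char *p = buf;
 *
 *     while (len > 0 && p < buf + len) {
 *             struct drm_event *ev = (struct drm_event *)p;
 *
 *             handle_event(ev); // dispatch on ev->type, e.g. DRM_EVENT_VBLANK
 *             p += ev->length;
 *     }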
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	if (!access_ok(buffer, count))
		return -EFAULT;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible(&file_priv->event_wait);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
#endif
}
EXPORT_SYMBOL(drm_read);

#ifdef notyet
/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their &file_operations.poll method
 * iff they use DRM events for asynchronous signalling to userspace.  Since
 * events are used by the KMS API for vblank and page flip completion, this means
 * all modern display drivers must use it.
 *
 * See also drm_read().
 *
 * RETURNS:
 *
 * Mask of POLL flags indicating the current status of the file.
 */
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(drm_poll);
#endif

/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embedded @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold &drm_device.event_lock.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);

/**
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embedded @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * Callers which already hold &drm_device.event_lock should use
 * drm_event_reserve_init_locked() instead.
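 *
 * The usual life cycle of an event attached to a nonblocking operation looks
 * roughly like this (a minimal sketch; struct example_event and
 * EXAMPLE_EVENT_TYPE are hypothetical)::
 *
 *     struct example_event {
 *             struct drm_pending_event base; // must be the first member
 *             struct drm_event event;
 *     };
 *
 *     struct example_event *e = kzalloc(sizeof(*e), GFP_KERNEL);
 *     int ret;
 *
 *     if (!e)
 *             return -ENOMEM;
 *     e->event.type = EXAMPLE_EVENT_TYPE;
 *     e->event.length = sizeof(e->event);
 *     ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
 *     if (ret) {
 *             kfree(e);
 *             return ret;
 *     }
 *
 *     // on completion of the asynchronous work:
 *     drm_send_event(dev, &e->base);
 *     // or, if the operation had to be aborted before being queued:
 *     drm_event_cancel_free(dev, &e->base);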
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return ret;
}
EXPORT_SYMBOL(drm_event_reserve_init);

/**
 * drm_event_cancel_free - free a DRM event and release its space
 * @dev: DRM device
 * @p: tracking structure for the pending event
 *
 * This function frees the event @p initialized with drm_event_reserve_init()
 * and releases any allocated space. It is used to cancel an event when the
 * nonblocking operation could not be submitted and needed to be aborted.
 */
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->event_lock, flags);
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (p->fence)
		dma_fence_put(p->fence);

	kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);

/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock, see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
#ifdef __OpenBSD__
	selwakeup(&e->file_priv->rsel);
#endif
}
EXPORT_SYMBOL(drm_send_event_locked);

/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires
 * &drm_device.event_lock, see drm_send_event_locked() for callers which already
 * hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_send_event_locked(dev, e);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);

/**
 * mock_drm_getfile - Create a new struct file for the drm device
 * @minor: drm minor to wrap (e.g. #drm_device.primary)
 * @flags: file creation mode (O_RDWR etc)
 *
 * This creates a new struct file that wraps a DRM file context around a
 * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without
 * invoking userspace. The struct file may be operated on using its f_op
 * (the drm_device.driver.fops) to mimic userspace operations, or be supplied
 * to userspace facing functions as an internal/anonymous client.
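 *
 * Typical use in a self-test looks like this (a minimal sketch)::
 *
 *     struct file *file = mock_drm_getfile(dev->primary, O_RDWR);
 *
 *     if (IS_ERR(file))
 *             return PTR_ERR(file);
 *     // exercise file->f_op as if userspace had opened the node
 *     fput(file);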
 *
 * RETURNS:
 * Pointer to newly created struct file, ERR_PTR on failure.
 */
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
{
	STUB();
	return ERR_PTR(-ENOSYS);
#ifdef notyet
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	struct file *file;

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
	if (IS_ERR(file)) {
		drm_file_free(priv);
		return file;
	}

	/* Everyone shares a single global address space */
	file->f_mapping = dev->anon_inode->i_mapping;

	drm_dev_get(dev);
	priv->filp = file;

	return file;
#endif
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);

#ifdef CONFIG_MMU
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * drm_addr_inflate() attempts to construct an aligned area by inflating
 * the area size and skipping the unaligned start of the area.
 * adapted from shmem_get_unmapped_area()
 */
static unsigned long drm_addr_inflate(unsigned long addr,
				      unsigned long len,
				      unsigned long pgoff,
				      unsigned long flags,
				      unsigned long huge_size)
{
	unsigned long offset, inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
	if (offset && offset + len < 2 * huge_size)
		return addr;
	if ((addr & (huge_size - 1)) == offset)
		return addr;

	inflated_len = len + huge_size - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)
		return addr;

	inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
						       0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	inflated_offset = inflated_addr & (huge_size - 1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += huge_size;

	if (inflated_addr > TASK_SIZE - len)
		return addr;

	return inflated_addr;
}

/**
 * drm_get_unmapped_area() - Get an unused user-space virtual memory area
 * suitable for huge page table entries.
 * @file: The struct file representing the address space being mmap()'d.
 * @uaddr: Start address suggested by user-space.
 * @len: Length of the area.
 * @pgoff: The page offset into the address space.
 * @flags: mmap flags
 * @mgr: The address space manager used by the drm driver. This argument can
 * probably be removed at some point when all drivers use the same
 * address space manager.
 *
 * This function attempts to find an unused user-space virtual memory area
 * that can accommodate the size we want to map, and that is properly
 * aligned to facilitate huge page table entries matching actual
 * huge pages or huge page aligned memory in buffer objects. Buffer objects
 * are assumed to start at huge page boundary pfns (io memory) or be
 * populated by huge pages aligned to the start of the buffer object
 * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
 *
 * Return: aligned user-space address.
 */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	unsigned long addr;
	unsigned long inflated_addr;
	struct drm_vma_offset_node *node;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * @pgoff is the file page-offset, the huge page boundaries of
	 * which typically align to physical address huge page boundaries.
	 * That's not true for DRM, however, where physical address huge
	 * page boundaries instead are aligned with the offset from
	 * buffer object start. So adjust @pgoff to be the offset from
	 * buffer object start.
	 */
	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
	if (node)
		pgoff -= node->vm_node.start;
	drm_vma_offset_unlock_lookup(mgr);

	addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint, respect that as before.
	 */
	if (uaddr)
		return addr;

	inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
					 HPAGE_PMD_SIZE);

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    len >= HPAGE_PUD_SIZE)
		inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
						 flags, HPAGE_PUD_SIZE);
	return inflated_addr;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
#endif /* CONFIG_MMU */
1058