/*	$NetBSD: drm_gem.c,v 1.10 2018/08/27 15:22:53 riastradh Exp $	*/

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem.c,v 1.10 2018/08/27 15:22:53 riastradh Exp $");

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <asm/bug.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

#ifdef __NetBSD__
#include <uvm/uvm_extern.h>
#endif

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
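
/*
 * For example, with 4 KiB pages (PAGE_SHIFT == 12) on a 64-bit
 * platform, DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 =
 * 0x100000 pages, i.e. the fake offsets begin at the 4 GiB byte mark,
 * just past anything a 32-bit pgoff could name, and the range spans
 * 0xFFFFF * 16 pages, roughly 64 GiB of offset space.
 */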

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

#ifdef __NetBSD__
	linux_mutex_init(&dev->object_name_lock);
#else
	mutex_init(&dev->object_name_lock);
#endif
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;

	idr_destroy(&dev->object_name_idr);
#ifdef __NetBSD__
	linux_mutex_destroy(&dev->object_name_lock);
#endif
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
#ifndef __NetBSD__
	struct file *filp;
#endif

	drm_gem_private_object_init(dev, obj, size);

#ifdef __NetBSD__
	/*
	 * A uao may not have size 0, but a gem object may.  Allocate a
	 * spurious page so we needn't teach uao how to have size 0.
	 */
	obj->filp = uao_create(MAX(size, PAGE_SIZE), 0);
	/*
	 * XXX This is gross.  We ought to do it the other way around:
	 * set the uao to have the main uvm object's lock.  However,
	 * uvm_obj_setlock is not safe on uvm_aobjs.
	 */
	mutex_obj_hold(obj->filp->vmobjlock);
	uvm_obj_setlock(&obj->gemo_uvmobj, obj->filp->vmobjlock);
#else
	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
#endif

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
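
/*
 * A minimal usage sketch (hypothetical mydrv_* names, error paths
 * trimmed): drivers normally embed struct drm_gem_object in their own
 * object type and initialize the shmem backing with a page-aligned
 * size:
 *
 *	struct mydrv_gem_object *mobj;
 *	int ret;
 *
 *	mobj = kzalloc(sizeof(*mobj), GFP_KERNEL);
 *	if (mobj == NULL)
 *		return ERR_PTR(-ENOMEM);
 *	ret = drm_gem_object_init(dev, &mobj->base, PAGE_ALIGN(size));
 *	if (ret) {
 *		kfree(mobj);
 *		return ERR_PTR(ret);
 *	}
 */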

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __NetBSD__
	obj->filp = NULL;
	KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
	KASSERT(dev->driver->gem_uvm_ops != NULL);
	uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops, true, 1);
#else
	obj->filp = NULL;
#endif

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
#ifdef __NetBSD__
	drm_vma_node_init(&obj->vma_node);
#else
	drm_vma_node_reset(&obj->vma_node);
#endif
}
EXPORT_SYMBOL(drm_gem_private_object_init);
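
/*
 * A sketch of when the private variant applies (hypothetical mobj and
 * sgt names): it suits objects whose storage GEM does not manage, such
 * as dma-buf imports, where the pages come from the attachment rather
 * than from shmem and the driver records the backing itself:
 *
 *	drm_gem_private_object_init(dev, &mobj->base,
 *	    attach->dmabuf->size);
 *	mobj->sgt = sgt;
 */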

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
#endif
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
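
/*
 * Drivers of this vintage hook the helper into their struct drm_driver
 * next to their own dumb-buffer callbacks, along these lines (sketch;
 * mydrv_dumb_create and mydrv_dumb_map_offset are hypothetical):
 *
 *	static struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.dumb_create = mydrv_dumb_create,
 *		.dumb_map_offset = mydrv_dumb_map_offset,
 *		.dumb_destroy = drm_gem_dumb_destroy,
 *	};
 */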

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret)
		goto err_remove;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, *handlep);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
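
/*
 * A common calling pattern (sketch, hypothetical mobj/args names):
 * create the handle for a freshly constructed object, then drop the
 * creation reference so the handle holds the only remaining one:
 *
 *	ret = drm_gem_handle_create(file_priv, &mobj->base, &handle);
 *	drm_gem_object_unreference_unlocked(&mobj->base);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 *	return 0;
 */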

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
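
/*
 * A dumb_map_offset-style sketch (hypothetical; locking and error
 * paths trimmed): allocate the fake offset and hand it back to
 * userspace, which then passes it as the offset argument of mmap(2)
 * on the DRM fd:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret == 0)
 *		*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *	drm_gem_object_unreference_unlocked(obj);
 *	return ret;
 */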

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
#ifdef __NetBSD__
struct page **
drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct pglist pglist;
	struct vm_page *vm_page;
	struct page **pages;
	unsigned i;
	int ret;

	KASSERT((obj->size & (PAGE_SIZE - 1)) == 0);

	pages = drm_malloc_ab(obj->size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL) {
		ret = -ENOMEM;
		goto fail0;
	}

	TAILQ_INIT(&pglist);
	/* XXX errno NetBSD->Linux */
	ret = -uvm_obj_wirepages(obj->filp, 0, obj->size, &pglist);
	if (ret)
		goto fail1;

	i = 0;
	TAILQ_FOREACH(vm_page, &pglist, pageq.queue)
		pages[i++] = container_of(vm_page, struct page, p_vmp);

	return pages;

fail1:	drm_free_large(pages);
fail0:	return ERR_PTR(ret);
}
#else
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = file_inode(obj->filp)->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
#endif
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
#ifdef __NetBSD__
void
drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty,
    bool accessed __unused /* XXX */)
{
	unsigned i;

	for (i = 0; i < (obj->size >> PAGE_SHIFT); i++) {
		if (dirty)
			pages[i]->p_vmp.flags &= ~PG_CLEAN;
	}

	uvm_obj_unwirepages(obj->filp, 0, obj->size);
}
#else
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
#endif
EXPORT_SYMBOL(drm_gem_put_pages);
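
/*
 * The two helpers pair up; a driver that needs the whole object pinned
 * (say, for DMA setup) might do something like this (sketch only):
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	(use pages[0 .. obj->size/PAGE_SIZE - 1] while pinned)
 *	drm_gem_put_pages(obj, pages, true, false);
 */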

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
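
/*
 * Each successful lookup returns a fresh reference that the caller
 * must drop; a typical ioctl body follows this shape (sketch,
 * mydrv_do_something is hypothetical):
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	ret = mydrv_do_something(obj, args);
 *	drm_gem_object_unreference_unlocked(obj);
 *	return ret;
 */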

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	idr_preload(GFP_KERNEL);
	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	idr_preload_end();
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
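
/*
 * From userspace, FLINK and OPEN pair up to share a buffer between two
 * processes, each with its own DRM fd (sketch; error checking
 * omitted):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *	(pass flink.name to the second process somehow)
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);
 *	(op.handle and op.size are now valid in fd_b's handle space)
 */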

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
#ifdef __NetBSD__
	spin_lock_destroy(&file_private->table_lock);
#endif
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
	WARN_ON(obj->dma_buf);
#endif

#ifdef __NetBSD__
	drm_vma_node_destroy(&obj->vma_node);
	if (obj->filp)
		uao_detach(obj->filp);
	uvm_obj_destroy(&obj->gemo_uvmobj, true);
#else
	if (obj->filp)
		fput(obj->filp);
#endif

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

#ifndef __NetBSD__

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, filp)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
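
/*
 * On the Linux side, drivers reach drm_gem_mmap() by installing it as
 * the mmap hook in their file_operations (sketch; the surrounding
 * driver is hypothetical):
 *
 *	static const struct file_operations mydrv_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *	};
 */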

#endif	/* !defined(__NetBSD__) */