/*	$NetBSD: drm_gem.c,v 1.15 2020/02/23 15:46:40 ad Exp $	*/

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem.c,v 1.15 2020/02/23 15:46:40 ad Exp $");

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

#ifdef __NetBSD__
#include <uvm/uvm_extern.h>
#include <linux/nbsd-namespace.h>
#endif

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked-up offset will fit.
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
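
/*
 * For a concrete sense of the window these macros carve out: with
 * 4 KiB pages (PAGE_SHIFT == 12) on a 64-bit platform,
 * DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 = 0x100000
 * pages, so fake offsets begin at the 4 GiB boundary, and
 * DRM_FILE_PAGE_OFFSET_SIZE spans 0xFFFFF * 16 pages, roughly 64 GiB
 * of mmap offset space for buffer objects.
 */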

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;

	idr_destroy(&dev->object_name_idr);
	mutex_destroy(&dev->object_name_lock);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
#ifndef __NetBSD__
	struct file *filp;
#endif

	drm_gem_private_object_init(dev, obj, size);

#ifdef __NetBSD__
	/*
	 * A uao may not have size 0, but a gem object may.  Allocate a
	 * spurious page so we needn't teach uao how to have size 0.
	 */
	obj->filp = uao_create(MAX(size, PAGE_SIZE), 0);
	/*
	 * XXX This is gross.  We ought to do it the other way around:
	 * set the uao to have the main uvm object's lock.  However,
	 * uvm_obj_setlock is not safe on uvm_aobjs.
	 */
	rw_obj_hold(obj->filp->vmobjlock);
	uvm_obj_setlock(&obj->gemo_uvmobj, obj->filp->vmobjlock);
#else
	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
#endif

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
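
/*
 * A minimal usage sketch (illustrative only; "struct foo_obj" and
 * foo_obj_create() are hypothetical driver names): drivers typically
 * embed the GEM object in their own buffer structure and initialize
 * it with a page-aligned size.
 *
 *	struct foo_obj {
 *		struct drm_gem_object base;
 *		// driver-private fields ...
 *	};
 *
 *	static struct foo_obj *
 *	foo_obj_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_obj *fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *
 *		if (fobj == NULL)
 *			return NULL;
 *		if (drm_gem_object_init(dev, &fobj->base,
 *			roundup(size, PAGE_SIZE))) {
 *			kfree(fobj);
 *			return NULL;
 *		}
 *		return fobj;
 *	}
 */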

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __NetBSD__
	obj->filp = NULL;
	KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
	KASSERT(dev->driver->gem_uvm_ops != NULL);
	uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops, true, 1);
#else
	obj->filp = NULL;
#endif

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
#ifdef __NetBSD__
	drm_vma_node_init(&obj->vma_node);
#else
	drm_vma_node_reset(&obj->vma_node);
#endif
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
#endif
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret)
		goto err_remove;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, *handlep);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
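
/*
 * A typical calling pattern (sketch; error handling abbreviated): the
 * new handle holds its own reference, so the caller drops its local
 * one once the handle has been published to userspace.
 *
 *	u32 handle;
 *	int ret;
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */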


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
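
/*
 * Sketch of the usual fake-offset flow in a driver's map ioctl
 * (illustrative; "args" is a hypothetical ioctl argument struct):
 * allocate the offset once, then hand the 64-bit byte offset back for
 * userspace to pass to mmap(2).
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 */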

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
#ifdef __NetBSD__
struct page **
drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct pglist pglist;
	struct vm_page *vm_page;
	struct page **pages;
	unsigned i;
	int ret;

	/* Object sizes must be page-aligned; assert that, not its negation. */
	KASSERT((obj->size & (PAGE_SIZE - 1)) == 0);

	pages = drm_malloc_ab(obj->size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL) {
		ret = -ENOMEM;
		goto fail0;
	}

	TAILQ_INIT(&pglist);
	/* XXX errno NetBSD->Linux */
	ret = -uvm_obj_wirepages(obj->filp, 0, obj->size, &pglist);
	if (ret)
		goto fail1;

	i = 0;
	TAILQ_FOREACH(vm_page, &pglist, pageq.queue)
		pages[i++] = container_of(vm_page, struct page, p_vmp);

	return pages;

fail1:	drm_free_large(pages);
fail0:	return ERR_PTR(ret);
}
#else
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = file_inode(obj->filp)->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
#endif
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
#ifdef __NetBSD__
void
drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty,
    bool accessed __unused /* XXX */)
{
	unsigned i;

	for (i = 0; i < (obj->size >> PAGE_SHIFT); i++) {
		if (dirty) {
			rw_enter(obj->filp->vmobjlock, RW_WRITER);
			uvm_pagemarkdirty(&pages[i]->p_vmp,
			    UVM_PAGE_STATUS_DIRTY);
			rw_exit(obj->filp->vmobjlock);
		}
	}

	uvm_obj_unwirepages(obj->filp, 0, obj->size);
}
#else
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
#endif
EXPORT_SYMBOL(drm_gem_put_pages);
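
/*
 * drm_gem_get_pages() and drm_gem_put_pages() pair up.  A minimal
 * sketch of the usual pin/access/unpin pattern:
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	// ... read or write the backing pages ...
 *	drm_gem_put_pages(obj, pages, true, true);
 */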

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
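
/*
 * The lookup returns an additional reference which the caller must
 * drop when done, e.g. (sketch, mirroring how the flink ioctl below
 * uses it):
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	// ... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */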

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	idr_preload(GFP_KERNEL);
	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	idr_preload_end();
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
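
/*
 * Together, GEM_FLINK and GEM_OPEN let two DRM clients share a buffer
 * by global name.  An illustrative userspace sketch (not part of this
 * file; error handling omitted):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// pass flink.name to the other client, which then does:
 *	struct drm_gem_open oa = { .name = name };
 *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &oa);
 *	// oa.handle and oa.size are now valid on fd2
 */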

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
#ifdef __NetBSD__
	spin_lock_destroy(&file_private->table_lock);
#endif
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
	WARN_ON(obj->dma_buf);
#endif

#ifdef __NetBSD__
	drm_vma_node_destroy(&obj->vma_node);
	if (obj->filp)
		uao_detach(obj->filp);
	uvm_obj_destroy(&obj->gemo_uvmobj, true);
#else
	if (obj->filp)
		fput(obj->filp);
#endif

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

#ifndef __NetBSD__

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
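
/*
 * Sketch of a dma-buf mmap hook built on this helper (illustrative;
 * "foo_gem_dmabuf_mmap" is a hypothetical name, and it assumes the
 * dma-buf was exported by GEM so dma_buf->priv is the GEM object):
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *	    struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */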

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, filp)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

#endif	/* !defined(__NetBSD__) */