/*	$OpenBSD: drm_gem.c,v 1.5 2017/09/03 13:28:54 jsg Exp $	*/
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_vma_manager.h>

#include <uvm/uvm.h>

void drm_unref(struct uvm_object *);
void drm_ref(struct uvm_object *);
boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
    vm_fault_t, vm_prot_t, int);

struct uvm_pagerops drm_pgops = {
	NULL,		/* pgo_init */
	drm_ref,	/* pgo_reference */
	drm_unref,	/* pgo_detach */
	drm_fault,	/* pgo_fault */
	drm_flush,	/* pgo_flush */
};

void
drm_ref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_reference(obj);
}

void
drm_unref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_unreference_unlocked(obj);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * We do not allow device mappings to be mapped copy-on-write,
	 * so reject any attempt to do so here.
	 */
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return (VM_PAGER_ERROR);
	}

	/*
	 * We could end up here as the result of a copyin(9) or
	 * copyout(9) while handling an ioctl.  So we must be careful
	 * not to deadlock.  Therefore we only block if the quiesce
	 * count is zero, which guarantees we didn't enter from within
	 * an ioctl code path.
	 */
	mtx_enter(&dev->quiesce_mtx);
	if (dev->quiesce && dev->quiesce_count == 0) {
		mtx_leave(&dev->quiesce_mtx);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		mtx_enter(&dev->quiesce_mtx);
		while (dev->quiesce) {
			msleep(&dev->quiesce, &dev->quiesce_mtx,
			    PZERO, "drmflt", 0);
		}
		mtx_leave(&dev->quiesce_mtx);
		return (VM_PAGER_REFAULT);
	}
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* Call down into the driver to do the actual fault handling. */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (ret);
}

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

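/*
 * OpenBSD glue for mapping GEM objects: mmap(2) of the drm device ends up
 * here, and the fake offset is resolved to a GEM object whose uvm_object
 * is handed back, so that subsequent faults are serviced by drm_fault()
 * above.  A sketch of the intended flow, with uvm internals elided and
 * fake_offset assumed to have been handed out by the driver beforehand:
 *
 *	(user)   ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *	             MAP_SHARED, drm_fd, fake_offset);
 *	(kernel) udv_attach_drm(dev, prot, fake_offset, size) -> &obj->uobj
 */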
struct uvm_object *
udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_device *dev = drm_get_device_from_kdev(device);
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	struct drm_file *priv;
	struct file *filp;

	if (cdevsw[major(device)].d_mmap != drmmmap)
		return NULL;

	if (dev == NULL)
		return NULL;

	if (dev->driver->mmap)
		return dev->driver->mmap(dev, off, size);

	mutex_lock(&dev->struct_mutex);

	priv = drm_find_file_by_minor(dev, minor(device));
	if (priv == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}
	filp = priv->filp;

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   off >> PAGE_SHIFT,
					   atop(round_page(size)));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	drm_gem_object_reference(obj);

	mutex_unlock(&dev->struct_mutex);
	return &obj->uobj;
}

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, and freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implementing the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

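/*
 * A sketch of the resulting handle/name flow from userspace, using the
 * standard GEM ioctls implemented below (error handling elided; fd2 is a
 * second open of the same device):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);	// publish a global name
 *
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op);	// fills op.handle, op.size
 */
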
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/*
 * pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked-up offset will fit.
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

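/*
 * Worked example (illustration only, assuming 4 KiB pages, PAGE_SHIFT == 12,
 * on a 64-bit platform):
 *
 *	START = (0xFFFFFFFFUL >> 12) + 1  = 0x100000 pages  (byte offset 4 GiB)
 *	SIZE  = (0xFFFFFFFFUL >> 12) * 16 ~= 16M pages      (~64 GiB of space)
 *
 * so fake offsets occupy byte offsets in roughly [4 GiB, 68 GiB): above
 * anything that fits a 32-bit pgoff, and hence recognizable at mmap time.
 */
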
/**
 * Initialize the GEM device fields
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	rw_init(&dev->object_name_lock, "drmonl");
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

#ifdef __linux__

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

#else

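/*
 * OpenBSD counterpart: back the object with a UVM anonymous object (uao)
 * instead of a shmfs file.  obj->filp aliases the uao so that shared code
 * which only passes the pointer around keeps working.
 */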
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	drm_gem_private_object_init(dev, obj, size);

	obj->uao = uao_create(size, 0);
	uvm_objinit(&obj->uobj, &drm_pgops, 1);

	atomic_inc(&dev->obj_count);
	atomic_add(obj->size, &dev->obj_memory);

	obj->filp = (void *)obj->uao;

	return 0;
}

#endif

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store.  Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
#ifdef __linux__
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
#endif
}

/**
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object.  Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
#ifdef __linux__
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
#endif
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Drop the handle count (and free any name) under the name lock
	 * first: the unreference below may drop the last ref, in which
	 * case the object would disappear before we checked for a name.
	 */
	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for GEM-based drivers
 *
 * This implements the ->dumb_destroy KMS driver callback for drivers which
 * use GEM to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning.  Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret)
		goto err_remove;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, *handlep);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_unreference_unlocked(obj);
	return ret;
}

/**
 * Create a handle for this object.  This adds a handle reference
 * to the object, which includes a regular reference count.  Callers
 * will likely want to unreference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

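/*
 * Typical driver-side use, sketched (not code from this tree;
 * driver_alloc_object() is a hypothetical helper): publish a handle for a
 * freshly allocated object, then drop the allocation reference so the
 * handle becomes the sole owner.
 *
 *	struct drm_gem_object *obj;
 *	u32 handle;
 *	int ret;
 *
 *	obj = driver_alloc_object(dev, size);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	drm_gem_object_unreference_unlocked(obj);	// handle keeps its ref
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */
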
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

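/*
 * Sketch of how a driver's map ioctl might use this, assuming the
 * drm_vma_node_offset_addr() helper from drm_vma_manager.h:
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 *	// userspace then passes args->offset as the mmap(2) file offset
 */
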
#ifdef __linux__

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB.  If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue.  But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

#endif

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

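/*
 * The usual ioctl pattern around a lookup, sketched (do_something_with()
 * is hypothetical): take a reference, use the object, and drop the
 * reference on every exit path.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	ret = do_something_with(obj);
 *	drm_gem_object_unreference_unlocked(obj);
 *	return ret;
 */
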
/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	mtx_init(&file_private->table_lock, IPL_NONE);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

#ifdef __linux__

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

#else

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->uao)
		uao_detach(obj->uao);

	atomic_dec(&dev->obj_count);
	atomic_sub(obj->size, &dev->obj_memory);
}

#endif

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
	    container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

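/*
 * How this is reached, sketched: the unreference helpers are expected to
 * wrap kref_put() with this function as the release callback:
 *
 *	kref_put(&obj->refcount, drm_gem_object_free);
 */
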
#ifdef __linux__

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation,
 * when the GEM object is not looked up based on its fake offset. To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will
 * fail with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

#endif