xref: /openbsd-src/sys/dev/pci/drm/drm_gem.c (revision 4e1ee0786f11cc571bd0be17d38e46f635c719fc)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

#include <sys/conf.h>
#include <uvm/uvm.h>

void drm_unref(struct uvm_object *);
void drm_ref(struct uvm_object *);
boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
    vm_fault_t, vm_prot_t, int);

const struct uvm_pagerops drm_pgops = {
	.pgo_reference = drm_ref,
	.pgo_detach = drm_unref,
	.pgo_fault = drm_fault,
	.pgo_flush = drm_flush,
};

void
drm_ref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_get(obj);
}

void
drm_unref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_put(obj);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * We do not allow device mappings to be mapped copy-on-write,
	 * so we kill any attempt to do so here.
	 */
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return (VM_PAGER_ERROR);
	}

	/*
	 * We could end up here as the result of a copyin(9) or
	 * copyout(9) while handling an ioctl.  So we must be careful
	 * not to deadlock.  Therefore we only block if the quiesce
	 * count is zero, which guarantees we didn't enter from within
	 * an ioctl code path.
	 */
	mtx_enter(&dev->quiesce_mtx);
	if (dev->quiesce && dev->quiesce_count == 0) {
		mtx_leave(&dev->quiesce_mtx);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		mtx_enter(&dev->quiesce_mtx);
		while (dev->quiesce) {
			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
			    PZERO, "drmflt", INFSLP);
		}
		mtx_leave(&dev->quiesce_mtx);
		return (VM_PAGER_REFAULT);
	}
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* Call down into the driver to do the magic. */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (ret);
}

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

struct uvm_object *
udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_device *dev = drm_get_device_from_kdev(device);
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	struct drm_file *priv;
	struct file *filp;

	if (cdevsw[major(device)].d_mmap != drmmmap)
		return NULL;

	if (dev == NULL)
		return NULL;

	mutex_lock(&dev->filelist_mutex);
	priv = drm_find_file_by_minor(dev, minor(device));
	if (priv == NULL) {
		mutex_unlock(&dev->filelist_mutex);
		return NULL;
	}
	filp = priv->filp;
	mutex_unlock(&dev->filelist_mutex);

	if (dev->driver->mmap)
		return dev->driver->mmap(filp, accessprot, off, size);

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  off >> PAGE_SHIFT,
						  atop(round_page(size)));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return NULL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return NULL;
	}

	if (node->readonly) {
		if (accessprot & PROT_WRITE) {
			drm_gem_object_put(obj);
			return NULL;
		}
	}

	return &obj->uobj;
}

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
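
/*
 * Illustrative sketch, not part of this file: the handle-based API
 * described above is driven from userspace through the generic GEM
 * ioctls.  The ioctl numbers and argument structs are the real UAPI;
 * the header path, surrounding function, and error handling are
 * assumptions made for the example.
 */
#if 0
#include <sys/ioctl.h>
#include <drm.h>	/* via libdrm; exact include path is an assumption */

int
example_flink_and_close(int drmfd, uint32_t handle)
{
	struct drm_gem_flink flink = { .handle = handle };
	struct drm_gem_close cl = { .handle = handle };

	/* Publish a global name for the object (GEM_FLINK)... */
	if (ioctl(drmfd, DRM_IOCTL_GEM_FLINK, &flink) == -1)
		return -1;

	/* ...then drop our handle; the name alone holds no reference. */
	return ioctl(drmfd, DRM_IOCTL_GEM_CLOSE, &cl);
}
#endif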

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	rw_init(&dev->object_name_lock, "drmonl");
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

#ifdef __linux__

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

#else

int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	drm_gem_private_object_init(dev, obj, size);

	if (size > (512 * 1024 * 1024)) {
		printf("%s size too big %lu\n", __func__, size);
		return -ENOMEM;
	}

	obj->uao = uao_create(size, 0);
	uvm_obj_init(&obj->uobj, &drm_pgops, 1);

	return 0;
}

#endif
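
/*
 * Illustrative sketch, not from this file: drivers normally embed
 * struct drm_gem_object in their own buffer-object type, allocate the
 * wrapper, and hand the embedded object to drm_gem_object_init().  The
 * example_bo type and example_bo_create() helper are assumptions.
 */
#if 0
struct example_bo {
	struct drm_gem_object base;
	/* driver-private state would follow */
};

static struct example_bo *
example_bo_create(struct drm_device *dev, size_t size)
{
	struct example_bo *bo;

	/* drm_gem_private_object_init() BUG_ONs on unaligned sizes. */
	size = round_page(size);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (bo == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &bo->base, size) != 0) {
		kfree(bo);
		return NULL;
	}
	return bo;
}
#endif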

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __linux__
	obj->filp = NULL;
#else
	obj->uao = NULL;
	obj->uobj.pgops = NULL;
#endif

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */
	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
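
/*
 * Illustrative sketch, not from this file: userspace consumes the fake
 * offset returned above by passing it unmodified as the mmap(2) offset
 * on the DRM fd.  The surrounding helper is an assumption.
 */
#if 0
#include <sys/mman.h>

void *
example_map_dumb(int drmfd, uint64_t fake_offset, size_t size)
{
	/* The fake offset selects the object; it is not a file position. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
	    drmfd, fake_offset);
}
#endif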

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
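
/*
 * Illustrative sketch, not from this file: drm_gem_handle_create() is
 * typically the final step of a buffer-creation path such as a
 * &drm_driver.dumb_create implementation, since it publishes the object
 * to userspace.  example_alloc_object() is a hypothetical helper.
 */
#if 0
static int
example_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
    struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *obj;
	int ret;

	obj = example_alloc_object(dev, args->size);	/* hypothetical */
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
	/* Drop the allocation reference; the handle keeps the object alive. */
	drm_gem_object_put(obj);
	return ret;
}
#endif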

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

#ifdef notyet
/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}
#endif

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	STUB();
	return ERR_PTR(-ENOSYS);
#ifdef notyet
	struct address_space *mapping;
	struct vm_page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
#endif
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
		bool dirty, bool accessed)
{
	STUB();
#ifdef notyet
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
#endif
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			     GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
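
/*
 * Illustrative sketch, not from this file: an execbuf-style ioctl would
 * resolve its handle array in one call and drop every reference it was
 * handed once the job no longer needs the objects.
 */
#if 0
static int
example_resolve_bos(struct drm_file *file_priv, void __user *bo_handles,
    int count)
{
	struct drm_gem_object **objs = NULL;
	int i, ret;

	ret = drm_gem_objects_lookup(file_priv, bo_handles, count, &objs);
	if (ret == 0) {
		/* ... validate and use objs[0..count-1] ... */
	}

	/* The array is zero-filled, so put only the entries that resolved. */
	for (i = 0; objs != NULL && i < count; i++) {
		if (objs[i])
			drm_gem_object_put(objs[i]);
	}
	kvfree(objs);
	return ret;
}
#endif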

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on a GEM object's reservation's shared
 * and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns 0 on success, -ETIME if the wait timed out, or another negative
 * error code on failure (e.g. -ERESTARTSYS if interrupted, -EINVAL for an
 * unknown handle).
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
				    bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
						  true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
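
/*
 * Illustrative sketch, not from this file: a driver wait ioctl can be a
 * thin wrapper around the helper above.  struct example_wait_args and
 * its fields are hypothetical, and the timeout conversion assumes the
 * msecs_to_jiffies() compat helper is available.
 */
#if 0
static int
example_wait_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct example_wait_args *args = data;	/* hypothetical UAPI struct */

	return drm_gem_dma_resv_wait(file_priv, args->handle,
	    true, msecs_to_jiffies(args->timeout_ms));
}
#endif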

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	mtx_init(&file_private->table_lock, IPL_NONE);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

#ifdef __linux__
	if (obj->filp)
		fput(obj->filp);
#else
	if (obj->uao)
		uao_detach(obj->uao);
	if (obj->uobj.pgops)
		uvm_obj_destroy(&obj->uobj);
#endif

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs)
		obj->funcs->free(obj);
	else if (dev->driver->gem_free_object_unlocked)
		dev->driver->gem_free_object_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_locked - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put() instead.
 */
void
drm_gem_object_put_locked(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_locked);

#ifdef __linux__

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (obj->funcs && obj->funcs->vm_ops)
			vma->vm_ops = obj->funcs->vm_ops;
		else if (dev->driver->gem_vm_ops)
			vma->vm_ops = dev->driver->gem_vm_ops;
		else {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

#endif /* __linux__ */

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
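
/*
 * Illustrative sketch, not from this file: CPU access paths bracket
 * their use of the kernel mapping with drm_gem_vmap()/drm_gem_vunmap().
 * The helper below is an assumption.
 */
#if 0
static int
example_cpu_fill(struct drm_gem_object *obj, u8 pattern)
{
	void *vaddr = drm_gem_vmap(obj);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, pattern, obj->size);
	drm_gem_vunmap(obj, vaddr);
	return 0;
}
#endif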

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
								 acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
							    acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
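
/*
 * Illustrative sketch, not from this file: the submit pattern the
 * comment on drm_gem_lock_reservations() describes.  The job-specific
 * middle section is elided.
 */
#if 0
static int
example_submit(struct drm_gem_object **objs, int count)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	/* ... reserve fence slots, queue the job, attach its fences ... */

	drm_gem_unlock_reservations(objs, count, &ctx);
	return 0;
}
#endif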

#ifdef notyet
/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
						&fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);

#endif /* notyet */