xref: /openbsd-src/sys/dev/pci/drm/drm_gem.c (revision a5429850edcc9dd5646cc8ddb251ed22eba08b09)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/mm.h>
31 #include <linux/uaccess.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/module.h>
35 #include <linux/mman.h>
36 #include <linux/pagemap.h>
37 #include <linux/shmem_fs.h>
38 #include <linux/dma-buf.h>
39 #include <linux/dma-buf-map.h>
40 #include <linux/mem_encrypt.h>
41 #include <linux/pagevec.h>
42 
43 #include <drm/drm.h>
44 #include <drm/drm_device.h>
45 #include <drm/drm_drv.h>
46 #include <drm/drm_file.h>
47 #include <drm/drm_gem.h>
48 #include <drm/drm_managed.h>
49 #include <drm/drm_print.h>
50 #include <drm/drm_vma_manager.h>
51 
52 #include "drm_internal.h"
53 
54 #include <sys/conf.h>
55 #include <uvm/uvm.h>
56 
57 void drm_unref(struct uvm_object *);
58 void drm_ref(struct uvm_object *);
59 boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
60 int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
61     vm_fault_t, vm_prot_t, int);
62 
63 const struct uvm_pagerops drm_pgops = {
64 	.pgo_reference = drm_ref,
65 	.pgo_detach = drm_unref,
66 	.pgo_fault = drm_fault,
67 	.pgo_flush = drm_flush,
68 };
69 
70 void
71 drm_ref(struct uvm_object *uobj)
72 {
73 	struct drm_gem_object *obj =
74 	    container_of(uobj, struct drm_gem_object, uobj);
75 
76 	drm_gem_object_get(obj);
77 }
78 
79 void
80 drm_unref(struct uvm_object *uobj)
81 {
82 	struct drm_gem_object *obj =
83 	    container_of(uobj, struct drm_gem_object, uobj);
84 
85 	drm_gem_object_put(obj);
86 }
87 
88 int
89 drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
90     int npages, int centeridx, vm_fault_t fault_type,
91     vm_prot_t access_type, int flags)
92 {
93 	struct vm_map_entry *entry = ufi->entry;
94 	struct uvm_object *uobj = entry->object.uvm_obj;
95 	struct drm_gem_object *obj =
96 	    container_of(uobj, struct drm_gem_object, uobj);
97 	struct drm_device *dev = obj->dev;
98 	int ret;
99 
100 	/*
101 	 * We do not allow device mappings to be mapped copy-on-write,
102 	 * so we kill any attempt to do so here.
103 	 */
104 
105 	if (UVM_ET_ISCOPYONWRITE(entry)) {
106 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
107 		return(VM_PAGER_ERROR);
108 	}
109 
110 	/*
111 	 * We could end up here as the result of a copyin(9) or
112 	 * copyout(9) while handling an ioctl.  So we must be careful
113 	 * not to deadlock.  Therefore we only block if the quiesce
114 	 * count is zero, which guarantees we didn't enter from within
115 	 * an ioctl code path.
116 	 */
117 	mtx_enter(&dev->quiesce_mtx);
118 	if (dev->quiesce && dev->quiesce_count == 0) {
119 		mtx_leave(&dev->quiesce_mtx);
120 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
121 		mtx_enter(&dev->quiesce_mtx);
122 		while (dev->quiesce) {
123 			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
124 			    PZERO, "drmflt", INFSLP);
125 		}
126 		mtx_leave(&dev->quiesce_mtx);
127 		return(VM_PAGER_REFAULT);
128 	}
129 	dev->quiesce_count++;
130 	mtx_leave(&dev->quiesce_mtx);
131 
132 	/* Call down into driver to do the magic */
133 	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
134 	    entry->start), vaddr, pps, npages, centeridx,
135 	    access_type, flags);
136 
137 	mtx_enter(&dev->quiesce_mtx);
138 	dev->quiesce_count--;
139 	if (dev->quiesce)
140 		wakeup(&dev->quiesce_count);
141 	mtx_leave(&dev->quiesce_mtx);
142 
143 	return (ret);
144 }
145 
146 boolean_t
147 drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
148 {
149 	return (TRUE);
150 }
151 
152 struct uvm_object *
153 udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
154 {
155 	struct drm_device *dev = drm_get_device_from_kdev(device);
156 	struct drm_gem_object *obj = NULL;
157 	struct drm_vma_offset_node *node;
158 	struct drm_file *priv;
159 	struct file *filp;
160 
161 	if (cdevsw[major(device)].d_mmap != drmmmap)
162 		return NULL;
163 
164 	if (dev == NULL)
165 		return NULL;
166 
167 	mutex_lock(&dev->filelist_mutex);
168 	priv = drm_find_file_by_minor(dev, minor(device));
169 	if (priv == NULL) {
170 		mutex_unlock(&dev->filelist_mutex);
171 		return NULL;
172 	}
173 	filp = priv->filp;
174 	mutex_unlock(&dev->filelist_mutex);
175 
176 	if (dev->driver->mmap)
177 		return dev->driver->mmap(filp, accessprot, off, size);
178 
179 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
180 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
181 						  off >> PAGE_SHIFT,
182 						  atop(round_page(size)));
183 	if (likely(node)) {
184 		obj = container_of(node, struct drm_gem_object, vma_node);
185 		/*
186 		 * When the object is being freed, after it hits 0-refcnt it
187 		 * proceeds to tear down the object. In the process it will
188 		 * attempt to remove the VMA offset and so acquire this
189 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
190 		 * that matches our range, we know it is in the process of being
191 		 * destroyed and will be freed as soon as we release the lock -
192 		 * so we have to check for the 0-refcnted object and treat it as
193 		 * invalid.
194 		 */
195 		if (!kref_get_unless_zero(&obj->refcount))
196 			obj = NULL;
197 	}
198 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
199 
200 	if (!obj)
201 		return NULL;
202 
203 	if (!drm_vma_node_is_allowed(node, priv)) {
204 		drm_gem_object_put(obj);
205 		return NULL;
206 	}
207 
208 	return &obj->uobj;
209 }
210 
211 /** @file drm_gem.c
212  *
213  * This file provides some of the base ioctls and library routines for
214  * the graphics memory manager implemented by each device driver.
215  *
216  * Because various devices have different requirements in terms of
217  * synchronization and migration strategies, implementing that is left up to
218  * the driver, and all that the general API provides should be generic --
219  * allocating objects, reading/writing data with the cpu, freeing objects.
220  * Even there, platform-dependent optimizations for reading/writing data with
221  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
222  * the DRI2 implementation wants to have at least allocate/mmap be generic.
223  *
224  * The goal was to have swap-backed object allocation managed through
225  * struct file.  However, file descriptors as handles to a struct file have
226  * two major failings:
227  * - Process limits prevent more than 1024 or so being used at a time by
228  *   default.
229  * - Inability to allocate high fds will aggravate the X Server's select()
230  *   handling, and likely that of many GL client applications as well.
231  *
232  * This led to a plan of using our own integer IDs (called handles, following
233  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
234  * ioctls.  The objects themselves will still include the struct file so
235  * that we can transition to fds if the required kernel infrastructure shows
236  * up at a later date, and as our interface with shmfs for memory allocation.
237  */
238 
239 static void
240 drm_gem_init_release(struct drm_device *dev, void *ptr)
241 {
242 	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
243 }
244 
245 /**
246  * drm_gem_init - Initialize the GEM device fields
247  * @dev: drm_device structure to initialize
248  */
249 int
250 drm_gem_init(struct drm_device *dev)
251 {
252 	struct drm_vma_offset_manager *vma_offset_manager;
253 
254 	rw_init(&dev->object_name_lock, "drmonl");
255 	idr_init_base(&dev->object_name_idr, 1);
256 
257 	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
258 					  GFP_KERNEL);
259 	if (!vma_offset_manager) {
260 		DRM_ERROR("out of memory\n");
261 		return -ENOMEM;
262 	}
263 
264 	dev->vma_offset_manager = vma_offset_manager;
265 	drm_vma_offset_manager_init(vma_offset_manager,
266 				    DRM_FILE_PAGE_OFFSET_START,
267 				    DRM_FILE_PAGE_OFFSET_SIZE);
268 
269 	return drmm_add_action(dev, drm_gem_init_release, NULL);
270 }
271 
272 #ifdef __linux__
273 
274 /**
275  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
276  * @dev: drm_device the object should be initialized for
277  * @obj: drm_gem_object to initialize
278  * @size: object size
279  *
280  * Initialize an already allocated GEM object of the specified size with
281  * shmfs backing store.
282  */
283 int drm_gem_object_init(struct drm_device *dev,
284 			struct drm_gem_object *obj, size_t size)
285 {
286 	struct file *filp;
287 
288 	drm_gem_private_object_init(dev, obj, size);
289 
290 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
291 	if (IS_ERR(filp))
292 		return PTR_ERR(filp);
293 
294 	obj->filp = filp;
295 
296 	return 0;
297 }
298 EXPORT_SYMBOL(drm_gem_object_init);
299 
300 #else
301 
302 int drm_gem_object_init(struct drm_device *dev,
303 			struct drm_gem_object *obj, size_t size)
304 {
305 	drm_gem_private_object_init(dev, obj, size);
306 
307 	if (size > (512 * 1024 * 1024)) {
308 		printf("%s size too big %lu\n", __func__, size);
309 		return -ENOMEM;
310 	}
311 
312 	obj->uao = uao_create(size, 0);
313 	uvm_obj_init(&obj->uobj, &drm_pgops, 1);
314 
315 	return 0;
316 }
317 
318 #endif
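
/*
 * Example (illustrative sketch): a driver typically embeds struct
 * drm_gem_object in its own buffer-object type and sets up the backing
 * store with drm_gem_object_init().  The names struct foo_bo,
 * foo_gem_funcs and foo_bo_create() are hypothetical and only show the
 * calling pattern.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *
 *	foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (bo == NULL)
 *			return ERR_PTR(-ENOMEM);
 *
 *		bo->base.funcs = &foo_gem_funcs;
 *		ret = drm_gem_object_init(dev, &bo->base, round_page(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */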
319 
320 /**
321  * drm_gem_private_object_init - initialize an allocated private GEM object
322  * @dev: drm_device the object should be initialized for
323  * @obj: drm_gem_object to initialize
324  * @size: object size
325  *
326  * Initialize an already allocated GEM object of the specified size with
327  * no GEM provided backing store. Instead the caller is responsible for
328  * backing the object and handling it.
329  */
330 void drm_gem_private_object_init(struct drm_device *dev,
331 				 struct drm_gem_object *obj, size_t size)
332 {
333 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
334 
335 	obj->dev = dev;
336 #ifdef __linux__
337 	obj->filp = NULL;
338 #else
339 	obj->uao = NULL;
340 	obj->uobj.pgops = NULL;
341 #endif
342 
343 	kref_init(&obj->refcount);
344 	obj->handle_count = 0;
345 	obj->size = size;
346 	dma_resv_init(&obj->_resv);
347 	if (!obj->resv)
348 		obj->resv = &obj->_resv;
349 
350 	drm_vma_node_reset(&obj->vma_node);
351 }
352 EXPORT_SYMBOL(drm_gem_private_object_init);
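
/*
 * Example (illustrative sketch): objects whose storage the driver manages
 * itself (e.g. VRAM ranges or imported dma-bufs) skip the shmem/uao backing
 * and use drm_gem_private_object_init() instead.  struct foo_vram_bo and
 * foo_gem_funcs are hypothetical names.
 *
 *	struct foo_vram_bo {
 *		struct drm_gem_object base;
 *		u64 vram_offset;
 *	};
 *
 *	static void
 *	foo_vram_bo_init(struct drm_device *dev, struct foo_vram_bo *bo,
 *	    size_t size)
 *	{
 *		bo->base.funcs = &foo_gem_funcs;
 *		drm_gem_private_object_init(dev, &bo->base, round_page(size));
 *	}
 *
 * The driver then allocates and tracks the actual VRAM range (recorded in
 * vram_offset above) on its own.
 */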
353 
354 /**
355  * drm_gem_object_handle_free - release resources bound to userspace handles
356  * @obj: GEM object to clean up.
357  *
358  * Called after the last handle to the object has been closed.
359  *
360  * Removes any name for the object. Note that this must be
361  * called before drm_gem_object_free or we'll be touching
362  * freed memory.
363  */
364 static void drm_gem_object_handle_free(struct drm_gem_object *obj)
365 {
366 	struct drm_device *dev = obj->dev;
367 
368 	/* Remove any name for this object */
369 	if (obj->name) {
370 		idr_remove(&dev->object_name_idr, obj->name);
371 		obj->name = 0;
372 	}
373 }
374 
375 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
376 {
377 	/* Unbreak the reference cycle if we have an exported dma_buf. */
378 	if (obj->dma_buf) {
379 		dma_buf_put(obj->dma_buf);
380 		obj->dma_buf = NULL;
381 	}
382 }
383 
384 static void
385 drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
386 {
387 	struct drm_device *dev = obj->dev;
388 	bool final = false;
389 
390 	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
391 		return;
392 
393 	/*
394 	 * Must bump handle count first as this may be the last
395 	 * ref, in which case the object would disappear before we
396 	 * checked for a name.
397 	 */
398 
399 	mutex_lock(&dev->object_name_lock);
400 	if (--obj->handle_count == 0) {
401 		drm_gem_object_handle_free(obj);
402 		drm_gem_object_exported_dma_buf_free(obj);
403 		final = true;
404 	}
405 	mutex_unlock(&dev->object_name_lock);
406 
407 	if (final)
408 		drm_gem_object_put(obj);
409 }
410 
411 /*
412  * Called at device or object close to release the file's
413  * handle references on objects.
414  */
415 static int
416 drm_gem_object_release_handle(int id, void *ptr, void *data)
417 {
418 	struct drm_file *file_priv = data;
419 	struct drm_gem_object *obj = ptr;
420 
421 	if (obj->funcs->close)
422 		obj->funcs->close(obj, file_priv);
423 
424 	drm_prime_remove_buf_handle(&file_priv->prime, id);
425 	drm_vma_node_revoke(&obj->vma_node, file_priv);
426 
427 	drm_gem_object_handle_put_unlocked(obj);
428 
429 	return 0;
430 }
431 
432 /**
433  * drm_gem_handle_delete - deletes the given file-private handle
434  * @filp: drm file-private structure to use for the handle look up
435  * @handle: userspace handle to delete
436  *
437  * Removes the GEM handle that was added with drm_gem_handle_create() from the
438  * @filp lookup table. If this is the last handle, it also cleans up linked
439  * resources like GEM names.
440  */
441 int
442 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
443 {
444 	struct drm_gem_object *obj;
445 
446 	spin_lock(&filp->table_lock);
447 
448 	/* Check if we currently have a reference on the object */
449 	obj = idr_replace(&filp->object_idr, NULL, handle);
450 	spin_unlock(&filp->table_lock);
451 	if (IS_ERR_OR_NULL(obj))
452 		return -EINVAL;
453 
454 	/* Release driver's reference and decrement refcount. */
455 	drm_gem_object_release_handle(handle, obj, filp);
456 
457 	/* And finally make the handle available for future allocations. */
458 	spin_lock(&filp->table_lock);
459 	idr_remove(&filp->object_idr, handle);
460 	spin_unlock(&filp->table_lock);
461 
462 	return 0;
463 }
464 EXPORT_SYMBOL(drm_gem_handle_delete);
465 
466 /**
467  * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
468  * @file: drm file-private structure containing the gem object
469  * @dev: corresponding drm_device
470  * @handle: gem object handle
471  * @offset: return location for the fake mmap offset
472  *
473  * This implements the &drm_driver.dumb_map_offset kms driver callback for
474  * drivers which use gem to manage their backing storage.
475  *
476  * Returns:
477  * 0 on success or a negative error code on failure.
478  */
479 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
480 			    u32 handle, u64 *offset)
481 {
482 	struct drm_gem_object *obj;
483 	int ret;
484 
485 	obj = drm_gem_object_lookup(file, handle);
486 	if (!obj)
487 		return -ENOENT;
488 
489 	/* Don't allow imported objects to be mapped */
490 	if (obj->import_attach) {
491 		ret = -EINVAL;
492 		goto out;
493 	}
494 
495 	ret = drm_gem_create_mmap_offset(obj);
496 	if (ret)
497 		goto out;
498 
499 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
500 out:
501 	drm_gem_object_put(obj);
502 
503 	return ret;
504 }
505 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
506 
507 int drm_gem_dumb_destroy(struct drm_file *file,
508 			 struct drm_device *dev,
509 			 u32 handle)
510 {
511 	return drm_gem_handle_delete(file, handle);
512 }
513 
514 /**
515  * drm_gem_handle_create_tail - internal function to create a handle
516  * @file_priv: drm file-private structure to register the handle for
517  * @obj: object to register
518  * @handlep: pointer to return the created handle to the caller
519  *
520  * This expects the &drm_device.object_name_lock to be held already and will
521  * drop it before returning. Used to avoid races in establishing new handles
522  * when importing an object from either an flink name or a dma-buf.
523  * when importing an object from either a flink name or a dma-buf.
524  * Handles must be release again through drm_gem_handle_delete(). This is done
525  * Handles must be released again through drm_gem_handle_delete(). This is done
526  * GEM_CLOSE ioctl for individual handles.
527  */
528 int
529 drm_gem_handle_create_tail(struct drm_file *file_priv,
530 			   struct drm_gem_object *obj,
531 			   u32 *handlep)
532 {
533 	struct drm_device *dev = obj->dev;
534 	u32 handle;
535 	int ret;
536 
537 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
538 	if (obj->handle_count++ == 0)
539 		drm_gem_object_get(obj);
540 
541 	/*
542 	 * Get the user-visible handle using idr.  Preload and perform
543 	 * allocation under our spinlock.
544 	 */
545 	idr_preload(GFP_KERNEL);
546 	spin_lock(&file_priv->table_lock);
547 
548 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
549 
550 	spin_unlock(&file_priv->table_lock);
551 	idr_preload_end();
552 
553 	mutex_unlock(&dev->object_name_lock);
554 	if (ret < 0)
555 		goto err_unref;
556 
557 	handle = ret;
558 
559 	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
560 	if (ret)
561 		goto err_remove;
562 
563 	if (obj->funcs->open) {
564 		ret = obj->funcs->open(obj, file_priv);
565 		if (ret)
566 			goto err_revoke;
567 	}
568 
569 	*handlep = handle;
570 	return 0;
571 
572 err_revoke:
573 	drm_vma_node_revoke(&obj->vma_node, file_priv);
574 err_remove:
575 	spin_lock(&file_priv->table_lock);
576 	idr_remove(&file_priv->object_idr, handle);
577 	spin_unlock(&file_priv->table_lock);
578 err_unref:
579 	drm_gem_object_handle_put_unlocked(obj);
580 	return ret;
581 }
582 
583 /**
584  * drm_gem_handle_create - create a gem handle for an object
585  * @file_priv: drm file-private structure to register the handle for
586  * @obj: object to register
587  * @handlep: pointer to return the created handle to the caller
588  *
589  * Create a handle for this object. This adds a handle reference to the object,
590  * which includes a regular reference count. Callers will likely want to
591  * dereference the object afterwards.
592  *
593  * Since this publishes @obj to userspace it must be fully set up by this point;
594  * drivers must call this last in their buffer object creation callbacks.
595  */
596 int drm_gem_handle_create(struct drm_file *file_priv,
597 			  struct drm_gem_object *obj,
598 			  u32 *handlep)
599 {
600 	mutex_lock(&obj->dev->object_name_lock);
601 
602 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
603 }
604 EXPORT_SYMBOL(drm_gem_handle_create);
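
/*
 * Example (illustrative sketch): a typical buffer-creation ioctl publishes
 * the object to userspace as its final step and then drops its local
 * reference, leaving the new handle as the only reference.  foo_bo_create()
 * and struct drm_foo_create are hypothetical.
 *
 *	int
 *	foo_create_ioctl(struct drm_device *dev, void *data,
 *	    struct drm_file *file_priv)
 *	{
 *		struct drm_foo_create *args = data;
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base,
 *		    &args->handle);
 *		drm_gem_object_put(&bo->base);
 *		return ret;
 *	}
 */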
605 
606 
607 /**
608  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
609  * @obj: obj in question
610  *
611  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
612  *
613  * Note that drm_gem_object_release() already calls this function, so drivers
614  * don't have to take care of releasing the mmap offset themselves when freeing
615  * the GEM object.
616  */
617 void
618 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
619 {
620 	struct drm_device *dev = obj->dev;
621 
622 	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
623 }
624 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
625 
626 /**
627  * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
628  * @obj: obj in question
629  * @size: the virtual size
630  *
631  * GEM memory mapping works by handing back to userspace a fake mmap offset
632  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
633  * up the object based on the offset and sets up the various memory mapping
634  * structures.
635  *
636  * This routine allocates and attaches a fake offset for @obj, in cases where
637  * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
638  * Otherwise just use drm_gem_create_mmap_offset().
639  *
640  * This function is idempotent and handles an already allocated mmap offset
641  * transparently. Drivers do not need to check for this case.
642  */
643 int
644 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
645 {
646 	struct drm_device *dev = obj->dev;
647 
648 	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
649 				  size / PAGE_SIZE);
650 }
651 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
652 
653 /**
654  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
655  * @obj: obj in question
656  *
657  * GEM memory mapping works by handing back to userspace a fake mmap offset
658  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
659  * up the object based on the offset and sets up the various memory mapping
660  * structures.
661  *
662  * This routine allocates and attaches a fake offset for @obj.
663  *
664  * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
665  * the fake offset again.
666  */
667 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
668 {
669 	return drm_gem_create_mmap_offset_size(obj, obj->size);
670 }
671 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
672 
673 #ifdef notyet
674 /*
675  * Move pages to appropriate lru and release the pagevec, decrementing the
676  * ref count of those pages.
677  */
678 static void drm_gem_check_release_pagevec(struct pagevec *pvec)
679 {
680 	check_move_unevictable_pages(pvec);
681 	__pagevec_release(pvec);
682 	cond_resched();
683 }
684 #endif
685 
686 /**
687  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
688  * from shmem
689  * @obj: obj in question
690  *
691  * This reads the page-array of the shmem-backing storage of the given gem
692  * object. An array of pages is returned. If a page is not allocated or
693  * swapped-out, this will allocate/swap-in the required pages. Note that the
694  * whole object is covered by the page-array and pinned in memory.
695  *
696  * Use drm_gem_put_pages() to release the array and unpin all pages.
697  *
698  * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
699  * If you require other GFP-masks, you have to do those allocations yourself.
700  *
701  * Note that you are not allowed to change gfp-zones during runtime. That is,
702  * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
703  * set during initialization. If you have special zone constraints, set them
704  * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
705  * to keep pages in the required zone during swap-in.
706  *
707  * This function is only valid on objects initialized with
708  * drm_gem_object_init(), but not for those initialized with
709  * drm_gem_private_object_init() only.
710  */
711 struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
712 {
713 	STUB();
714 	return ERR_PTR(-ENOSYS);
715 #ifdef notyet
716 	struct address_space *mapping;
717 	struct vm_page *p, **pages;
718 	struct pagevec pvec;
719 	int i, npages;
720 
721 
722 	if (WARN_ON(!obj->filp))
723 		return ERR_PTR(-EINVAL);
724 
725 	/* This is the shared memory object that backs the GEM resource */
726 	mapping = obj->filp->f_mapping;
727 
728 	/* We already BUG_ON() for non-page-aligned sizes in
729 	 * drm_gem_object_init(), so we should never hit this unless
730 	 * driver author is doing something really wrong:
731 	 */
732 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
733 
734 	npages = obj->size >> PAGE_SHIFT;
735 
736 	pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
737 	if (pages == NULL)
738 		return ERR_PTR(-ENOMEM);
739 
740 	mapping_set_unevictable(mapping);
741 
742 	for (i = 0; i < npages; i++) {
743 		p = shmem_read_mapping_page(mapping, i);
744 		if (IS_ERR(p))
745 			goto fail;
746 		pages[i] = p;
747 
748 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
749 		 * correct region during swapin. Note that this requires
750 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
751 		 * so shmem can relocate pages during swapin if required.
752 		 */
753 		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
754 				(page_to_pfn(p) >= 0x00100000UL));
755 	}
756 
757 	return pages;
758 
759 fail:
760 	mapping_clear_unevictable(mapping);
761 	pagevec_init(&pvec);
762 	while (i--) {
763 		if (!pagevec_add(&pvec, pages[i]))
764 			drm_gem_check_release_pagevec(&pvec);
765 	}
766 	if (pagevec_count(&pvec))
767 		drm_gem_check_release_pagevec(&pvec);
768 
769 	kvfree(pages);
770 	return ERR_CAST(p);
771 #endif
772 }
773 EXPORT_SYMBOL(drm_gem_get_pages);
774 
775 /**
776  * drm_gem_put_pages - helper to free backing pages for a GEM object
777  * @obj: obj in question
778  * @pages: pages to free
779  * @dirty: if true, pages will be marked as dirty
780  * @accessed: if true, the pages will be marked as accessed
781  */
782 void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
783 		bool dirty, bool accessed)
784 {
785 	STUB();
786 #ifdef notyet
787 	int i, npages;
788 	struct address_space *mapping;
789 	struct pagevec pvec;
790 
791 	mapping = file_inode(obj->filp)->i_mapping;
792 	mapping_clear_unevictable(mapping);
793 
794 	/* We already BUG_ON() for non-page-aligned sizes in
795 	 * drm_gem_object_init(), so we should never hit this unless
796 	 * driver author is doing something really wrong:
797 	 */
798 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
799 
800 	npages = obj->size >> PAGE_SHIFT;
801 
802 	pagevec_init(&pvec);
803 	for (i = 0; i < npages; i++) {
804 		if (!pages[i])
805 			continue;
806 
807 		if (dirty)
808 			set_page_dirty(pages[i]);
809 
810 		if (accessed)
811 			mark_page_accessed(pages[i]);
812 
813 		/* Undo the reference we took when populating the table */
814 		if (!pagevec_add(&pvec, pages[i]))
815 			drm_gem_check_release_pagevec(&pvec);
816 	}
817 	if (pagevec_count(&pvec))
818 		drm_gem_check_release_pagevec(&pvec);
819 
820 	kvfree(pages);
821 #endif
822 }
823 EXPORT_SYMBOL(drm_gem_put_pages);
824 
825 static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
826 			  struct drm_gem_object **objs)
827 {
828 	int i, ret = 0;
829 	struct drm_gem_object *obj;
830 
831 	spin_lock(&filp->table_lock);
832 
833 	for (i = 0; i < count; i++) {
834 		/* Check if we currently have a reference on the object */
835 		obj = idr_find(&filp->object_idr, handle[i]);
836 		if (!obj) {
837 			ret = -ENOENT;
838 			break;
839 		}
840 		drm_gem_object_get(obj);
841 		objs[i] = obj;
842 	}
843 	spin_unlock(&filp->table_lock);
844 
845 	return ret;
846 }
847 
848 /**
849  * drm_gem_objects_lookup - look up GEM objects from an array of handles
850  * @filp: DRM file private data
851  * @bo_handles: user pointer to array of userspace handles
852  * @count: size of handle array
853  * @objs_out: returned pointer to array of drm_gem_object pointers
854  *
855  * Takes an array of userspace handles and returns a newly allocated array of
856  * GEM objects.
857  *
858  * For a single handle lookup, use drm_gem_object_lookup().
859  *
860  * Returns:
861  *
862  * @objs_out filled in with GEM object pointers. Returned GEM objects need to be
863  * released with drm_gem_object_put(). -ENOENT is returned on a lookup
864  * failure. 0 is returned on success.
865  *
866  */
867 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
868 			   int count, struct drm_gem_object ***objs_out)
869 {
870 	int ret;
871 	u32 *handles;
872 	struct drm_gem_object **objs;
873 
874 	if (!count)
875 		return 0;
876 
877 	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
878 			     GFP_KERNEL | __GFP_ZERO);
879 	if (!objs)
880 		return -ENOMEM;
881 
882 	*objs_out = objs;
883 
884 	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
885 	if (!handles) {
886 		ret = -ENOMEM;
887 		goto out;
888 	}
889 
890 	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
891 		ret = -EFAULT;
892 		DRM_DEBUG("Failed to copy in GEM handles\n");
893 		goto out;
894 	}
895 
896 	ret = objects_lookup(filp, handles, count, objs);
897 out:
898 	kvfree(handles);
899 	return ret;
900 
901 }
902 EXPORT_SYMBOL(drm_gem_objects_lookup);
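
/*
 * Example (illustrative sketch): looking up every buffer object referenced
 * by a submit-style ioctl and releasing the references afterwards.  The args
 * fields are hypothetical; u64_to_user_ptr() is the usual way to pass the
 * userspace handle array down.
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *	    u64_to_user_ptr(args->bo_handles), args->bo_count, &objs);
 *
 *	... on success, validate and use the objects ...
 *
 *	if (objs) {
 *		for (i = 0; i < args->bo_count; i++) {
 *			if (objs[i])
 *				drm_gem_object_put(objs[i]);
 *		}
 *		kvfree(objs);
 *	}
 */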
903 
904 /**
905  * drm_gem_object_lookup - look up a GEM object from its handle
906  * @filp: DRM file private data
907  * @handle: userspace handle
908  *
909  * Returns:
910  *
911  * A reference to the object named by the handle if such exists on @filp, NULL
912  * otherwise.
913  *
914  * If looking up an array of handles, use drm_gem_objects_lookup().
915  */
916 struct drm_gem_object *
917 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
918 {
919 	struct drm_gem_object *obj = NULL;
920 
921 	objects_lookup(filp, &handle, 1, &obj);
922 	return obj;
923 }
924 EXPORT_SYMBOL(drm_gem_object_lookup);
925 
926 /**
927  * drm_gem_dma_resv_wait - Wait on the shared and/or exclusive fences of a
928  * GEM object's reservation object.
929  * @filep: DRM file private data
930  * @handle: userspace handle
931  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
932  * @timeout: timeout value in jiffies or zero to return immediately
933  *
934  * Returns:
935  *
936  * 0 on success, -ETIME if the wait timed out, -ERESTARTSYS if interrupted,
937  * or -EINVAL if the handle lookup failed.
938  */
939 long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
940 				    bool wait_all, unsigned long timeout)
941 {
942 	long ret;
943 	struct drm_gem_object *obj;
944 
945 	obj = drm_gem_object_lookup(filep, handle);
946 	if (!obj) {
947 		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
948 		return -EINVAL;
949 	}
950 
951 	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
952 	if (ret == 0)
953 		ret = -ETIME;
954 	else if (ret > 0)
955 		ret = 0;
956 
957 	drm_gem_object_put(obj);
958 
959 	return ret;
960 }
961 EXPORT_SYMBOL(drm_gem_dma_resv_wait);
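
/*
 * Example (illustrative sketch): a driver wait ioctl can be implemented
 * almost entirely with this helper.  struct drm_foo_wait and its timeout_ms
 * field are hypothetical.
 *
 *	int
 *	foo_wait_ioctl(struct drm_device *dev, void *data,
 *	    struct drm_file *file_priv)
 *	{
 *		struct drm_foo_wait *args = data;
 *		unsigned long timeout = msecs_to_jiffies(args->timeout_ms);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *		    true, timeout);
 *	}
 */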
962 
963 /**
964  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
965  * @dev: drm_device
966  * @data: ioctl data
967  * @file_priv: drm file-private structure
968  *
969  * Releases the handle to an mm object.
970  */
971 int
972 drm_gem_close_ioctl(struct drm_device *dev, void *data,
973 		    struct drm_file *file_priv)
974 {
975 	struct drm_gem_close *args = data;
976 	int ret;
977 
978 	if (!drm_core_check_feature(dev, DRIVER_GEM))
979 		return -EOPNOTSUPP;
980 
981 	ret = drm_gem_handle_delete(file_priv, args->handle);
982 
983 	return ret;
984 }
985 
986 /**
987  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
988  * @dev: drm_device
989  * @data: ioctl data
990  * @file_priv: drm file-private structure
991  *
992  * Create a global name for an object, returning the name.
993  *
994  * Note that the name does not hold a reference; when the object
995  * is freed, the name goes away.
996  */
997 int
998 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
999 		    struct drm_file *file_priv)
1000 {
1001 	struct drm_gem_flink *args = data;
1002 	struct drm_gem_object *obj;
1003 	int ret;
1004 
1005 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1006 		return -EOPNOTSUPP;
1007 
1008 	obj = drm_gem_object_lookup(file_priv, args->handle);
1009 	if (obj == NULL)
1010 		return -ENOENT;
1011 
1012 	mutex_lock(&dev->object_name_lock);
1013 	/* prevent races with concurrent gem_close. */
1014 	if (obj->handle_count == 0) {
1015 		ret = -ENOENT;
1016 		goto err;
1017 	}
1018 
1019 	if (!obj->name) {
1020 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
1021 		if (ret < 0)
1022 			goto err;
1023 
1024 		obj->name = ret;
1025 	}
1026 
1027 	args->name = (uint64_t) obj->name;
1028 	ret = 0;
1029 
1030 err:
1031 	mutex_unlock(&dev->object_name_lock);
1032 	drm_gem_object_put(obj);
1033 	return ret;
1034 }
1035 
1036 /**
1037  * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
1038  * @dev: drm_device
1039  * @data: ioctl data
1040  * @file_priv: drm file-private structure
1041  *
1042  * Open an object using the global name, returning a handle and the size.
1043  *
1044  * This handle (of course) holds a reference to the object, so the object
1045  * will not go away until the handle is deleted.
1046  */
1047 int
1048 drm_gem_open_ioctl(struct drm_device *dev, void *data,
1049 		   struct drm_file *file_priv)
1050 {
1051 	struct drm_gem_open *args = data;
1052 	struct drm_gem_object *obj;
1053 	int ret;
1054 	u32 handle;
1055 
1056 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1057 		return -EOPNOTSUPP;
1058 
1059 	mutex_lock(&dev->object_name_lock);
1060 	obj = idr_find(&dev->object_name_idr, (int) args->name);
1061 	if (obj) {
1062 		drm_gem_object_get(obj);
1063 	} else {
1064 		mutex_unlock(&dev->object_name_lock);
1065 		return -ENOENT;
1066 	}
1067 
1068 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
1069 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
1070 	if (ret)
1071 		goto err;
1072 
1073 	args->handle = handle;
1074 	args->size = obj->size;
1075 
1076 err:
1077 	drm_gem_object_put(obj);
1078 	return ret;
1079 }
1080 
1081 /**
1082  * drm_gem_open - initializes GEM file-private structures at devnode open time
1083  * @dev: drm_device which is being opened by userspace
1084  * @file_private: drm file-private structure to set up
1085  *
1086  * Called at device open time, sets up the structure for handling refcounting
1087  * of mm objects.
1088  */
1089 void
1090 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
1091 {
1092 	idr_init_base(&file_private->object_idr, 1);
1093 	mtx_init(&file_private->table_lock, IPL_NONE);
1094 }
1095 
1096 /**
1097  * drm_gem_release - release file-private GEM resources
1098  * @dev: drm_device which is being closed by userspace
1099  * @file_private: drm file-private structure to clean up
1100  *
1101  * Called at close time when the filp is going away.
1102  *
1103  * Releases any remaining references on objects by this filp.
1104  */
1105 void
1106 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
1107 {
1108 	idr_for_each(&file_private->object_idr,
1109 		     &drm_gem_object_release_handle, file_private);
1110 	idr_destroy(&file_private->object_idr);
1111 }
1112 
1113 /**
1114  * drm_gem_object_release - release GEM buffer object resources
1115  * @obj: GEM buffer object
1116  *
1117  * This releases any structures and resources used by @obj and is the inverse of
1118  * drm_gem_object_init().
1119  */
1120 void
1121 drm_gem_object_release(struct drm_gem_object *obj)
1122 {
1123 	WARN_ON(obj->dma_buf);
1124 
1125 #ifdef __linux__
1126 	if (obj->filp)
1127 		fput(obj->filp);
1128 #else
1129 	if (obj->uao)
1130 		uao_detach(obj->uao);
1131 	if (obj->uobj.pgops)
1132 		uvm_obj_destroy(&obj->uobj);
1133 #endif
1134 
1135 	dma_resv_fini(&obj->_resv);
1136 	drm_gem_free_mmap_offset(obj);
1137 }
1138 EXPORT_SYMBOL(drm_gem_object_release);
1139 
1140 /**
1141  * drm_gem_object_free - free a GEM object
1142  * @kref: kref of the object to free
1143  *
1144  * Called after the last reference to the object has been lost.
1145  *
1146  * Frees the object
1147  */
1148 void
1149 drm_gem_object_free(struct kref *kref)
1150 {
1151 	struct drm_gem_object *obj =
1152 		container_of(kref, struct drm_gem_object, refcount);
1153 
1154 	if (WARN_ON(!obj->funcs->free))
1155 		return;
1156 
1157 	obj->funcs->free(obj);
1158 }
1159 EXPORT_SYMBOL(drm_gem_object_free);
1160 
1161 #ifdef __linux__
1162 /**
1163  * drm_gem_vm_open - vma->ops->open implementation for GEM
1164  * @vma: VM area structure
1165  *
1166  * This function implements the #vm_operations_struct open() callback for GEM
1167  * drivers. This must be used together with drm_gem_vm_close().
1168  */
1169 void drm_gem_vm_open(struct vm_area_struct *vma)
1170 {
1171 	struct drm_gem_object *obj = vma->vm_private_data;
1172 
1173 	drm_gem_object_get(obj);
1174 }
1175 EXPORT_SYMBOL(drm_gem_vm_open);
1176 
1177 /**
1178  * drm_gem_vm_close - vma->ops->close implementation for GEM
1179  * @vma: VM area structure
1180  *
1181  * This function implements the #vm_operations_struct close() callback for GEM
1182  * drivers. This must be used together with drm_gem_vm_open().
1183  */
1184 void drm_gem_vm_close(struct vm_area_struct *vma)
1185 {
1186 	struct drm_gem_object *obj = vma->vm_private_data;
1187 
1188 	drm_gem_object_put(obj);
1189 }
1190 EXPORT_SYMBOL(drm_gem_vm_close);
1191 
1192 /**
1193  * drm_gem_mmap_obj - memory map a GEM object
1194  * @obj: the GEM object to map
1195  * @obj_size: the object size to be mapped, in bytes
1196  * @vma: VMA for the area to be mapped
1197  *
1198  * Set up the VMA to prepare mapping of the GEM object using the GEM object's
1199  * vm_ops. Depending on their requirements, GEM objects can either
1200  * provide a fault handler in their vm_ops (in which case any accesses to
1201  * the object will be trapped, to perform migration, GTT binding, surface
1202  * register allocation, or performance monitoring), or mmap the buffer memory
1203  * synchronously after calling drm_gem_mmap_obj.
1204  *
1205  * This function is mainly intended to implement the DMABUF mmap operation, when
1206  * the GEM object is not looked up based on its fake offset. To implement the
1207  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
1208  *
1209  * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
1210  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
1211  * callers must verify access restrictions before calling this helper.
1212  *
1213  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
1214  * size, or if no vm_ops are provided.
1215  */
1216 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1217 		     struct vm_area_struct *vma)
1218 {
1219 	int ret;
1220 
1221 	/* Check for valid size. */
1222 	if (obj_size < vma->vm_end - vma->vm_start)
1223 		return -EINVAL;
1224 
1225 	/* Take a ref for this mapping of the object, so that the fault
1226 	 * handler can dereference the mmap offset's pointer to the object.
1227 	 * This reference is cleaned up by the corresponding vm_close
1228 	 * (which should happen whether the vma was created by this call, or
1229 	 * by a vm_open due to mremap or partial unmap or whatever).
1230 	 */
1231 	drm_gem_object_get(obj);
1232 
1233 	vma->vm_private_data = obj;
1234 	vma->vm_ops = obj->funcs->vm_ops;
1235 
1236 	if (obj->funcs->mmap) {
1237 		ret = obj->funcs->mmap(obj, vma);
1238 		if (ret)
1239 			goto err_drm_gem_object_put;
1240 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1241 	} else {
1242 		if (!vma->vm_ops) {
1243 			ret = -EINVAL;
1244 			goto err_drm_gem_object_put;
1245 		}
1246 
1247 		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1248 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1249 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1250 	}
1251 
1252 	return 0;
1253 
1254 err_drm_gem_object_put:
1255 	drm_gem_object_put(obj);
1256 	return ret;
1257 }
1258 EXPORT_SYMBOL(drm_gem_mmap_obj);
1259 
1260 /**
1261  * drm_gem_mmap - memory map routine for GEM objects
1262  * @filp: DRM file pointer
1263  * @vma: VMA for the area to be mapped
1264  *
1265  * If a driver supports GEM object mapping, mmap calls on the DRM file
1266  * descriptor will end up here.
1267  *
1268  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
1269  * contain the fake offset we created when the GTT map ioctl was called on
1270  * the object) and map it with a call to drm_gem_mmap_obj().
1271  *
1272  * If the caller is not granted access to the buffer object, the mmap will fail
1273  * with EACCES. Please see the vma manager for more information.
1274  */
1275 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1276 {
1277 	struct drm_file *priv = filp->private_data;
1278 	struct drm_device *dev = priv->minor->dev;
1279 	struct drm_gem_object *obj = NULL;
1280 	struct drm_vma_offset_node *node;
1281 	int ret;
1282 
1283 	if (drm_dev_is_unplugged(dev))
1284 		return -ENODEV;
1285 
1286 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1287 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1288 						  vma->vm_pgoff,
1289 						  vma_pages(vma));
1290 	if (likely(node)) {
1291 		obj = container_of(node, struct drm_gem_object, vma_node);
1292 		/*
1293 		 * When the object is being freed, after it hits 0-refcnt it
1294 		 * proceeds to tear down the object. In the process it will
1295 		 * attempt to remove the VMA offset and so acquire this
1296 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1297 		 * that matches our range, we know it is in the process of being
1298 		 * destroyed and will be freed as soon as we release the lock -
1299 		 * so we have to check for the 0-refcnted object and treat it as
1300 		 * invalid.
1301 		 */
1302 		if (!kref_get_unless_zero(&obj->refcount))
1303 			obj = NULL;
1304 	}
1305 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1306 
1307 	if (!obj)
1308 		return -EINVAL;
1309 
1310 	if (!drm_vma_node_is_allowed(node, priv)) {
1311 		drm_gem_object_put(obj);
1312 		return -EACCES;
1313 	}
1314 
1315 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1316 			       vma);
1317 
1318 	drm_gem_object_put(obj);
1319 
1320 	return ret;
1321 }
1322 EXPORT_SYMBOL(drm_gem_mmap);
1323 #else /* ! __linux__ */
1324 
1325 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1326 		     vm_prot_t accessprot, voff_t off, vsize_t size)
1327 {
1328 	int ret;
1329 
1330 	/* Check for valid size. */
1331 	if (obj_size < size)
1332 		return -EINVAL;
1333 
1334 	/* Take a ref for this mapping of the object, so that the fault
1335 	 * handler can dereference the mmap offset's pointer to the object.
1336 	 * This reference is cleaned up by the corresponding vm_close
1337 	 * (which should happen whether the vma was created by this call, or
1338 	 * by a vm_open due to mremap or partial unmap or whatever).
1339 	 */
1340 	drm_gem_object_get(obj);
1341 
1342 #ifdef __linux__
1343 	vma->vm_private_data = obj;
1344 	vma->vm_ops = obj->funcs->vm_ops;
1345 #else
1346 	if (obj->uobj.pgops == NULL)
1347 		uvm_obj_init(&obj->uobj, obj->funcs->vm_ops, 1);
1348 #endif
1349 
1350 	if (obj->funcs->mmap) {
1351 		ret = obj->funcs->mmap(obj, accessprot, off, size);
1352 		if (ret)
1353 			goto err_drm_gem_object_put;
1354 #ifdef notyet
1355 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1356 #endif
1357 	} else {
1358 #ifdef notyet
1359 		if (!vma->vm_ops) {
1360 			ret = -EINVAL;
1361 			goto err_drm_gem_object_put;
1362 		}
1363 
1364 		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1365 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1366 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1367 #else
1368 		ret = -EINVAL;
1369 		goto err_drm_gem_object_put;
1370 #endif
1371 	}
1372 
1373 	return 0;
1374 
1375 err_drm_gem_object_put:
1376 	drm_gem_object_put(obj);
1377 	return ret;
1378 }
1379 
1380 struct uvm_object *
1381 drm_gem_mmap(struct file *filp, vm_prot_t accessprot, voff_t off,
1382     vsize_t size)
1383 {
1384 	struct drm_file *priv = (void *)filp;
1385 	struct drm_device *dev = priv->minor->dev;
1386 	struct drm_gem_object *obj = NULL;
1387 	struct drm_vma_offset_node *node;
1388 	int ret;
1389 
1390 	if (drm_dev_is_unplugged(dev))
1391 		return NULL;
1392 
1393 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1394 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1395 						  off >> PAGE_SHIFT,
1396 						  atop(round_page(size)));
1397 	if (likely(node)) {
1398 		obj = container_of(node, struct drm_gem_object, vma_node);
1399 		/*
1400 		 * When the object is being freed, after it hits 0-refcnt it
1401 		 * proceeds to tear down the object. In the process it will
1402 		 * attempt to remove the VMA offset and so acquire this
1403 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1404 		 * that matches our range, we know it is in the process of being
1405 		 * destroyed and will be freed as soon as we release the lock -
1406 		 * so we have to check for the 0-refcnted object and treat it as
1407 		 * invalid.
1408 		 */
1409 		if (!kref_get_unless_zero(&obj->refcount))
1410 			obj = NULL;
1411 	}
1412 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1413 
1414 	if (!obj)
1415 		return NULL;
1416 
1417 	if (!drm_vma_node_is_allowed(node, priv)) {
1418 		drm_gem_object_put(obj);
1419 		return NULL;
1420 	}
1421 
1422 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1423 			       accessprot, off, size);
1424 
1425 	drm_gem_object_put(obj);
1426 
1427 	return &obj->uobj;
1428 }
1429 
1430 #endif /* __linux__ */
1431 
1432 void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1433 			const struct drm_gem_object *obj)
1434 {
1435 	drm_printf_indent(p, indent, "name=%d\n", obj->name);
1436 	drm_printf_indent(p, indent, "refcount=%u\n",
1437 			  kref_read(&obj->refcount));
1438 	drm_printf_indent(p, indent, "start=%08lx\n",
1439 			  drm_vma_node_start(&obj->vma_node));
1440 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1441 	drm_printf_indent(p, indent, "imported=%s\n",
1442 			  obj->import_attach ? "yes" : "no");
1443 
1444 	if (obj->funcs->print_info)
1445 		obj->funcs->print_info(p, indent, obj);
1446 }
1447 
1448 int drm_gem_pin(struct drm_gem_object *obj)
1449 {
1450 	if (obj->funcs->pin)
1451 		return obj->funcs->pin(obj);
1452 	else
1453 		return 0;
1454 }
1455 
1456 void drm_gem_unpin(struct drm_gem_object *obj)
1457 {
1458 	if (obj->funcs->unpin)
1459 		obj->funcs->unpin(obj);
1460 }
1461 
1462 int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
1463 {
1464 	int ret;
1465 
1466 	if (!obj->funcs->vmap)
1467 		return -EOPNOTSUPP;
1468 
1469 	ret = obj->funcs->vmap(obj, map);
1470 	if (ret)
1471 		return ret;
1472 	else if (dma_buf_map_is_null(map))
1473 		return -ENOMEM;
1474 
1475 	return 0;
1476 }
1477 EXPORT_SYMBOL(drm_gem_vmap);
1478 
1479 void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
1480 {
1481 	if (dma_buf_map_is_null(map))
1482 		return;
1483 
1484 	if (obj->funcs->vunmap)
1485 		obj->funcs->vunmap(obj, map);
1486 
1487 	/* Always set the mapping to NULL. Callers may rely on this. */
1488 	dma_buf_map_clear(map);
1489 }
1490 EXPORT_SYMBOL(drm_gem_vunmap);
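
/*
 * Example (illustrative sketch): temporary CPU access to a buffer through
 * the driver's vmap/vunmap callbacks, using the dma_buf_map helpers to stay
 * agnostic of system vs. I/O memory.  src and len are hypothetical.
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	dma_buf_map_memcpy_to(&map, src, len);
 *
 *	drm_gem_vunmap(obj, &map);
 */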
1491 
1492 /**
1493  * drm_gem_lock_reservations - Sets up the ww context and acquires
1494  * the lock on an array of GEM objects.
1495  *
1496  * Once you've locked your reservations, you'll want to set up space
1497  * for your shared fences (if applicable), submit your job, then call
1498  * drm_gem_unlock_reservations().
1499  *
1500  * @objs: drm_gem_objects to lock
1501  * @count: Number of objects in @objs
1502  * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
1503  * part of tracking this set of locked reservations.
1504  */
1505 int
1506 drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
1507 			  struct ww_acquire_ctx *acquire_ctx)
1508 {
1509 	int contended = -1;
1510 	int i, ret;
1511 
1512 	ww_acquire_init(acquire_ctx, &reservation_ww_class);
1513 
1514 retry:
1515 	if (contended != -1) {
1516 		struct drm_gem_object *obj = objs[contended];
1517 
1518 		ret = dma_resv_lock_slow_interruptible(obj->resv,
1519 								 acquire_ctx);
1520 		if (ret) {
1521 			ww_acquire_fini(acquire_ctx);
1522 			return ret;
1523 		}
1524 	}
1525 
1526 	for (i = 0; i < count; i++) {
1527 		if (i == contended)
1528 			continue;
1529 
1530 		ret = dma_resv_lock_interruptible(objs[i]->resv,
1531 							    acquire_ctx);
1532 		if (ret) {
1533 			int j;
1534 
1535 			for (j = 0; j < i; j++)
1536 				dma_resv_unlock(objs[j]->resv);
1537 
1538 			if (contended != -1 && contended >= i)
1539 				dma_resv_unlock(objs[contended]->resv);
1540 
1541 			if (ret == -EDEADLK) {
1542 				contended = i;
1543 				goto retry;
1544 			}
1545 
1546 			ww_acquire_fini(acquire_ctx);
1547 			return ret;
1548 		}
1549 	}
1550 
1551 	ww_acquire_done(acquire_ctx);
1552 
1553 	return 0;
1554 }
1555 EXPORT_SYMBOL(drm_gem_lock_reservations);
1556 
1557 void
1558 drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
1559 			    struct ww_acquire_ctx *acquire_ctx)
1560 {
1561 	int i;
1562 
1563 	for (i = 0; i < count; i++)
1564 		dma_resv_unlock(objs[i]->resv);
1565 
1566 	ww_acquire_fini(acquire_ctx);
1567 }
1568 EXPORT_SYMBOL(drm_gem_unlock_reservations);
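
/*
 * Example (illustrative sketch): the usual submission pattern around these
 * two helpers; the fence setup in the middle is driver-specific.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	... reserve fence slots and attach the job's fences ...
 *
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */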
1569 
1570 #ifdef notyet
1571 /**
1572  * drm_gem_fence_array_add - Adds the fence to an array of fences to be
1573  * waited on, deduplicating fences from the same context.
1574  *
1575  * @fence_array: array of dma_fence * for the job to block on.
1576  * @fence: the dma_fence to add to the list of dependencies.
1577  *
1578  * This function consumes the reference for @fence both on success and error
1579  * cases.
1580  *
1581  * Returns:
1582  * 0 on success, or an error on failing to expand the array.
1583  */
1584 int drm_gem_fence_array_add(struct xarray *fence_array,
1585 			    struct dma_fence *fence)
1586 {
1587 	struct dma_fence *entry;
1588 	unsigned long index;
1589 	u32 id = 0;
1590 	int ret;
1591 
1592 	if (!fence)
1593 		return 0;
1594 
1595 	/* Deduplicate if we already depend on a fence from the same context.
1596 	 * This lets the size of the array of deps scale with the number of
1597 	 * engines involved, rather than the number of BOs.
1598 	 */
1599 	xa_for_each(fence_array, index, entry) {
1600 		if (entry->context != fence->context)
1601 			continue;
1602 
1603 		if (dma_fence_is_later(fence, entry)) {
1604 			dma_fence_put(entry);
1605 			xa_store(fence_array, index, fence, GFP_KERNEL);
1606 		} else {
1607 			dma_fence_put(fence);
1608 		}
1609 		return 0;
1610 	}
1611 
1612 	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
1613 	if (ret != 0)
1614 		dma_fence_put(fence);
1615 
1616 	return ret;
1617 }
1618 EXPORT_SYMBOL(drm_gem_fence_array_add);
1619 
1620 /**
1621  * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
1622  * in the GEM object's reservation object to an array of dma_fences for use in
1623  * scheduling a rendering job.
1624  *
1625  * This should be called after drm_gem_lock_reservations() on your array of
1626  * GEM objects used in the job but before updating the reservations with your
1627  * own fences.
1628  *
1629  * @fence_array: array of dma_fence * for the job to block on.
1630  * @obj: the gem object to add new dependencies from.
1631  * @write: whether the job might write the object (so we need to depend on
1632  * shared fences in the reservation object).
1633  */
1634 int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
1635 				     struct drm_gem_object *obj,
1636 				     bool write)
1637 {
1638 	int ret;
1639 	struct dma_fence **fences;
1640 	unsigned int i, fence_count;
1641 
1642 	if (!write) {
1643 		struct dma_fence *fence =
1644 			dma_resv_get_excl_unlocked(obj->resv);
1645 
1646 		return drm_gem_fence_array_add(fence_array, fence);
1647 	}
1648 
1649 	ret = dma_resv_get_fences(obj->resv, NULL,
1650 						&fence_count, &fences);
1651 	if (ret || !fence_count)
1652 		return ret;
1653 
1654 	for (i = 0; i < fence_count; i++) {
1655 		ret = drm_gem_fence_array_add(fence_array, fences[i]);
1656 		if (ret)
1657 			break;
1658 	}
1659 
1660 	for (; i < fence_count; i++)
1661 		dma_fence_put(fences[i]);
1662 	kfree(fences);
1663 	return ret;
1664 }
1665 EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
1666 
1667 #endif /* notyet */
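
/*
 * Example (illustrative sketch; the helpers above are still compiled out as
 * "notyet" in this port): collecting the implicit dependencies of each
 * buffer object of a job before handing it to a scheduler.  The job fields
 * (deps xarray, bos array, bo_write flags) are hypothetical.
 *
 *	for (i = 0; i < job->bo_count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps,
 *		    job->bos[i], job->bo_write[i]);
 *		if (ret)
 *			return ret;
 *	}
 */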
1668