xref: /openbsd-src/sys/dev/pci/drm/drm_gem.c (revision 3374c67d44f9b75b98444cbf63020f777792342e)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <linux/dma-buf.h>
29 #include <linux/file.h>
30 #include <linux/fs.h>
31 #include <linux/iosys-map.h>
32 #include <linux/mem_encrypt.h>
33 #include <linux/mm.h>
34 #include <linux/mman.h>
35 #include <linux/module.h>
36 #include <linux/pagemap.h>
37 #include <linux/pagevec.h>
38 #include <linux/shmem_fs.h>
39 #include <linux/slab.h>
40 #include <linux/string_helpers.h>
41 #include <linux/types.h>
42 #include <linux/uaccess.h>
43 
44 #include <drm/drm.h>
45 #include <drm/drm_device.h>
46 #include <drm/drm_drv.h>
47 #include <drm/drm_file.h>
48 #include <drm/drm_gem.h>
49 #include <drm/drm_managed.h>
50 #include <drm/drm_print.h>
51 #include <drm/drm_vma_manager.h>
52 
53 #include "drm_internal.h"
54 
55 #include <sys/conf.h>
56 #include <uvm/uvm.h>
57 
58 void drm_unref(struct uvm_object *);
59 void drm_ref(struct uvm_object *);
60 boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
61 int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
62     vm_fault_t, vm_prot_t, int);
63 
64 const struct uvm_pagerops drm_pgops = {
65 	.pgo_reference = drm_ref,
66 	.pgo_detach = drm_unref,
67 	.pgo_fault = drm_fault,
68 	.pgo_flush = drm_flush,
69 };
70 
71 void
72 drm_ref(struct uvm_object *uobj)
73 {
74 	struct drm_gem_object *obj =
75 	    container_of(uobj, struct drm_gem_object, uobj);
76 
77 	drm_gem_object_get(obj);
78 }
79 
80 void
81 drm_unref(struct uvm_object *uobj)
82 {
83 	struct drm_gem_object *obj =
84 	    container_of(uobj, struct drm_gem_object, uobj);
85 
86 	drm_gem_object_put(obj);
87 }
88 
89 int
90 drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
91     int npages, int centeridx, vm_fault_t fault_type,
92     vm_prot_t access_type, int flags)
93 {
94 	struct vm_map_entry *entry = ufi->entry;
95 	struct uvm_object *uobj = entry->object.uvm_obj;
96 	struct drm_gem_object *obj =
97 	    container_of(uobj, struct drm_gem_object, uobj);
98 	struct drm_device *dev = obj->dev;
99 	int ret;
100 
101 	/*
102 	 * we do not allow device mappings to be mapped copy-on-write
103 	 * so we kill any attempt to do so here.
104 	 */
105 
106 	if (UVM_ET_ISCOPYONWRITE(entry)) {
107 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
108 		return (VM_PAGER_ERROR);
109 	}
110 
111 	/*
112 	 * We could end up here as the result of a copyin(9) or
113 	 * copyout(9) while handling an ioctl.  So we must be careful
114 	 * not to deadlock.  Therefore we only block if the quiesce
115 	 * count is zero, which guarantees we didn't enter from within
116 	 * an ioctl code path.
117 	 */
118 	mtx_enter(&dev->quiesce_mtx);
119 	if (dev->quiesce && dev->quiesce_count == 0) {
120 		mtx_leave(&dev->quiesce_mtx);
121 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
122 		mtx_enter(&dev->quiesce_mtx);
123 		while (dev->quiesce) {
124 			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
125 			    PZERO, "drmflt", INFSLP);
126 		}
127 		mtx_leave(&dev->quiesce_mtx);
128 		return (VM_PAGER_REFAULT);
129 	}
130 	dev->quiesce_count++;
131 	mtx_leave(&dev->quiesce_mtx);
132 
133 	/* Call down into driver to do the magic */
134 	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
135 	    entry->start), vaddr, pps, npages, centeridx,
136 	    access_type, flags);
137 
138 	mtx_enter(&dev->quiesce_mtx);
139 	dev->quiesce_count--;
140 	if (dev->quiesce)
141 		wakeup(&dev->quiesce_count);
142 	mtx_leave(&dev->quiesce_mtx);
143 
144 	return (ret);
145 }
146 
147 boolean_t
148 drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
149 {
150 	return (TRUE);
151 }
152 
153 struct uvm_object *
154 udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
155 {
156 	struct drm_device *dev = drm_get_device_from_kdev(device);
157 	struct drm_gem_object *obj = NULL;
158 	struct drm_vma_offset_node *node;
159 	struct drm_file *priv;
160 	struct file *filp;
161 
162 	if (cdevsw[major(device)].d_mmap != drmmmap)
163 		return NULL;
164 
165 	if (dev == NULL)
166 		return NULL;
167 
168 	mutex_lock(&dev->filelist_mutex);
169 	priv = drm_find_file_by_minor(dev, minor(device));
170 	if (priv == NULL) {
171 		mutex_unlock(&dev->filelist_mutex);
172 		return NULL;
173 	}
174 	filp = priv->filp;
175 	mutex_unlock(&dev->filelist_mutex);
176 
177 	if (dev->driver->mmap)
178 		return dev->driver->mmap(filp, accessprot, off, size);
179 
180 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
181 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
182 						  off >> PAGE_SHIFT,
183 						  atop(round_page(size)));
184 	if (likely(node)) {
185 		obj = container_of(node, struct drm_gem_object, vma_node);
186 		/*
187 		 * When the object is being freed, after it hits 0-refcnt it
188 		 * proceeds to tear down the object. In the process it will
189 		 * attempt to remove the VMA offset and so acquire this
190 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
191 		 * that matches our range, we know it is in the process of being
192 		 * destroyed and will be freed as soon as we release the lock -
193 		 * so we have to check for the 0-refcnted object and treat it as
194 		 * invalid.
195 		 */
196 		if (!kref_get_unless_zero(&obj->refcount))
197 			obj = NULL;
198 	}
199 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
200 
201 	if (!obj)
202 		return NULL;
203 
204 	if (!drm_vma_node_is_allowed(node, priv)) {
205 		drm_gem_object_put(obj);
206 		return NULL;
207 	}
208 
209 	return &obj->uobj;
210 }
211 
212 /** @file drm_gem.c
213  *
214  * This file provides some of the base ioctls and library routines for
215  * the graphics memory manager implemented by each device driver.
216  *
217  * Because various devices have different requirements in terms of
218  * synchronization and migration strategies, implementing that is left up to
219  * the driver, and all that the general API provides should be generic --
220  * allocating objects, reading/writing data with the CPU, freeing objects.
221  * Even there, platform-dependent optimizations for reading/writing data with
222  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
223  * the DRI2 implementation wants to have at least allocate/mmap be generic.
224  *
225  * The goal was to have swap-backed object allocation managed through
226  * struct file.  However, file descriptors as handles to a struct file have
227  * two major failings:
228  * - Process limits prevent more than 1024 or so being used at a time by
229  *   default.
230  * - Inability to allocate high fds will aggravate the X Server's select()
231  *   handling, and likely that of many GL client applications as well.
232  *
233  * This led to a plan of using our own integer IDs (called handles, following
234  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
235  * ioctls.  The objects themselves will still include the struct file so
236  * that we can transition to fds if the required kernel infrastructure shows
237  * up at a later date, and as our interface with shmfs for memory allocation.
238  */
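/*
 * A minimal userspace sketch of the handle model described above, using the
 * generic GEM ioctls from <drm/drm.h> (hypothetical snippet; error handling
 * elided, drm_fd is an already-open DRM device file descriptor):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	struct drm_gem_open oargs = { 0 };
 *	struct drm_gem_close cargs = { 0 };
 *
 *	ioctl(drm_fd, DRM_IOCTL_GEM_FLINK, &flink);	// publish global name
 *	oargs.name = flink.name;
 *	ioctl(drm_fd, DRM_IOCTL_GEM_OPEN, &oargs);	// name -> fresh handle
 *	cargs.handle = oargs.handle;
 *	ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &cargs);	// drop that handle
 */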
239 
240 static void
241 drm_gem_init_release(struct drm_device *dev, void *ptr)
242 {
243 	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
244 }
245 
246 /**
247  * drm_gem_init - Initialize the GEM device fields
248  * @dev: drm_device structure to initialize
249  */
250 int
251 drm_gem_init(struct drm_device *dev)
252 {
253 	struct drm_vma_offset_manager *vma_offset_manager;
254 
255 	rw_init(&dev->object_name_lock, "drmonl");
256 	idr_init_base(&dev->object_name_idr, 1);
257 
258 	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
259 					  GFP_KERNEL);
260 	if (!vma_offset_manager) {
261 		DRM_ERROR("out of memory\n");
262 		return -ENOMEM;
263 	}
264 
265 	dev->vma_offset_manager = vma_offset_manager;
266 	drm_vma_offset_manager_init(vma_offset_manager,
267 				    DRM_FILE_PAGE_OFFSET_START,
268 				    DRM_FILE_PAGE_OFFSET_SIZE);
269 
270 	return drmm_add_action(dev, drm_gem_init_release, NULL);
271 }
272 
273 #ifdef __linux__
274 
275 /**
276  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
277  * @dev: drm_device the object should be initialized for
278  * @obj: drm_gem_object to initialize
279  * @size: object size
280  *
281  * Initialize an already allocated GEM object of the specified size with
282  * shmfs backing store.
283  */
284 int drm_gem_object_init(struct drm_device *dev,
285 			struct drm_gem_object *obj, size_t size)
286 {
287 	struct file *filp;
288 
289 	drm_gem_private_object_init(dev, obj, size);
290 
291 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
292 	if (IS_ERR(filp))
293 		return PTR_ERR(filp);
294 
295 	obj->filp = filp;
296 
297 	return 0;
298 }
299 EXPORT_SYMBOL(drm_gem_object_init);
300 
301 #else
302 
303 int drm_gem_object_init(struct drm_device *dev,
304 			struct drm_gem_object *obj, size_t size)
305 {
306 	drm_gem_private_object_init(dev, obj, size);
307 
308 	if (size > (512 * 1024 * 1024)) {
309 		printf("%s size too big %zu\n", __func__, size);
310 		return -ENOMEM;
311 	}
312 
313 	obj->uao = uao_create(size, 0);
314 	uvm_obj_init(&obj->uobj, &drm_pgops, 1);
315 
316 	return 0;
317 }
318 
319 #endif
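/*
 * A sketch of the usual driver-side pattern (the "foo" names are
 * hypothetical): embed the GEM object in a driver structure, then initialize
 * it with a page-aligned size; round_page() stands in for whatever
 * page-aligning helper the platform provides.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (bo == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_object_init(dev, &bo->base, round_page(size));
 *	if (ret) {
 *		kfree(bo);
 *		return ret;
 *	}
 */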
320 
321 /**
322  * drm_gem_private_object_init - initialize an allocated private GEM object
323  * @dev: drm_device the object should be initialized for
324  * @obj: drm_gem_object to initialize
325  * @size: object size
326  *
327  * Initialize an already allocated GEM object of the specified size with
328  * no GEM provided backing store. Instead the caller is responsible for
329  * backing the object and handling it.
330  */
331 void drm_gem_private_object_init(struct drm_device *dev,
332 				 struct drm_gem_object *obj, size_t size)
333 {
334 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
335 
336 	obj->dev = dev;
337 #ifdef __linux__
338 	obj->filp = NULL;
339 #else
340 	obj->uao = NULL;
341 	obj->uobj.pgops = NULL;
342 #endif
343 
344 	kref_init(&obj->refcount);
345 	obj->handle_count = 0;
346 	obj->size = size;
347 	dma_resv_init(&obj->_resv);
348 	if (!obj->resv)
349 		obj->resv = &obj->_resv;
350 
351 	drm_vma_node_reset(&obj->vma_node);
352 	INIT_LIST_HEAD(&obj->lru_node);
353 }
354 EXPORT_SYMBOL(drm_gem_private_object_init);
355 
356 /**
357  * drm_gem_object_handle_free - release resources bound to userspace handles
358  * @obj: GEM object to clean up.
359  *
360  * Called after the last handle to the object has been closed
361  *
362  * Removes any name for the object. Note that this must be
363  * called before drm_gem_object_free or we'll be touching
364  * freed memory.
365  */
366 static void drm_gem_object_handle_free(struct drm_gem_object *obj)
367 {
368 	struct drm_device *dev = obj->dev;
369 
370 	/* Remove any name for this object */
371 	if (obj->name) {
372 		idr_remove(&dev->object_name_idr, obj->name);
373 		obj->name = 0;
374 	}
375 }
376 
377 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
378 {
379 	/* Unbreak the reference cycle if we have an exported dma_buf. */
380 	if (obj->dma_buf) {
381 		dma_buf_put(obj->dma_buf);
382 		obj->dma_buf = NULL;
383 	}
384 }
385 
386 static void
387 drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
388 {
389 	struct drm_device *dev = obj->dev;
390 	bool final = false;
391 
392 	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
393 		return;
394 
395 	/*
396 	 * Must bump handle count first as this may be the last
397 	 * ref, in which case the object would disappear before we
398 	 * checked for a name.
399 	 */
400 
401 	mutex_lock(&dev->object_name_lock);
402 	if (--obj->handle_count == 0) {
403 		drm_gem_object_handle_free(obj);
404 		drm_gem_object_exported_dma_buf_free(obj);
405 		final = true;
406 	}
407 	mutex_unlock(&dev->object_name_lock);
408 
409 	if (final)
410 		drm_gem_object_put(obj);
411 }
412 
413 /*
414  * Called at device or object close to release the file's
415  * handle references on objects.
416  */
417 static int
418 drm_gem_object_release_handle(int id, void *ptr, void *data)
419 {
420 	struct drm_file *file_priv = data;
421 	struct drm_gem_object *obj = ptr;
422 
423 	if (obj->funcs->close)
424 		obj->funcs->close(obj, file_priv);
425 
426 	drm_prime_remove_buf_handle(&file_priv->prime, id);
427 	drm_vma_node_revoke(&obj->vma_node, file_priv);
428 
429 	drm_gem_object_handle_put_unlocked(obj);
430 
431 	return 0;
432 }
433 
434 /**
435  * drm_gem_handle_delete - deletes the given file-private handle
436  * @filp: drm file-private structure to use for the handle look up
437  * @handle: userspace handle to delete
438  *
439  * Removes the GEM handle from the @filp lookup table which has been added with
440  * drm_gem_handle_create(). If this is the last handle also cleans up linked
441  * resources like GEM names.
442  */
443 int
444 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
445 {
446 	struct drm_gem_object *obj;
447 
448 	spin_lock(&filp->table_lock);
449 
450 	/* Check if we currently have a reference on the object */
451 	obj = idr_replace(&filp->object_idr, NULL, handle);
452 	spin_unlock(&filp->table_lock);
453 	if (IS_ERR_OR_NULL(obj))
454 		return -EINVAL;
455 
456 	/* Release driver's reference and decrement refcount. */
457 	drm_gem_object_release_handle(handle, obj, filp);
458 
459 	/* And finally make the handle available for future allocations. */
460 	spin_lock(&filp->table_lock);
461 	idr_remove(&filp->object_idr, handle);
462 	spin_unlock(&filp->table_lock);
463 
464 	return 0;
465 }
466 EXPORT_SYMBOL(drm_gem_handle_delete);
467 
468 /**
469  * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
470  * @file: drm file-private structure containing the gem object
471  * @dev: corresponding drm_device
472  * @handle: gem object handle
473  * @offset: return location for the fake mmap offset
474  *
475  * This implements the &drm_driver.dumb_map_offset kms driver callback for
476  * drivers which use gem to manage their backing storage.
477  *
478  * Returns:
479  * 0 on success or a negative error code on failure.
480  */
481 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
482 			    u32 handle, u64 *offset)
483 {
484 	struct drm_gem_object *obj;
485 	int ret;
486 
487 	obj = drm_gem_object_lookup(file, handle);
488 	if (!obj)
489 		return -ENOENT;
490 
491 	/* Don't allow imported objects to be mapped */
492 	if (obj->import_attach) {
493 		ret = -EINVAL;
494 		goto out;
495 	}
496 
497 	ret = drm_gem_create_mmap_offset(obj);
498 	if (ret)
499 		goto out;
500 
501 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
502 out:
503 	drm_gem_object_put(obj);
504 
505 	return ret;
506 }
507 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
508 
509 int drm_gem_dumb_destroy(struct drm_file *file,
510 			 struct drm_device *dev,
511 			 u32 handle)
512 {
513 	return drm_gem_handle_delete(file, handle);
514 }
515 
516 /**
517  * drm_gem_handle_create_tail - internal function to create a handle
518  * @file_priv: drm file-private structure to register the handle for
519  * @obj: object to register
520  * @handlep: pointer to return the created handle to the caller
521  *
522  * This expects the &drm_device.object_name_lock to be held already and will
523  * drop it before returning. Used to avoid races in establishing new handles
524  * when importing an object from either a flink name or a dma-buf.
525  *
526  * Handles must be released again through drm_gem_handle_delete(). This is done
527  * when userspace closes @file_priv for all attached handles, or through the
528  * GEM_CLOSE ioctl for individual handles.
529  */
530 int
531 drm_gem_handle_create_tail(struct drm_file *file_priv,
532 			   struct drm_gem_object *obj,
533 			   u32 *handlep)
534 {
535 	struct drm_device *dev = obj->dev;
536 	u32 handle;
537 	int ret;
538 
539 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
540 	if (obj->handle_count++ == 0)
541 		drm_gem_object_get(obj);
542 
543 	/*
544 	 * Get the user-visible handle using idr.  Preload and perform
545 	 * allocation under our spinlock.
546 	 */
547 	idr_preload(GFP_KERNEL);
548 	spin_lock(&file_priv->table_lock);
549 
550 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
551 
552 	spin_unlock(&file_priv->table_lock);
553 	idr_preload_end();
554 
555 	mutex_unlock(&dev->object_name_lock);
556 	if (ret < 0)
557 		goto err_unref;
558 
559 	handle = ret;
560 
561 	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
562 	if (ret)
563 		goto err_remove;
564 
565 	if (obj->funcs->open) {
566 		ret = obj->funcs->open(obj, file_priv);
567 		if (ret)
568 			goto err_revoke;
569 	}
570 
571 	*handlep = handle;
572 	return 0;
573 
574 err_revoke:
575 	drm_vma_node_revoke(&obj->vma_node, file_priv);
576 err_remove:
577 	spin_lock(&file_priv->table_lock);
578 	idr_remove(&file_priv->object_idr, handle);
579 	spin_unlock(&file_priv->table_lock);
580 err_unref:
581 	drm_gem_object_handle_put_unlocked(obj);
582 	return ret;
583 }
584 
585 /**
586  * drm_gem_handle_create - create a gem handle for an object
587  * @file_priv: drm file-private structure to register the handle for
588  * @obj: object to register
589  * @handlep: pointer to return the created handle to the caller
590  *
591  * Create a handle for this object. This adds a handle reference to the object,
592  * which includes a regular reference count. Callers will likely want to
593  * dereference the object afterwards.
594  *
595  * Since this publishes @obj to userspace it must be fully set up by this point;
596  * drivers must call this last in their buffer object creation callbacks.
597  */
598 int drm_gem_handle_create(struct drm_file *file_priv,
599 			  struct drm_gem_object *obj,
600 			  u32 *handlep)
601 {
602 	mutex_lock(&obj->dev->object_name_lock);
603 
604 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
605 }
606 EXPORT_SYMBOL(drm_gem_handle_create);
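/*
 * Typical creation-path usage (sketch): publish the object as the last step,
 * then drop the creation reference so the handle owns the only reference.
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *	drm_gem_object_put(&bo->base);	// handle now holds the reference
 *	if (ret)
 *		return ret;
 */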
607 
608 
609 /**
610  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
611  * @obj: obj in question
612  *
613  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
614  *
615  * Note that drm_gem_object_release() already calls this function, so drivers
616  * don't have to take care of releasing the mmap offset themselves when freeing
617  * the GEM object.
618  */
619 void
620 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
621 {
622 	struct drm_device *dev = obj->dev;
623 
624 	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
625 }
626 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
627 
628 /**
629  * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
630  * @obj: obj in question
631  * @size: the virtual size
632  *
633  * GEM memory mapping works by handing back to userspace a fake mmap offset
634  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
635  * up the object based on the offset and sets up the various memory mapping
636  * structures.
637  *
638  * This routine allocates and attaches a fake offset for @obj, in cases where
639  * the virtual size differs from the physical size (ie. &drm_gem_object.size).
640  * Otherwise just use drm_gem_create_mmap_offset().
641  *
642  * This function is idempotent and handles an already allocated mmap offset
643  * transparently. Drivers do not need to check for this case.
644  */
645 int
646 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
647 {
648 	struct drm_device *dev = obj->dev;
649 
650 	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
651 				  size / PAGE_SIZE);
652 }
653 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
654 
655 /**
656  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
657  * @obj: obj in question
658  *
659  * GEM memory mapping works by handing back to userspace a fake mmap offset
660  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
661  * up the object based on the offset and sets up the various memory mapping
662  * structures.
663  *
664  * This routine allocates and attaches a fake offset for @obj.
665  *
666  * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
667  * the fake offset again.
668  */
669 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
670 {
671 	return drm_gem_create_mmap_offset_size(obj, obj->size);
672 }
673 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
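/*
 * End-to-end sketch of the fake-offset flow (hypothetical; the ioctl
 * plumbing between the two halves is driver-specific):
 *
 *	// kernel, in the driver's dumb_map_offset path:
 *	ret = drm_gem_create_mmap_offset(obj);
 *	offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *	// userspace, consuming that offset:
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    drm_fd, offset);
 */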
674 
675 #ifdef notyet
676 /*
677  * Move pages to appropriate lru and release the pagevec, decrementing the
678  * ref count of those pages.
679  */
680 static void drm_gem_check_release_pagevec(struct pagevec *pvec)
681 {
682 	check_move_unevictable_pages(pvec);
683 	__pagevec_release(pvec);
684 	cond_resched();
685 }
686 #endif
687 
688 /**
689  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
690  * from shmem
691  * @obj: obj in question
692  *
693  * This reads the page-array of the shmem-backing storage of the given gem
694  * object. An array of pages is returned. If a page is not allocated or
695  * swapped-out, this will allocate/swap-in the required pages. Note that the
696  * whole object is covered by the page-array and pinned in memory.
697  *
698  * Use drm_gem_put_pages() to release the array and unpin all pages.
699  *
700  * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
701  * If you require other GFP-masks, you have to do those allocations yourself.
702  *
703  * Note that you are not allowed to change gfp-zones during runtime. That is,
704  * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
705  * set during initialization. If you have special zone constraints, set them
706  * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
707  * to keep pages in the required zone during swap-in.
708  *
709  * This function is only valid on objects initialized with
710  * drm_gem_object_init(), but not for those initialized with
711  * drm_gem_private_object_init() only.
712  */
713 struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
714 {
715 	STUB();
716 	return ERR_PTR(-ENOSYS);
717 #ifdef notyet
718 	struct address_space *mapping;
719 	struct vm_page *p, **pages;
720 	struct pagevec pvec;
721 	int i, npages;
722 
723 
724 	if (WARN_ON(!obj->filp))
725 		return ERR_PTR(-EINVAL);
726 
727 	/* This is the shared memory object that backs the GEM resource */
728 	mapping = obj->filp->f_mapping;
729 
730 	/* We already BUG_ON() for non-page-aligned sizes in
731 	 * drm_gem_object_init(), so we should never hit this unless
732 	 * driver author is doing something really wrong:
733 	 */
734 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
735 
736 	npages = obj->size >> PAGE_SHIFT;
737 
738 	pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
739 	if (pages == NULL)
740 		return ERR_PTR(-ENOMEM);
741 
742 	mapping_set_unevictable(mapping);
743 
744 	for (i = 0; i < npages; i++) {
745 		p = shmem_read_mapping_page(mapping, i);
746 		if (IS_ERR(p))
747 			goto fail;
748 		pages[i] = p;
749 
750 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
751 		 * correct region during swapin. Note that this requires
752 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
753 		 * so shmem can relocate pages during swapin if required.
754 		 */
755 		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
756 				(page_to_pfn(p) >= 0x00100000UL));
757 	}
758 
759 	return pages;
760 
761 fail:
762 	mapping_clear_unevictable(mapping);
763 	pagevec_init(&pvec);
764 	while (i--) {
765 		if (!pagevec_add(&pvec, pages[i]))
766 			drm_gem_check_release_pagevec(&pvec);
767 	}
768 	if (pagevec_count(&pvec))
769 		drm_gem_check_release_pagevec(&pvec);
770 
771 	kvfree(pages);
772 	return ERR_CAST(p);
773 #endif
774 }
775 EXPORT_SYMBOL(drm_gem_get_pages);
776 
777 /**
778  * drm_gem_put_pages - helper to free backing pages for a GEM object
779  * @obj: obj in question
780  * @pages: pages to free
781  * @dirty: if true, pages will be marked as dirty
782  * @accessed: if true, the pages will be marked as accessed
783  */
784 void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
785 		bool dirty, bool accessed)
786 {
787 	STUB();
788 #ifdef notyet
789 	int i, npages;
790 	struct address_space *mapping;
791 	struct pagevec pvec;
792 
793 	mapping = file_inode(obj->filp)->i_mapping;
794 	mapping_clear_unevictable(mapping);
795 
796 	/* We already BUG_ON() for non-page-aligned sizes in
797 	 * drm_gem_object_init(), so we should never hit this unless
798 	 * driver author is doing something really wrong:
799 	 */
800 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
801 
802 	npages = obj->size >> PAGE_SHIFT;
803 
804 	pagevec_init(&pvec);
805 	for (i = 0; i < npages; i++) {
806 		if (!pages[i])
807 			continue;
808 
809 		if (dirty)
810 			set_page_dirty(pages[i]);
811 
812 		if (accessed)
813 			mark_page_accessed(pages[i]);
814 
815 		/* Undo the reference we took when populating the table */
816 		if (!pagevec_add(&pvec, pages[i]))
817 			drm_gem_check_release_pagevec(&pvec);
818 	}
819 	if (pagevec_count(&pvec))
820 		drm_gem_check_release_pagevec(&pvec);
821 
822 	kvfree(pages);
823 #endif
824 }
825 EXPORT_SYMBOL(drm_gem_put_pages);
826 
827 static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
828 			  struct drm_gem_object **objs)
829 {
830 	int i, ret = 0;
831 	struct drm_gem_object *obj;
832 
833 	spin_lock(&filp->table_lock);
834 
835 	for (i = 0; i < count; i++) {
836 		/* Check if we currently have a reference on the object */
837 		obj = idr_find(&filp->object_idr, handle[i]);
838 		if (!obj) {
839 			ret = -ENOENT;
840 			break;
841 		}
842 		drm_gem_object_get(obj);
843 		objs[i] = obj;
844 	}
845 	spin_unlock(&filp->table_lock);
846 
847 	return ret;
848 }
849 
850 /**
851  * drm_gem_objects_lookup - look up GEM objects from an array of handles
852  * @filp: DRM file private data
853  * @bo_handles: user pointer to array of userspace handles
854  * @count: size of handle array
855  * @objs_out: returned pointer to array of drm_gem_object pointers
856  *
857  * Takes an array of userspace handles and returns a newly allocated array of
858  * GEM objects.
859  *
860  * For a single handle lookup, use drm_gem_object_lookup().
861  *
862  * Returns:
863  *
864  * @objs_out filled in with GEM object pointers. Returned GEM objects need to be
865  * released with drm_gem_object_put(). -ENOENT is returned on a lookup
866  * failure. 0 is returned on success.
867  *
868  */
869 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
870 			   int count, struct drm_gem_object ***objs_out)
871 {
872 	int ret;
873 	u32 *handles;
874 	struct drm_gem_object **objs;
875 
876 	if (!count)
877 		return 0;
878 
879 	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
880 			     GFP_KERNEL | __GFP_ZERO);
881 	if (!objs)
882 		return -ENOMEM;
883 
884 	*objs_out = objs;
885 
886 	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
887 	if (!handles) {
888 		ret = -ENOMEM;
889 		goto out;
890 	}
891 
892 	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
893 		ret = -EFAULT;
894 		DRM_DEBUG("Failed to copy in GEM handles\n");
895 		goto out;
896 	}
897 
898 	ret = objects_lookup(filp, handles, count, objs);
899 out:
900 	kvfree(handles);
901 	return ret;
902 
903 }
904 EXPORT_SYMBOL(drm_gem_objects_lookup);
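/*
 * Hedged usage sketch for an execbuf-style ioctl (the args fields are
 * hypothetical): look the handles up once, then drop every reference when
 * the job no longer needs the objects.
 *
 *	struct drm_gem_object **objs = NULL;
 *	ret = drm_gem_objects_lookup(file, u64_to_user_ptr(args->bo_handles),
 *	    args->bo_count, &objs);
 *	...
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */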
905 
906 /**
907  * drm_gem_object_lookup - look up a GEM object from its handle
908  * @filp: DRM file private data
909  * @handle: userspace handle
910  *
911  * Returns:
912  *
913  * A reference to the object named by the handle if such exists on @filp, NULL
914  * otherwise.
915  *
916  * If looking up an array of handles, use drm_gem_objects_lookup().
917  */
918 struct drm_gem_object *
919 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
920 {
921 	struct drm_gem_object *obj = NULL;
922 
923 	objects_lookup(filp, &handle, 1, &obj);
924 	return obj;
925 }
926 EXPORT_SYMBOL(drm_gem_object_lookup);
927 
928 /**
929  * drm_gem_dma_resv_wait - wait on the shared and/or exclusive fences of a
930  * GEM object's reservation object
931  * @filep: DRM file private data
932  * @handle: userspace handle
933  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
934  * @timeout: timeout value in jiffies or zero to return immediately
935  *
936  * Returns:
937  *
938  * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
939  * 0 on success.
940  */
941 long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
942 				    bool wait_all, unsigned long timeout)
943 {
944 	long ret;
945 	struct drm_gem_object *obj;
946 
947 	obj = drm_gem_object_lookup(filep, handle);
948 	if (!obj) {
949 		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
950 		return -EINVAL;
951 	}
952 
953 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
954 				    true, timeout);
955 	if (ret == 0)
956 		ret = -ETIME;
957 	else if (ret > 0)
958 		ret = 0;
959 
960 	drm_gem_object_put(obj);
961 
962 	return ret;
963 }
964 EXPORT_SYMBOL(drm_gem_dma_resv_wait);
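/*
 * Sketch of a driver wait ioctl built on this helper; the jiffies conversion
 * helper is assumed from drm_utils, and the args fields are hypothetical:
 *
 *	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
 *	    args->wait_all, timeout);
 */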
965 
966 /**
967  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
968  * @dev: drm_device
969  * @data: ioctl data
970  * @file_priv: drm file-private structure
971  *
972  * Releases the handle to an mm object.
973  */
974 int
975 drm_gem_close_ioctl(struct drm_device *dev, void *data,
976 		    struct drm_file *file_priv)
977 {
978 	struct drm_gem_close *args = data;
979 	int ret;
980 
981 	if (!drm_core_check_feature(dev, DRIVER_GEM))
982 		return -EOPNOTSUPP;
983 
984 	ret = drm_gem_handle_delete(file_priv, args->handle);
985 
986 	return ret;
987 }
988 
989 /**
990  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
991  * @dev: drm_device
992  * @data: ioctl data
993  * @file_priv: drm file-private structure
994  *
995  * Create a global name for an object, returning the name.
996  *
997  * Note that the name does not hold a reference; when the object
998  * is freed, the name goes away.
999  */
1000 int
1001 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1002 		    struct drm_file *file_priv)
1003 {
1004 	struct drm_gem_flink *args = data;
1005 	struct drm_gem_object *obj;
1006 	int ret;
1007 
1008 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1009 		return -EOPNOTSUPP;
1010 
1011 	obj = drm_gem_object_lookup(file_priv, args->handle);
1012 	if (obj == NULL)
1013 		return -ENOENT;
1014 
1015 	mutex_lock(&dev->object_name_lock);
1016 	/* prevent races with concurrent gem_close. */
1017 	if (obj->handle_count == 0) {
1018 		ret = -ENOENT;
1019 		goto err;
1020 	}
1021 
1022 	if (!obj->name) {
1023 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
1024 		if (ret < 0)
1025 			goto err;
1026 
1027 		obj->name = ret;
1028 	}
1029 
1030 	args->name = (uint64_t) obj->name;
1031 	ret = 0;
1032 
1033 err:
1034 	mutex_unlock(&dev->object_name_lock);
1035 	drm_gem_object_put(obj);
1036 	return ret;
1037 }
1038 
1039 /**
1040  * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
1041  * @dev: drm_device
1042  * @data: ioctl data
1043  * @file_priv: drm file-private structure
1044  *
1045  * Open an object using the global name, returning a handle and the size.
1046  *
1047  * This handle (of course) holds a reference to the object, so the object
1048  * will not go away until the handle is deleted.
1049  */
1050 int
1051 drm_gem_open_ioctl(struct drm_device *dev, void *data,
1052 		   struct drm_file *file_priv)
1053 {
1054 	struct drm_gem_open *args = data;
1055 	struct drm_gem_object *obj;
1056 	int ret;
1057 	u32 handle;
1058 
1059 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1060 		return -EOPNOTSUPP;
1061 
1062 	mutex_lock(&dev->object_name_lock);
1063 	obj = idr_find(&dev->object_name_idr, (int) args->name);
1064 	if (obj) {
1065 		drm_gem_object_get(obj);
1066 	} else {
1067 		mutex_unlock(&dev->object_name_lock);
1068 		return -ENOENT;
1069 	}
1070 
1071 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
1072 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
1073 	if (ret)
1074 		goto err;
1075 
1076 	args->handle = handle;
1077 	args->size = obj->size;
1078 
1079 err:
1080 	drm_gem_object_put(obj);
1081 	return ret;
1082 }
1083 
1084 /**
1085  * drm_gem_open - initializes GEM file-private structures at devnode open time
1086  * @dev: drm_device which is being opened by userspace
1087  * @file_private: drm file-private structure to set up
1088  *
1089  * Called at device open time, sets up the structure for handling refcounting
1090  * of mm objects.
1091  */
1092 void
1093 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
1094 {
1095 	idr_init_base(&file_private->object_idr, 1);
1096 	mtx_init(&file_private->table_lock, IPL_NONE);
1097 }
1098 
1099 /**
1100  * drm_gem_release - release file-private GEM resources
1101  * @dev: drm_device which is being closed by userspace
1102  * @file_private: drm file-private structure to clean up
1103  *
1104  * Called at close time when the filp is going away.
1105  *
1106  * Releases any remaining references on objects by this filp.
1107  */
1108 void
1109 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
1110 {
1111 	idr_for_each(&file_private->object_idr,
1112 		     &drm_gem_object_release_handle, file_private);
1113 	idr_destroy(&file_private->object_idr);
1114 }
1115 
1116 /**
1117  * drm_gem_object_release - release GEM buffer object resources
1118  * @obj: GEM buffer object
1119  *
1120  * This releases any structures and resources used by @obj and is the inverse of
1121  * drm_gem_object_init().
1122  */
1123 void
1124 drm_gem_object_release(struct drm_gem_object *obj)
1125 {
1126 	WARN_ON(obj->dma_buf);
1127 
1128 #ifdef __linux__
1129 	if (obj->filp)
1130 		fput(obj->filp);
1131 #else
1132 	if (obj->uao)
1133 		uao_detach(obj->uao);
1134 	if (obj->uobj.pgops)
1135 		uvm_obj_destroy(&obj->uobj);
1136 #endif
1137 
1138 	dma_resv_fini(&obj->_resv);
1139 	drm_gem_free_mmap_offset(obj);
1140 	drm_gem_lru_remove(obj);
1141 }
1142 EXPORT_SYMBOL(drm_gem_object_release);
1143 
1144 /**
1145  * drm_gem_object_free - free a GEM object
1146  * @kref: kref of the object to free
1147  *
1148  * Called after the last reference to the object has been lost.
1149  *
1150  * Frees the object.
1151  */
1152 void
1153 drm_gem_object_free(struct kref *kref)
1154 {
1155 	struct drm_gem_object *obj =
1156 		container_of(kref, struct drm_gem_object, refcount);
1157 
1158 	if (WARN_ON(!obj->funcs->free))
1159 		return;
1160 
1161 	obj->funcs->free(obj);
1162 }
1163 EXPORT_SYMBOL(drm_gem_object_free);
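/*
 * A minimal sketch of the matching &drm_gem_object_funcs.free callback
 * (hypothetical foo_bo wrapper): release the core resources, then free the
 * containing structure.
 *
 *	static void foo_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */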
1164 
1165 #ifdef __linux__
1166 /**
1167  * drm_gem_vm_open - vma->ops->open implementation for GEM
1168  * @vma: VM area structure
1169  *
1170  * This function implements the #vm_operations_struct open() callback for GEM
1171  * drivers. This must be used together with drm_gem_vm_close().
1172  */
1173 void drm_gem_vm_open(struct vm_area_struct *vma)
1174 {
1175 	struct drm_gem_object *obj = vma->vm_private_data;
1176 
1177 	drm_gem_object_get(obj);
1178 }
1179 EXPORT_SYMBOL(drm_gem_vm_open);
1180 
1181 /**
1182  * drm_gem_vm_close - vma->ops->close implementation for GEM
1183  * @vma: VM area structure
1184  *
1185  * This function implements the #vm_operations_struct close() callback for GEM
1186  * drivers. This must be used together with drm_gem_vm_open().
1187  */
1188 void drm_gem_vm_close(struct vm_area_struct *vma)
1189 {
1190 	struct drm_gem_object *obj = vma->vm_private_data;
1191 
1192 	drm_gem_object_put(obj);
1193 }
1194 EXPORT_SYMBOL(drm_gem_vm_close);
1195 
1196 /**
1197  * drm_gem_mmap_obj - memory map a GEM object
1198  * @obj: the GEM object to map
1199  * @obj_size: the object size to be mapped, in bytes
1200  * @vma: VMA for the area to be mapped
1201  *
1202  * Set up the VMA to prepare mapping of the GEM object using the GEM object's
1203  * vm_ops. Depending on their requirements, GEM objects can either
1204  * provide a fault handler in their vm_ops (in which case any accesses to
1205  * the object will be trapped, to perform migration, GTT binding, surface
1206  * register allocation, or performance monitoring), or mmap the buffer memory
1207  * synchronously after calling drm_gem_mmap_obj.
1208  *
1209  * This function is mainly intended to implement the DMABUF mmap operation, when
1210  * the GEM object is not looked up based on its fake offset. To implement the
1211  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
1212  *
1213  * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
1214  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
1215  * callers must verify access restrictions before calling this helper.
1216  *
1217  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
1218  * size, or if no vm_ops are provided.
1219  */
1220 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1221 		     struct vm_area_struct *vma)
1222 {
1223 	int ret;
1224 
1225 	/* Check for valid size. */
1226 	if (obj_size < vma->vm_end - vma->vm_start)
1227 		return -EINVAL;
1228 
1229 	/* Take a ref for this mapping of the object, so that the fault
1230 	 * handler can dereference the mmap offset's pointer to the object.
1231 	 * This reference is cleaned up by the corresponding vm_close
1232 	 * (which should happen whether the vma was created by this call, or
1233 	 * by a vm_open due to mremap or partial unmap or whatever).
1234 	 */
1235 	drm_gem_object_get(obj);
1236 
1237 	vma->vm_private_data = obj;
1238 	vma->vm_ops = obj->funcs->vm_ops;
1239 
1240 	if (obj->funcs->mmap) {
1241 		ret = obj->funcs->mmap(obj, vma);
1242 		if (ret)
1243 			goto err_drm_gem_object_put;
1244 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1245 	} else {
1246 		if (!vma->vm_ops) {
1247 			ret = -EINVAL;
1248 			goto err_drm_gem_object_put;
1249 		}
1250 
1251 		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1252 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1253 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1254 	}
1255 
1256 	return 0;
1257 
1258 err_drm_gem_object_put:
1259 	drm_gem_object_put(obj);
1260 	return ret;
1261 }
1262 EXPORT_SYMBOL(drm_gem_mmap_obj);
1263 
1264 /**
1265  * drm_gem_mmap - memory map routine for GEM objects
1266  * @filp: DRM file pointer
1267  * @vma: VMA for the area to be mapped
1268  *
1269  * If a driver supports GEM object mapping, mmap calls on the DRM file
1270  * descriptor will end up here.
1271  *
1272  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
1273  * contain the fake offset we created when the GTT map ioctl was called on
1274  * the object) and map it with a call to drm_gem_mmap_obj().
1275  *
1276  * If the caller is not granted access to the buffer object, the mmap will fail
1277  * with EACCES. Please see the vma manager for more information.
1278  */
1279 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1280 {
1281 	struct drm_file *priv = filp->private_data;
1282 	struct drm_device *dev = priv->minor->dev;
1283 	struct drm_gem_object *obj = NULL;
1284 	struct drm_vma_offset_node *node;
1285 	int ret;
1286 
1287 	if (drm_dev_is_unplugged(dev))
1288 		return -ENODEV;
1289 
1290 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1291 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1292 						  vma->vm_pgoff,
1293 						  vma_pages(vma));
1294 	if (likely(node)) {
1295 		obj = container_of(node, struct drm_gem_object, vma_node);
1296 		/*
1297 		 * When the object is being freed, after it hits 0-refcnt it
1298 		 * proceeds to tear down the object. In the process it will
1299 		 * attempt to remove the VMA offset and so acquire this
1300 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1301 		 * that matches our range, we know it is in the process of being
1302 		 * destroyed and will be freed as soon as we release the lock -
1303 		 * so we have to check for the 0-refcnted object and treat it as
1304 		 * invalid.
1305 		 */
1306 		if (!kref_get_unless_zero(&obj->refcount))
1307 			obj = NULL;
1308 	}
1309 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1310 
1311 	if (!obj)
1312 		return -EINVAL;
1313 
1314 	if (!drm_vma_node_is_allowed(node, priv)) {
1315 		drm_gem_object_put(obj);
1316 		return -EACCES;
1317 	}
1318 
1319 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1320 			       vma);
1321 
1322 	drm_gem_object_put(obj);
1323 
1324 	return ret;
1325 }
1326 EXPORT_SYMBOL(drm_gem_mmap);
1327 #else /* ! __linux__ */
1328 
1329 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1330 		     vm_prot_t accessprot, voff_t off, vsize_t size)
1331 {
1332 	int ret;
1333 
1334 	/* Check for valid size. */
1335 	if (obj_size < size)
1336 		return -EINVAL;
1337 
1338 	/* Take a ref for this mapping of the object, so that the fault
1339 	 * handler can dereference the mmap offset's pointer to the object.
1340 	 * This reference is cleaned up by the corresponding vm_close
1341 	 * (which should happen whether the vma was created by this call, or
1342 	 * by a vm_open due to mremap or partial unmap or whatever).
1343 	 */
1344 	drm_gem_object_get(obj);
1345 
1346 #ifdef __linux__
1347 	vma->vm_private_data = obj;
1348 	vma->vm_ops = obj->funcs->vm_ops;
1349 #else
1350 	if (obj->uobj.pgops == NULL)
1351 		uvm_obj_init(&obj->uobj, obj->funcs->vm_ops, 1);
1352 #endif
1353 
1354 	if (obj->funcs->mmap) {
1355 		ret = obj->funcs->mmap(obj, accessprot, off, size);
1356 		if (ret)
1357 			goto err_drm_gem_object_put;
1358 #ifdef notyet
1359 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1360 #endif
1361 	} else {
1362 #ifdef notyet
1363 		if (!vma->vm_ops) {
1364 			ret = -EINVAL;
1365 			goto err_drm_gem_object_put;
1366 		}
1367 
1368 		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1369 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1370 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1371 #else
1372 		ret = -EINVAL;
1373 		goto err_drm_gem_object_put;
1374 #endif
1375 	}
1376 
1377 	return 0;
1378 
1379 err_drm_gem_object_put:
1380 	drm_gem_object_put(obj);
1381 	return ret;
1382 }
1383 
1384 struct uvm_object *
1385 drm_gem_mmap(struct file *filp, vm_prot_t accessprot, voff_t off,
1386     vsize_t size)
1387 {
1388 	struct drm_file *priv = (void *)filp;
1389 	struct drm_device *dev = priv->minor->dev;
1390 	struct drm_gem_object *obj = NULL;
1391 	struct drm_vma_offset_node *node;
1392 	int ret;
1393 
1394 	if (drm_dev_is_unplugged(dev))
1395 		return NULL;
1396 
1397 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1398 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1399 						  off >> PAGE_SHIFT,
1400 						  atop(round_page(size)));
1401 	if (likely(node)) {
1402 		obj = container_of(node, struct drm_gem_object, vma_node);
1403 		/*
1404 		 * When the object is being freed, after it hits 0-refcnt it
1405 		 * proceeds to tear down the object. In the process it will
1406 		 * attempt to remove the VMA offset and so acquire this
1407 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1408 		 * that matches our range, we know it is in the process of being
1409 		 * destroyed and will be freed as soon as we release the lock -
1410 		 * so we have to check for the 0-refcnted object and treat it as
1411 		 * invalid.
1412 		 */
1413 		if (!kref_get_unless_zero(&obj->refcount))
1414 			obj = NULL;
1415 	}
1416 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1417 
1418 	if (!obj)
1419 		return NULL;
1420 
1421 	if (!drm_vma_node_is_allowed(node, priv)) {
1422 		drm_gem_object_put(obj);
1423 		return NULL;
1424 	}
1425 
1426 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1427 			       accessprot, off, size);
1428 
1429 	drm_gem_object_put(obj);
1430 
1431 	return &obj->uobj;
1432 }
1433 
1434 #endif /* __linux__ */
1435 
1436 void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1437 			const struct drm_gem_object *obj)
1438 {
1439 	drm_printf_indent(p, indent, "name=%d\n", obj->name);
1440 	drm_printf_indent(p, indent, "refcount=%u\n",
1441 			  kref_read(&obj->refcount));
1442 	drm_printf_indent(p, indent, "start=%08lx\n",
1443 			  drm_vma_node_start(&obj->vma_node));
1444 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1445 	drm_printf_indent(p, indent, "imported=%s\n",
1446 			  str_yes_no(obj->import_attach));
1447 
1448 	if (obj->funcs->print_info)
1449 		obj->funcs->print_info(p, indent, obj);
1450 }
1451 
1452 int drm_gem_pin(struct drm_gem_object *obj)
1453 {
1454 	if (obj->funcs->pin)
1455 		return obj->funcs->pin(obj);
1456 	else
1457 		return 0;
1458 }
1459 
1460 void drm_gem_unpin(struct drm_gem_object *obj)
1461 {
1462 	if (obj->funcs->unpin)
1463 		obj->funcs->unpin(obj);
1464 }
1465 
1466 int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
1467 {
1468 	int ret;
1469 
1470 	if (!obj->funcs->vmap)
1471 		return -EOPNOTSUPP;
1472 
1473 	ret = obj->funcs->vmap(obj, map);
1474 	if (ret)
1475 		return ret;
1476 	else if (iosys_map_is_null(map))
1477 		return -ENOMEM;
1478 
1479 	return 0;
1480 }
1481 EXPORT_SYMBOL(drm_gem_vmap);
1482 
1483 void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
1484 {
1485 	if (iosys_map_is_null(map))
1486 		return;
1487 
1488 	if (obj->funcs->vunmap)
1489 		obj->funcs->vunmap(obj, map);
1490 
1491 	/* Always set the mapping to NULL. Callers may rely on this. */
1492 	iosys_map_clear(map);
1493 }
1494 EXPORT_SYMBOL(drm_gem_vunmap);
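/*
 * Hedged kernel-mapping sketch: vmap the object, access it through the
 * iosys_map helpers, then vunmap.  iosys_map_memset() is assumed from
 * <linux/iosys-map.h>.
 *
 *	struct iosys_map map;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret == 0) {
 *		iosys_map_memset(&map, 0, 0, obj->size);
 *		drm_gem_vunmap(obj, &map);
 *	}
 */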
1495 
1496 /**
1497  * drm_gem_lock_reservations - Sets up the ww context and acquires
1498  * the lock on an array of GEM objects.
1499  *
1500  * Once you've locked your reservations, you'll want to set up space
1501  * for your shared fences (if applicable), submit your job, then
1502  * drm_gem_unlock_reservations().
1503  *
1504  * @objs: drm_gem_objects to lock
1505  * @count: Number of objects in @objs
1506  * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
1507  * part of tracking this set of locked reservations.
1508  */
1509 int
1510 drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
1511 			  struct ww_acquire_ctx *acquire_ctx)
1512 {
1513 	int contended = -1;
1514 	int i, ret;
1515 
1516 	ww_acquire_init(acquire_ctx, &reservation_ww_class);
1517 
1518 retry:
1519 	if (contended != -1) {
1520 		struct drm_gem_object *obj = objs[contended];
1521 
1522 		ret = dma_resv_lock_slow_interruptible(obj->resv,
1523 								 acquire_ctx);
1524 		if (ret) {
1525 			ww_acquire_fini(acquire_ctx);
1526 			return ret;
1527 		}
1528 	}
1529 
1530 	for (i = 0; i < count; i++) {
1531 		if (i == contended)
1532 			continue;
1533 
1534 		ret = dma_resv_lock_interruptible(objs[i]->resv,
1535 							    acquire_ctx);
1536 		if (ret) {
1537 			int j;
1538 
1539 			for (j = 0; j < i; j++)
1540 				dma_resv_unlock(objs[j]->resv);
1541 
1542 			if (contended != -1 && contended >= i)
1543 				dma_resv_unlock(objs[contended]->resv);
1544 
1545 			if (ret == -EDEADLK) {
1546 				contended = i;
1547 				goto retry;
1548 			}
1549 
1550 			ww_acquire_fini(acquire_ctx);
1551 			return ret;
1552 		}
1553 	}
1554 
1555 	ww_acquire_done(acquire_ctx);
1556 
1557 	return 0;
1558 }
1559 EXPORT_SYMBOL(drm_gem_lock_reservations);
1560 
1561 void
1562 drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
1563 			    struct ww_acquire_ctx *acquire_ctx)
1564 {
1565 	int i;
1566 
1567 	for (i = 0; i < count; i++)
1568 		dma_resv_unlock(objs[i]->resv);
1569 
1570 	ww_acquire_fini(acquire_ctx);
1571 }
1572 EXPORT_SYMBOL(drm_gem_unlock_reservations);
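/*
 * Typical submit-path usage (sketch): lock every object the job touches,
 * attach fences, then unlock with the same acquire context.
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *	... reserve fence slots and queue the job ...
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */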
1573 
1574 /**
1575  * drm_gem_lru_init - initialize a LRU
1576  *
1577  * @lru: The LRU to initialize
1578  * @lock: The lock protecting the LRU
1579  */
1580 void
1581 drm_gem_lru_init(struct drm_gem_lru *lru, struct rwlock *lock)
1582 {
1583 	lru->lock = lock;
1584 	lru->count = 0;
1585 	INIT_LIST_HEAD(&lru->list);
1586 }
1587 EXPORT_SYMBOL(drm_gem_lru_init);
1588 
1589 static void
1590 drm_gem_lru_remove_locked(struct drm_gem_object *obj)
1591 {
1592 	obj->lru->count -= obj->size >> PAGE_SHIFT;
1593 	WARN_ON(obj->lru->count < 0);
1594 	list_del(&obj->lru_node);
1595 	obj->lru = NULL;
1596 }
1597 
1598 /**
1599  * drm_gem_lru_remove - remove object from whatever LRU it is in
1600  *
1601  * If the object is currently in any LRU, remove it.
1602  *
1603  * @obj: The GEM object to remove from current LRU
1604  */
1605 void
1606 drm_gem_lru_remove(struct drm_gem_object *obj)
1607 {
1608 	struct drm_gem_lru *lru = obj->lru;
1609 
1610 	if (!lru)
1611 		return;
1612 
1613 	mutex_lock(lru->lock);
1614 	drm_gem_lru_remove_locked(obj);
1615 	mutex_unlock(lru->lock);
1616 }
1617 EXPORT_SYMBOL(drm_gem_lru_remove);
1618 
1619 static void
1620 drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
1621 {
1622 	lockdep_assert_held_once(lru->lock);
1623 
1624 	if (obj->lru)
1625 		drm_gem_lru_remove_locked(obj);
1626 
1627 	lru->count += obj->size >> PAGE_SHIFT;
1628 	list_add_tail(&obj->lru_node, &lru->list);
1629 	obj->lru = lru;
1630 }
1631 
1632 /**
1633  * drm_gem_lru_move_tail - move the object to the tail of the LRU
1634  *
1635  * If the object is already in this LRU it will be moved to the
1636  * tail.  Otherwise it will be removed from whichever other LRU
1637  * it is in (if any) and moved into this LRU.
1638  *
1639  * @lru: The LRU to move the object into.
1640  * @obj: The GEM object to move into this LRU
1641  */
1642 void
1643 drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
1644 {
1645 	mutex_lock(lru->lock);
1646 	drm_gem_lru_move_tail_locked(lru, obj);
1647 	mutex_unlock(lru->lock);
1648 }
1649 EXPORT_SYMBOL(drm_gem_lru_move_tail);
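/*
 * Sketch of LRU bookkeeping in a driver (the foo names are hypothetical;
 * note the OpenBSD port takes a struct rwlock as the LRU lock):
 *
 *	drm_gem_lru_init(&foo->lru, &foo->lru_lock);
 *	...
 *	drm_gem_lru_move_tail(&foo->lru, obj);	// obj was just used
 */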
1650 
1651 /**
1652  * drm_gem_lru_scan - helper to implement shrinker.scan_objects
1653  *
1654  * If the shrink callback succeeds, it is expected that the driver
1655  * move the object out of this LRU.
1656  *
1657  * If the LRU possibly contains active buffers, it is the responsibility
1658  * of the shrink callback to check for this (ie. dma_resv_test_signaled())
1659  * or if necessary block until the buffer becomes idle.
1660  *
1661  * @lru: The LRU to scan
1662  * @nr_to_scan: The number of pages to try to reclaim
1663  * @shrink: Callback to try to shrink/reclaim the object.
1664  */
1665 unsigned long
1666 drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
1667 		 bool (*shrink)(struct drm_gem_object *obj))
1668 {
1669 	struct drm_gem_lru still_in_lru;
1670 	struct drm_gem_object *obj;
1671 	unsigned freed = 0;
1672 
1673 	drm_gem_lru_init(&still_in_lru, lru->lock);
1674 
1675 	mutex_lock(lru->lock);
1676 
1677 	while (freed < nr_to_scan) {
1678 		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
1679 
1680 		if (!obj)
1681 			break;
1682 
1683 		drm_gem_lru_move_tail_locked(&still_in_lru, obj);
1684 
1685 		/*
1686 		 * If it's in the process of being freed, gem_object->free()
1687 		 * may be blocked on lock waiting to remove it.  So just
1688 		 * skip it.
1689 		 */
1690 		if (!kref_get_unless_zero(&obj->refcount))
1691 			continue;
1692 
1693 		/*
1694 		 * Now that we own a reference, we can drop the lock for the
1695 		 * rest of the loop body, to reduce contention with other
1696 		 * code paths that need the LRU lock
1697 		 */
1698 		mutex_unlock(lru->lock);
1699 
1700 		/*
1701 		 * Note that this still needs to be trylock, since we can
1702  * hit the shrinker in response to trying to get backing pages
1703  * for this obj (ie. while its lock is already held)
1704 		 */
1705 		if (!dma_resv_trylock(obj->resv))
1706 			goto tail;
1707 
1708 		if (shrink(obj)) {
1709 			freed += obj->size >> PAGE_SHIFT;
1710 
1711 			/*
1712 			 * If we succeeded in releasing the object's backing
1713 			 * pages, we expect the driver to have moved the object
1714 			 * out of this LRU
1715 			 */
1716 			WARN_ON(obj->lru == &still_in_lru);
1717 			WARN_ON(obj->lru == lru);
1718 		}
1719 
1720 		dma_resv_unlock(obj->resv);
1721 
1722 tail:
1723 		drm_gem_object_put(obj);
1724 		mutex_lock(lru->lock);
1725 	}
1726 
1727 	/*
1728 	 * Move objects we've skipped over out of the temporary still_in_lru
1729 	 * back into this LRU
1730 	 */
1731 	list_for_each_entry (obj, &still_in_lru.list, lru_node)
1732 		obj->lru = lru;
1733 	list_splice_tail(&still_in_lru.list, &lru->list);
1734 	lru->count += still_in_lru.count;
1735 
1736 	mutex_unlock(lru->lock);
1737 
1738 	return freed;
1739 }
1740 EXPORT_SYMBOL(drm_gem_lru_scan);
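/*
 * Sketch of a shrinker scan callback built on this helper (foo_* names
 * hypothetical); as noted above, the shrink hook must itself check for
 * activity, e.g. with dma_resv_test_signaled():
 *
 *	static bool foo_shrink(struct drm_gem_object *obj)
 *	{
 *		if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
 *			return false;	// still busy, leave it
 *		return foo_purge(obj);	// true if backing pages were freed
 *	}
 *
 *	freed = drm_gem_lru_scan(&foo->lru, nr_to_scan, foo_shrink);
 */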
1741