xref: /openbsd-src/sys/dev/pci/drm/drm_gem.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/mm.h>
31 #include <linux/uaccess.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/module.h>
35 #include <linux/mman.h>
36 #include <linux/pagemap.h>
37 #include <linux/shmem_fs.h>
38 #include <linux/dma-buf.h>
39 #include <linux/mem_encrypt.h>
40 #include <drm/drmP.h>
41 #include <drm/drm_vma_manager.h>
42 #include <drm/drm_gem.h>
43 #include <drm/drm_print.h>
44 #include "drm_internal.h"
45 
46 #include <uvm/uvm.h>
47 
48 void drm_unref(struct uvm_object *);
49 void drm_ref(struct uvm_object *);
50 boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
51 int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
52     vm_fault_t, vm_prot_t, int);
53 
54 struct uvm_pagerops drm_pgops = {
55 	.pgo_init = NULL,
56 	.pgo_reference = drm_ref,
57 	.pgo_detach = drm_unref,
58 	.pgo_fault = drm_fault,
59 	.pgo_flush = drm_flush,
60 };
61 
62 void
63 drm_ref(struct uvm_object *uobj)
64 {
65 	struct drm_gem_object *obj =
66 	    container_of(uobj, struct drm_gem_object, uobj);
67 
68 	drm_gem_object_get(obj);
69 }
70 
71 void
72 drm_unref(struct uvm_object *uobj)
73 {
74 	struct drm_gem_object *obj =
75 	    container_of(uobj, struct drm_gem_object, uobj);
76 
77 	drm_gem_object_put_unlocked(obj);
78 }
79 
80 int
81 drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
82     int npages, int centeridx, vm_fault_t fault_type,
83     vm_prot_t access_type, int flags)
84 {
85 	struct vm_map_entry *entry = ufi->entry;
86 	struct uvm_object *uobj = entry->object.uvm_obj;
87 	struct drm_gem_object *obj =
88 	    container_of(uobj, struct drm_gem_object, uobj);
89 	struct drm_device *dev = obj->dev;
90 	int ret;
91 
92 	/*
93 	 * We do not allow device mappings to be mapped copy-on-write,
94 	 * so we kill any attempt to do so here.
95 	 */
96 
97 	if (UVM_ET_ISCOPYONWRITE(entry)) {
98 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
99 		return (VM_PAGER_ERROR);
100 	}
101 
102 	/*
103 	 * We could end up here as the result of a copyin(9) or
104 	 * copyout(9) while handling an ioctl.  So we must be careful
105 	 * not to deadlock.  Therefore we only block if the quiesce
106 	 * count is zero, which guarantees we didn't enter from within
107 	 * an ioctl code path.
108 	 */
109 	mtx_enter(&dev->quiesce_mtx);
110 	if (dev->quiesce && dev->quiesce_count == 0) {
111 		mtx_leave(&dev->quiesce_mtx);
112 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
113 		mtx_enter(&dev->quiesce_mtx);
114 		while (dev->quiesce) {
115 			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
116 			    PZERO, "drmflt", INFSLP);
117 		}
118 		mtx_leave(&dev->quiesce_mtx);
119 		return (VM_PAGER_REFAULT);
120 	}
121 	dev->quiesce_count++;
122 	mtx_leave(&dev->quiesce_mtx);
123 
124 	/* Call down into driver to do the magic */
125 	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
126 	    entry->start), vaddr, pps, npages, centeridx,
127 	    access_type, flags);
128 
129 	mtx_enter(&dev->quiesce_mtx);
130 	dev->quiesce_count--;
131 	if (dev->quiesce)
132 		wakeup(&dev->quiesce_count);
133 	mtx_leave(&dev->quiesce_mtx);
134 
135 	return (ret);
136 }
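
/*
 * For reference, a sketch of the suspend-side counterpart to the
 * handshake above (an assumption about the quiesce code elsewhere in
 * the tree, not verbatim): the suspend path raises dev->quiesce, then
 * sleeps until every in-flight fault has dropped dev->quiesce_count
 * back to zero, which pairs with the wakeup(&dev->quiesce_count)
 * above.
 *
 *	mtx_enter(&dev->quiesce_mtx);
 *	dev->quiesce = 1;
 *	while (dev->quiesce_count > 0)
 *		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
 *		    PZERO, "drmqui", INFSLP);
 *	mtx_leave(&dev->quiesce_mtx);
 */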
137 
138 boolean_t
139 drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
140 {
141 	return (TRUE);
142 }
143 
144 struct uvm_object *
145 udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
146 {
147 	struct drm_device *dev = drm_get_device_from_kdev(device);
148 	struct drm_gem_object *obj;
149 	struct drm_vma_offset_node *node;
150 	struct drm_file *priv;
151 	struct file *filp;
152 
153 	if (cdevsw[major(device)].d_mmap != drmmmap)
154 		return NULL;
155 
156 	if (dev == NULL)
157 		return NULL;
158 
159 	if (dev->driver->mmap)
160 		return dev->driver->mmap(dev, off, size);
161 
162 	mutex_lock(&dev->struct_mutex);
163 
164 	priv = drm_find_file_by_minor(dev, minor(device));
165 	if (priv == NULL) {
166 		mutex_unlock(&dev->struct_mutex);
167 		return NULL;
168 	}
169 	filp = priv->filp;
170 
171 	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
172 					   off >> PAGE_SHIFT,
173 					   atop(round_page(size)));
174 	if (!node) {
175 		mutex_unlock(&dev->struct_mutex);
176 		return NULL;
177 	} else if (!drm_vma_node_is_allowed(node, filp)) {
178 		mutex_unlock(&dev->struct_mutex);
179 		return NULL;
180 	}
181 
182 	obj = container_of(node, struct drm_gem_object, vma_node);
183 	drm_gem_object_get(obj);
184 
185 	mutex_unlock(&dev->struct_mutex);
186 	return &obj->uobj;
187 }
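
/*
 * Sketch of the OpenBSD mmap path that ends up in udv_attach_drm()
 * (a summary of the call chain under the usual UVM conventions, not
 * verbatim code):
 *
 *	mmap(2) on the DRM device with a fake GEM offset
 *	  -> uvm_device.c's udv_attach() recognizes the DRM device
 *	  -> udv_attach_drm() resolves the offset to a GEM object
 *	  -> later faults on the mapping arrive via drm_fault() above
 */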
188 
189 /** @file drm_gem.c
190  *
191  * This file provides some of the base ioctls and library routines for
192  * the graphics memory manager implemented by each device driver.
193  *
194  * Because various devices have different requirements in terms of
195  * synchronization and migration strategies, implementing that is left up to
196  * the driver, and all that the general API provides should be generic --
197  * allocating objects, reading/writing data with the CPU, and freeing objects.
198  * Even there, platform-dependent optimizations for reading/writing data with
199  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
200  * the DRI2 implementation wants to have at least allocate/mmap be generic.
201  *
202  * The goal was to have swap-backed object allocation managed through
203  * struct file.  However, file descriptors as handles to a struct file have
204  * two major failings:
205  * - Process limits prevent more than 1024 or so being used at a time by
206  *   default.
207  * - Inability to allocate high fds will aggravate the X Server's select()
208  *   handling, and likely that of many GL client applications as well.
209  *
210  * This led to a plan of using our own integer IDs (called handles, following
211  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
212  * ioctls.  The objects themselves will still include the struct file so
213  * that we can transition to fds if the required kernel infrastructure shows
214  * up at a later date, and as our interface with shmfs for memory allocation.
215  */
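
/*
 * To make the handle model above concrete, here is a sketch of how a
 * userspace client exercises the generic ioctls implemented further
 * down in this file (error handling omitted; "fd" and "fd2" are open
 * DRM file descriptors and "handle" an existing GEM handle): flink
 * publishes a global name, open turns that name back into a handle
 * plus the object size, and close drops the handle.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op);
 *
 *	struct drm_gem_close cl = { .handle = op.handle };
 *	ioctl(fd2, DRM_IOCTL_GEM_CLOSE, &cl);
 */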
216 
217 /*
218  * We make up offsets for buffer objects so we can recognize them at
219  * mmap time.
220  */
221 
222 /* pgoff in mmap is an unsigned long, so we need to make sure that
223  * the faked up offset will fit
224  */
225 
226 #if BITS_PER_LONG == 64
227 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
228 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
229 #else
230 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
231 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
232 #endif
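
/*
 * Worked example of the macros above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) on a 64-bit platform:
 *
 *	DRM_FILE_PAGE_OFFSET_START == (0xFFFFFFFFUL >> 12) + 1
 *	                           == 0x100000 pages, i.e. the 4 GiB mark
 *	DRM_FILE_PAGE_OFFSET_SIZE  == (0xFFFFFFFFUL >> 12) * 16
 *	                           == 0xFFFFF0 pages, roughly 64 GiB
 *
 * so every fake offset lies above anything a 32-bit pgoff could
 * produce, which is how buffer objects are recognized at mmap time.
 */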
233 
234 /**
235  * drm_gem_init - Initialize the GEM device fields
236  * @dev: drm_device structure to initialize
237  */
238 int
239 drm_gem_init(struct drm_device *dev)
240 {
241 	struct drm_vma_offset_manager *vma_offset_manager;
242 
243 	rw_init(&dev->object_name_lock, "drmonl");
244 	idr_init_base(&dev->object_name_idr, 1);
245 
246 	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
247 	if (!vma_offset_manager) {
248 		DRM_ERROR("out of memory\n");
249 		return -ENOMEM;
250 	}
251 
252 	dev->vma_offset_manager = vma_offset_manager;
253 	drm_vma_offset_manager_init(vma_offset_manager,
254 				    DRM_FILE_PAGE_OFFSET_START,
255 				    DRM_FILE_PAGE_OFFSET_SIZE);
256 
257 	return 0;
258 }
259 
260 void
261 drm_gem_destroy(struct drm_device *dev)
262 {
264 	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
265 	kfree(dev->vma_offset_manager);
266 	dev->vma_offset_manager = NULL;
267 }
268 
269 #ifdef __linux__
270 
271 /**
272  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
273  * @dev: drm_device the object should be initialized for
274  * @obj: drm_gem_object to initialize
275  * @size: object size
276  *
277  * Initialize an already allocated GEM object of the specified size with
278  * shmfs backing store.
279  */
280 int drm_gem_object_init(struct drm_device *dev,
281 			struct drm_gem_object *obj, size_t size)
282 {
283 	struct file *filp;
284 
285 	drm_gem_private_object_init(dev, obj, size);
286 
287 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
288 	if (IS_ERR(filp))
289 		return PTR_ERR(filp);
290 
291 	obj->filp = filp;
292 
293 	return 0;
294 }
295 EXPORT_SYMBOL(drm_gem_object_init);
296 
297 #else
298 
299 int drm_gem_object_init(struct drm_device *dev,
300 			struct drm_gem_object *obj, size_t size)
301 {
302 	drm_gem_private_object_init(dev, obj, size);
303 
304 	obj->uao = uao_create(size, 0);
305 	uvm_objinit(&obj->uobj, &drm_pgops, 1);
306 
307 	atomic_inc(&dev->obj_count);
308 	atomic_add(obj->size, &dev->obj_memory);
309 
310 	obj->filp = (void *)obj->uao;
311 
312 	return 0;
313 }
314 
315 #endif
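
/*
 * A minimal sketch of how a driver's buffer-allocation path might use
 * drm_gem_object_init() together with the handle helpers below
 * (hypothetical driver code; "args" and "file_priv" are assumptions
 * and error unwinding is abbreviated):
 *
 *	struct drm_gem_object *obj;
 *	u32 handle;
 *	int ret;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_object_init(dev, obj, round_page(args->size));
 *	if (ret) {
 *		kfree(obj);
 *		return ret;
 *	}
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	drm_gem_object_put_unlocked(obj);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 *	return 0;
 */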
316 
317 /**
318  * drm_gem_private_object_init - initialize an allocated private GEM object
319  * @dev: drm_device the object should be initialized for
320  * @obj: drm_gem_object to initialize
321  * @size: object size
322  *
323  * Initialize an already allocated GEM object of the specified size with
324  * no GEM provided backing store. Instead the caller is responsible for
325  * backing the object and handling it.
326  */
327 void drm_gem_private_object_init(struct drm_device *dev,
328 				 struct drm_gem_object *obj, size_t size)
329 {
330 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
331 
332 	obj->dev = dev;
333 	obj->filp = NULL;
334 
335 	kref_init(&obj->refcount);
336 	obj->handle_count = 0;
337 	obj->size = size;
338 	drm_vma_node_reset(&obj->vma_node);
339 }
340 EXPORT_SYMBOL(drm_gem_private_object_init);
341 
342 static void
343 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
344 {
345 	/*
346 	 * Note: obj->dma_buf can't disappear as long as we still hold a
347 	 * handle reference in obj->handle_count.
348 	 */
349 	mutex_lock(&filp->prime.lock);
350 	if (obj->dma_buf) {
351 		drm_prime_remove_buf_handle_locked(&filp->prime,
352 						   obj->dma_buf);
353 	}
354 	mutex_unlock(&filp->prime.lock);
355 }
356 
357 /**
358  * drm_gem_object_handle_free - release resources bound to userspace handles
359  * @obj: GEM object to clean up.
360  *
361  * Called after the last handle to the object has been closed.
362  *
363  * Removes any name for the object. Note that this must be
364  * called before drm_gem_object_free or we'll be touching
365  * freed memory.
366  */
367 static void drm_gem_object_handle_free(struct drm_gem_object *obj)
368 {
369 	struct drm_device *dev = obj->dev;
370 
371 	/* Remove any name for this object */
372 	if (obj->name) {
373 		idr_remove(&dev->object_name_idr, obj->name);
374 		obj->name = 0;
375 	}
376 }
377 
378 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
379 {
380 	/* Unbreak the reference cycle if we have an exported dma_buf. */
381 	if (obj->dma_buf) {
382 		dma_buf_put(obj->dma_buf);
383 		obj->dma_buf = NULL;
384 	}
385 }
386 
387 static void
388 drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
389 {
390 	struct drm_device *dev = obj->dev;
391 	bool final = false;
392 
393 	if (WARN_ON(obj->handle_count == 0))
394 		return;
395 
396 	/*
397 	 * Must bump handle count first as this may be the last
398 	 * ref, in which case the object would disappear before we
399 	 * checked for a name.
400 	 */
401 
402 	mutex_lock(&dev->object_name_lock);
403 	if (--obj->handle_count == 0) {
404 		drm_gem_object_handle_free(obj);
405 		drm_gem_object_exported_dma_buf_free(obj);
406 		final = true;
407 	}
408 	mutex_unlock(&dev->object_name_lock);
409 
410 	if (final)
411 		drm_gem_object_put_unlocked(obj);
412 }
413 
414 /*
415  * Called at device or object close to release the file's
416  * handle references on objects.
417  */
418 static int
419 drm_gem_object_release_handle(int id, void *ptr, void *data)
420 {
421 	struct drm_file *file_priv = data;
422 	struct drm_gem_object *obj = ptr;
423 	struct drm_device *dev = obj->dev;
424 
425 	if (dev->driver->gem_close_object)
426 		dev->driver->gem_close_object(obj, file_priv);
427 
428 	if (drm_core_check_feature(dev, DRIVER_PRIME))
429 		drm_gem_remove_prime_handles(obj, file_priv);
430 	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
431 
432 	drm_gem_object_handle_put_unlocked(obj);
433 
434 	return 0;
435 }
436 
437 /**
438  * drm_gem_handle_delete - deletes the given file-private handle
439  * @filp: drm file-private structure to use for the handle look up
440  * @handle: userspace handle to delete
441  *
442  * Removes the GEM handle from the @filp lookup table which has been added with
443  * drm_gem_handle_create(). If this is the last handle also cleans up linked
444  * resources like GEM names.
445  */
446 int
447 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
448 {
449 	struct drm_gem_object *obj;
450 
451 	spin_lock(&filp->table_lock);
452 
453 	/* Check if we currently have a reference on the object */
454 	obj = idr_replace(&filp->object_idr, NULL, handle);
455 	spin_unlock(&filp->table_lock);
456 	if (IS_ERR_OR_NULL(obj))
457 		return -EINVAL;
458 
459 	/* Release driver's reference and decrement refcount. */
460 	drm_gem_object_release_handle(handle, obj, filp);
461 
462 	/* And finally make the handle available for future allocations. */
463 	spin_lock(&filp->table_lock);
464 	idr_remove(&filp->object_idr, handle);
465 	spin_unlock(&filp->table_lock);
466 
467 	return 0;
468 }
469 EXPORT_SYMBOL(drm_gem_handle_delete);
470 
471 /**
472  * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
473  * @file: drm file-private structure containing the gem object
474  * @dev: corresponding drm_device
475  * @handle: gem object handle
476  * @offset: return location for the fake mmap offset
477  *
478  * This implements the &drm_driver.dumb_map_offset kms driver callback for
479  * drivers which use gem to manage their backing storage.
480  *
481  * Returns:
482  * 0 on success or a negative error code on failure.
483  */
484 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
485 			    u32 handle, u64 *offset)
486 {
487 	struct drm_gem_object *obj;
488 	int ret;
489 
490 	obj = drm_gem_object_lookup(file, handle);
491 	if (!obj)
492 		return -ENOENT;
493 
494 	/* Don't allow imported objects to be mapped */
495 	if (obj->import_attach) {
496 		ret = -EINVAL;
497 		goto out;
498 	}
499 
500 	ret = drm_gem_create_mmap_offset(obj);
501 	if (ret)
502 		goto out;
503 
504 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
505 out:
506 	drm_gem_object_put_unlocked(obj);
507 
508 	return ret;
509 }
510 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
511 
512 /**
513  * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
514  * @file: drm file-private structure to remove the dumb handle from
515  * @dev: corresponding drm_device
516  * @handle: the dumb handle to remove
517  *
518  * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
519  * which use gem to manage their backing storage.
520  */
521 int drm_gem_dumb_destroy(struct drm_file *file,
522 			 struct drm_device *dev,
523 			 uint32_t handle)
524 {
525 	return drm_gem_handle_delete(file, handle);
526 }
527 EXPORT_SYMBOL(drm_gem_dumb_destroy);
528 
529 /**
530  * drm_gem_handle_create_tail - internal functions to create a handle
531  * @file_priv: drm file-private structure to register the handle for
532  * @obj: object to register
533  * @handlep: pointer to return the created handle to the caller
534  *
535  * This expects the &drm_device.object_name_lock to be held already and will
536  * drop it before returning. Used to avoid races in establishing new handles
537  * when importing an object from either a flink name or a dma-buf.
538  *
539  * Handles must be released again through drm_gem_handle_delete(). This is done
540  * when userspace closes @file_priv for all attached handles, or through the
541  * GEM_CLOSE ioctl for individual handles.
542  */
543 int
544 drm_gem_handle_create_tail(struct drm_file *file_priv,
545 			   struct drm_gem_object *obj,
546 			   u32 *handlep)
547 {
548 	struct drm_device *dev = obj->dev;
549 	u32 handle;
550 	int ret;
551 
552 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
553 	if (obj->handle_count++ == 0)
554 		drm_gem_object_get(obj);
555 
556 	/*
557 	 * Get the user-visible handle using idr.  Preload and perform
558 	 * allocation under our spinlock.
559 	 */
560 	idr_preload(GFP_KERNEL);
561 	spin_lock(&file_priv->table_lock);
562 
563 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
564 
565 	spin_unlock(&file_priv->table_lock);
566 	idr_preload_end();
567 
568 	mutex_unlock(&dev->object_name_lock);
569 	if (ret < 0)
570 		goto err_unref;
571 
572 	handle = ret;
573 
574 	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
575 	if (ret)
576 		goto err_remove;
577 
578 	if (dev->driver->gem_open_object) {
579 		ret = dev->driver->gem_open_object(obj, file_priv);
580 		if (ret)
581 			goto err_revoke;
582 	}
583 
584 	*handlep = handle;
585 	return 0;
586 
587 err_revoke:
588 	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
589 err_remove:
590 	spin_lock(&file_priv->table_lock);
591 	idr_remove(&file_priv->object_idr, handle);
592 	spin_unlock(&file_priv->table_lock);
593 err_unref:
594 	drm_gem_object_handle_put_unlocked(obj);
595 	return ret;
596 }
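
/*
 * Sketch of the import-path pattern this helper exists for, mirroring
 * drm_gem_open_ioctl() further down: the name lookup and the handle
 * creation must happen atomically under &drm_device.object_name_lock,
 * and drm_gem_handle_create_tail() drops that lock on both the
 * success and the error path.
 *
 *	mutex_lock(&dev->object_name_lock);
 *	obj = idr_find(&dev->object_name_idr, name);
 *	if (obj == NULL) {
 *		mutex_unlock(&dev->object_name_lock);
 *		return -ENOENT;
 *	}
 *	drm_gem_object_get(obj);
 *	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
 *	drm_gem_object_put_unlocked(obj);
 */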
597 
598 /**
599  * drm_gem_handle_create - create a gem handle for an object
600  * @file_priv: drm file-private structure to register the handle for
601  * @obj: object to register
602  * @handlep: pointer to return the created handle to the caller
603  *
604  * Create a handle for this object. This adds a handle reference to the object,
605  * which includes a regular reference count. Callers will likely want to
606  * dereference the object afterwards.
607  *
608  * Since this publishes @obj to userspace it must be fully set up by this point,
609  * drivers must call this last in their buffer object creation callbacks.
610  */
611 int drm_gem_handle_create(struct drm_file *file_priv,
612 			  struct drm_gem_object *obj,
613 			  u32 *handlep)
614 {
615 	mutex_lock(&obj->dev->object_name_lock);
616 
617 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
618 }
619 EXPORT_SYMBOL(drm_gem_handle_create);
620 
621 
622 /**
623  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
624  * @obj: obj in question
625  *
626  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
627  *
628  * Note that drm_gem_object_release() already calls this function, so drivers
629  * don't have to take care of releasing the mmap offset themselves when freeing
630  * the GEM object.
631  */
632 void
633 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
634 {
635 	struct drm_device *dev = obj->dev;
636 
637 	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
638 }
639 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
640 
641 /**
642  * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
643  * @obj: obj in question
644  * @size: the virtual size
645  *
646  * GEM memory mapping works by handing back to userspace a fake mmap offset
647  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
648  * up the object based on the offset and sets up the various memory mapping
649  * structures.
650  *
651  * This routine allocates and attaches a fake offset for @obj, in cases where
652  * the virtual size differs from the physical size (ie. &drm_gem_object.size).
653  * Otherwise just use drm_gem_create_mmap_offset().
654  *
655  * This function is idempotent and handles an already allocated mmap offset
656  * transparently. Drivers do not need to check for this case.
657  */
658 int
659 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
660 {
661 	struct drm_device *dev = obj->dev;
662 
663 	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
664 				  size / PAGE_SIZE);
665 }
666 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
667 
668 /**
669  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
670  * @obj: obj in question
671  *
672  * GEM memory mapping works by handing back to userspace a fake mmap offset
673  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
674  * up the object based on the offset and sets up the various memory mapping
675  * structures.
676  *
677  * This routine allocates and attaches a fake offset for @obj.
678  *
679  * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
680  * the fake offset again.
681  */
682 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
683 {
684 	return drm_gem_create_mmap_offset_size(obj, obj->size);
685 }
686 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
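
/*
 * The usual flow around these helpers, as in drm_gem_dumb_map_offset()
 * above (a sketch): allocate the fake offset, then hand its byte
 * address back to userspace, which passes it unchanged as the offset
 * argument of mmap(2) on the DRM file descriptor.
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 */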
687 
688 #ifdef __linux__
689 
690 /**
691  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
692  * from shmem
693  * @obj: obj in question
694  *
695  * This reads the page-array of the shmem-backing storage of the given gem
696  * object. An array of pages is returned. If a page is not allocated or
697  * swapped-out, this will allocate/swap-in the required pages. Note that the
698  * whole object is covered by the page-array and pinned in memory.
699  *
700  * Use drm_gem_put_pages() to release the array and unpin all pages.
701  *
702  * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
703  * If you require other GFP-masks, you have to do those allocations yourself.
704  *
705  * Note that you are not allowed to change gfp-zones during runtime. That is,
706  * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
707  * set during initialization. If you have special zone constraints, set them
708  * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
709  * to keep pages in the required zone during swap-in.
710  */
711 struct page **drm_gem_get_pages(struct drm_gem_object *obj)
712 {
713 	struct address_space *mapping;
714 	struct page *p, **pages;
715 	int i, npages;
716 
717 	/* This is the shared memory object that backs the GEM resource */
718 	mapping = obj->filp->f_mapping;
719 
720 	/* We already BUG_ON() for non-page-aligned sizes in
721 	 * drm_gem_object_init(), so we should never hit this unless
722 	 * driver author is doing something really wrong:
723 	 */
724 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
725 
726 	npages = obj->size >> PAGE_SHIFT;
727 
728 	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
729 	if (pages == NULL)
730 		return ERR_PTR(-ENOMEM);
731 
732 	for (i = 0; i < npages; i++) {
733 		p = shmem_read_mapping_page(mapping, i);
734 		if (IS_ERR(p))
735 			goto fail;
736 		pages[i] = p;
737 
738 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
739 		 * correct region during swapin. Note that this requires
740 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
741 		 * so shmem can relocate pages during swapin if required.
742 		 */
743 		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
744 				(page_to_pfn(p) >= 0x00100000UL));
745 	}
746 
747 	return pages;
748 
749 fail:
750 	while (i--)
751 		put_page(pages[i]);
752 
753 	kvfree(pages);
754 	return ERR_CAST(p);
755 }
756 EXPORT_SYMBOL(drm_gem_get_pages);
757 
758 /**
759  * drm_gem_put_pages - helper to free backing pages for a GEM object
760  * @obj: obj in question
761  * @pages: pages to free
762  * @dirty: if true, pages will be marked as dirty
763  * @accessed: if true, the pages will be marked as accessed
764  */
765 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
766 		bool dirty, bool accessed)
767 {
768 	int i, npages;
769 
770 	/* We already BUG_ON() for non-page-aligned sizes in
771 	 * drm_gem_object_init(), so we should never hit this unless
772 	 * driver author is doing something really wrong:
773 	 */
774 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
775 
776 	npages = obj->size >> PAGE_SHIFT;
777 
778 	for (i = 0; i < npages; i++) {
779 		if (dirty)
780 			set_page_dirty(pages[i]);
781 
782 		if (accessed)
783 			mark_page_accessed(pages[i]);
784 
785 		/* Undo the reference we took when populating the table */
786 		put_page(pages[i]);
787 	}
788 
789 	kvfree(pages);
790 }
791 EXPORT_SYMBOL(drm_gem_put_pages);
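
/*
 * Typical pairing of the two helpers above in a Linux driver's pin
 * path (a sketch; what the driver does with the page array between the
 * two calls is driver-specific):
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	...map the pages, e.g. into an sg table...
 *	drm_gem_put_pages(obj, pages, true, true);
 */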
792 
793 #endif
794 
795 /**
796  * drm_gem_object_lookup - look up a GEM object from its handle
797  * @filp: DRM file-private data
798  * @handle: userspace handle
799  *
800  * Returns:
801  *
802  * A reference to the object named by the handle if such exists on @filp, NULL
803  * otherwise.
804  */
805 struct drm_gem_object *
806 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
807 {
808 	struct drm_gem_object *obj;
809 
810 	spin_lock(&filp->table_lock);
811 
812 	/* Check if we currently have a reference on the object */
813 	obj = idr_find(&filp->object_idr, handle);
814 	if (obj)
815 		drm_gem_object_get(obj);
816 
817 	spin_unlock(&filp->table_lock);
818 
819 	return obj;
820 }
821 EXPORT_SYMBOL(drm_gem_object_lookup);
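
/*
 * Canonical use in an ioctl handler (a sketch): every successful
 * lookup returns a new reference, which the caller must drop once it
 * is done with the object.
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	...operate on obj...
 *	drm_gem_object_put_unlocked(obj);
 */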
822 
823 /**
824  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
825  * @dev: drm_device
826  * @data: ioctl data
827  * @file_priv: drm file-private structure
828  *
829  * Releases the handle to an mm object.
830  */
831 int
832 drm_gem_close_ioctl(struct drm_device *dev, void *data,
833 		    struct drm_file *file_priv)
834 {
835 	struct drm_gem_close *args = data;
836 	int ret;
837 
838 	if (!drm_core_check_feature(dev, DRIVER_GEM))
839 		return -ENODEV;
840 
841 	ret = drm_gem_handle_delete(file_priv, args->handle);
842 
843 	return ret;
844 }
845 
846 /**
847  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
848  * @dev: drm_device
849  * @data: ioctl data
850  * @file_priv: drm file-private structure
851  *
852  * Create a global name for an object, returning the name.
853  *
854  * Note that the name does not hold a reference; when the object
855  * is freed, the name goes away.
856  */
857 int
858 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
859 		    struct drm_file *file_priv)
860 {
861 	struct drm_gem_flink *args = data;
862 	struct drm_gem_object *obj;
863 	int ret;
864 
865 	if (!drm_core_check_feature(dev, DRIVER_GEM))
866 		return -ENODEV;
867 
868 	obj = drm_gem_object_lookup(file_priv, args->handle);
869 	if (obj == NULL)
870 		return -ENOENT;
871 
872 	mutex_lock(&dev->object_name_lock);
873 	/* prevent races with concurrent gem_close. */
874 	if (obj->handle_count == 0) {
875 		ret = -ENOENT;
876 		goto err;
877 	}
878 
879 	if (!obj->name) {
880 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
881 		if (ret < 0)
882 			goto err;
883 
884 		obj->name = ret;
885 	}
886 
887 	args->name = (uint64_t) obj->name;
888 	ret = 0;
889 
890 err:
891 	mutex_unlock(&dev->object_name_lock);
892 	drm_gem_object_put_unlocked(obj);
893 	return ret;
894 }
895 
896 /**
897  * drm_gem_open - implementation of the GEM_OPEN ioctl
898  * @dev: drm_device
899  * @data: ioctl data
900  * @file_priv: drm file-private structure
901  *
902  * Open an object using the global name, returning a handle and the size.
903  *
904  * This handle (of course) holds a reference to the object, so the object
905  * will not go away until the handle is deleted.
906  */
907 int
908 drm_gem_open_ioctl(struct drm_device *dev, void *data,
909 		   struct drm_file *file_priv)
910 {
911 	struct drm_gem_open *args = data;
912 	struct drm_gem_object *obj;
913 	int ret;
914 	u32 handle;
915 
916 	if (!drm_core_check_feature(dev, DRIVER_GEM))
917 		return -ENODEV;
918 
919 	mutex_lock(&dev->object_name_lock);
920 	obj = idr_find(&dev->object_name_idr, (int) args->name);
921 	if (obj) {
922 		drm_gem_object_get(obj);
923 	} else {
924 		mutex_unlock(&dev->object_name_lock);
925 		return -ENOENT;
926 	}
927 
928 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
929 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
930 	drm_gem_object_put_unlocked(obj);
931 	if (ret)
932 		return ret;
933 
934 	args->handle = handle;
935 	args->size = obj->size;
936 
937 	return 0;
938 }
939 
940 /**
941  * drm_gem_open - initializes GEM file-private structures at devnode open time
942  * @dev: drm_device which is being opened by userspace
943  * @file_private: drm file-private structure to set up
944  *
945  * Called at device open time, sets up the structure for handling refcounting
946  * of mm objects.
947  */
948 void
949 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
950 {
951 	idr_init_base(&file_private->object_idr, 1);
952 	mtx_init(&file_private->table_lock, IPL_NONE);
953 }
954 
955 /**
956  * drm_gem_release - release file-private GEM resources
957  * @dev: drm_device which is being closed by userspace
958  * @file_private: drm file-private structure to clean up
959  *
960  * Called at close time when the filp is going away.
961  *
962  * Releases any remaining references on objects by this filp.
963  */
964 void
965 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
966 {
967 	idr_for_each(&file_private->object_idr,
968 		     &drm_gem_object_release_handle, file_private);
969 	idr_destroy(&file_private->object_idr);
970 }
971 
972 #ifdef __linux__
973 
974 /**
975  * drm_gem_object_release - release GEM buffer object resources
976  * @obj: GEM buffer object
977  *
978  * This releases any structures and resources used by @obj and is the inverse of
979  * drm_gem_object_init().
980  */
981 void
982 drm_gem_object_release(struct drm_gem_object *obj)
983 {
984 	WARN_ON(obj->dma_buf);
985 
986 	if (obj->filp)
987 		fput(obj->filp);
988 
989 	drm_gem_free_mmap_offset(obj);
990 }
991 EXPORT_SYMBOL(drm_gem_object_release);
992 
993 #else
994 
995 void
996 drm_gem_object_release(struct drm_gem_object *obj)
997 {
998 	struct drm_device *dev = obj->dev;
999 
1000 	WARN_ON(obj->dma_buf);
1001 
1002 	if (obj->uao)
1003 		uao_detach(obj->uao);
1004 
1005 	atomic_dec(&dev->obj_count);
1006 	atomic_sub(obj->size, &dev->obj_memory);
1007 
1008 	drm_gem_free_mmap_offset(obj);
1009 }
1010 
1011 #endif
1012 
1013 /**
1014  * drm_gem_object_free - free a GEM object
1015  * @kref: kref of the object to free
1016  *
1017  * Called after the last reference to the object has been lost.
1018  * Must be called holding &drm_device.struct_mutex.
1019  *
1020  * Frees the object
1021  */
1022 void
1023 drm_gem_object_free(struct kref *kref)
1024 {
1025 	struct drm_gem_object *obj =
1026 		container_of(kref, struct drm_gem_object, refcount);
1027 	struct drm_device *dev = obj->dev;
1028 
1029 	if (dev->driver->gem_free_object_unlocked) {
1030 		dev->driver->gem_free_object_unlocked(obj);
1031 	} else if (dev->driver->gem_free_object) {
1032 		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1033 
1034 		dev->driver->gem_free_object(obj);
1035 	}
1036 }
1037 EXPORT_SYMBOL(drm_gem_object_free);
1038 
1039 /**
1040  * drm_gem_object_put_unlocked - drop a GEM buffer object reference
1041  * @obj: GEM buffer object
1042  *
1043  * This releases a reference to @obj. Callers must not hold the
1044  * &drm_device.struct_mutex lock when calling this function.
1045  *
1046  * See also __drm_gem_object_put().
1047  */
1048 void
1049 drm_gem_object_put_unlocked(struct drm_gem_object *obj)
1050 {
1051 	struct drm_device *dev;
1052 
1053 	if (!obj)
1054 		return;
1055 
1056 	dev = obj->dev;
1057 
1058 	if (dev->driver->gem_free_object_unlocked) {
1059 		kref_put(&obj->refcount, drm_gem_object_free);
1060 	} else {
1061 		might_lock(&dev->struct_mutex);
1062 		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
1063 				&dev->struct_mutex))
1064 			mutex_unlock(&dev->struct_mutex);
1065 	}
1066 }
1067 EXPORT_SYMBOL(drm_gem_object_put_unlocked);
1068 
1069 /**
1070  * drm_gem_object_put - release a GEM buffer object reference
1071  * @obj: GEM buffer object
1072  *
1073  * This releases a reference to @obj. Callers must hold the
1074  * &drm_device.struct_mutex lock when calling this function, even when the
1075  * driver doesn't use &drm_device.struct_mutex for anything.
1076  *
1077  * For drivers not encumbered with legacy locking use
1078  * drm_gem_object_put_unlocked() instead.
1079  */
1080 void
1081 drm_gem_object_put(struct drm_gem_object *obj)
1082 {
1083 	if (obj) {
1084 		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
1085 
1086 		kref_put(&obj->refcount, drm_gem_object_free);
1087 	}
1088 }
1089 EXPORT_SYMBOL(drm_gem_object_put);
1090 
1091 #ifdef __linux__
1092 
1093 /**
1094  * drm_gem_vm_open - vma->ops->open implementation for GEM
1095  * @vma: VM area structure
1096  *
1097  * This function implements the #vm_operations_struct open() callback for GEM
1098  * drivers. This must be used together with drm_gem_vm_close().
1099  */
1100 void drm_gem_vm_open(struct vm_area_struct *vma)
1101 {
1102 	struct drm_gem_object *obj = vma->vm_private_data;
1103 
1104 	drm_gem_object_get(obj);
1105 }
1106 EXPORT_SYMBOL(drm_gem_vm_open);
1107 
1108 /**
1109  * drm_gem_vm_close - vma->ops->close implementation for GEM
1110  * @vma: VM area structure
1111  *
1112  * This function implements the #vm_operations_struct close() callback for GEM
1113  * drivers. This must be used together with drm_gem_vm_open().
1114  */
1115 void drm_gem_vm_close(struct vm_area_struct *vma)
1116 {
1117 	struct drm_gem_object *obj = vma->vm_private_data;
1118 
1119 	drm_gem_object_put_unlocked(obj);
1120 }
1121 EXPORT_SYMBOL(drm_gem_vm_close);
1122 
1123 /**
1124  * drm_gem_mmap_obj - memory map a GEM object
1125  * @obj: the GEM object to map
1126  * @obj_size: the object size to be mapped, in bytes
1127  * @vma: VMA for the area to be mapped
1128  *
1129  * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
1130  * provided by the driver. Depending on their requirements, drivers can either
1131  * provide a fault handler in their gem_vm_ops (in which case any accesses to
1132  * the object will be trapped, to perform migration, GTT binding, surface
1133  * register allocation, or performance monitoring), or mmap the buffer memory
1134  * synchronously after calling drm_gem_mmap_obj.
1135  *
1136  * This function is mainly intended to implement the DMABUF mmap operation, when
1137  * the GEM object is not looked up based on its fake offset. To implement the
1138  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
1139  *
1140  * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
1141  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
1142  * callers must verify access restrictions before calling this helper.
1143  *
1144  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
1145  * size, or if no gem_vm_ops are provided.
1146  */
1147 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1148 		     struct vm_area_struct *vma)
1149 {
1150 	struct drm_device *dev = obj->dev;
1151 
1152 	/* Check for valid size. */
1153 	if (obj_size < vma->vm_end - vma->vm_start)
1154 		return -EINVAL;
1155 
1156 	if (!dev->driver->gem_vm_ops)
1157 		return -EINVAL;
1158 
1159 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1160 	vma->vm_ops = dev->driver->gem_vm_ops;
1161 	vma->vm_private_data = obj;
1162 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1163 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1164 
1165 	/* Take a ref for this mapping of the object, so that the fault
1166 	 * handler can dereference the mmap offset's pointer to the object.
1167 	 * This reference is cleaned up by the corresponding vm_close
1168 	 * (which should happen whether the vma was created by this call, or
1169 	 * by a vm_open due to mremap or partial unmap or whatever).
1170 	 */
1171 	drm_gem_object_get(obj);
1172 
1173 	return 0;
1174 }
1175 EXPORT_SYMBOL(drm_gem_mmap_obj);
1176 
1177 /**
1178  * drm_gem_mmap - memory map routine for GEM objects
1179  * @filp: DRM file pointer
1180  * @vma: VMA for the area to be mapped
1181  *
1182  * If a driver supports GEM object mapping, mmap calls on the DRM file
1183  * descriptor will end up here.
1184  *
1185  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
1186  * contain the fake offset we created when the GTT map ioctl was called on
1187  * the object) and map it with a call to drm_gem_mmap_obj().
1188  *
1189  * If the caller is not granted access to the buffer object, the mmap will fail
1190  * with EACCES. Please see the vma manager for more information.
1191  */
1192 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1193 {
1194 	struct drm_file *priv = filp->private_data;
1195 	struct drm_device *dev = priv->minor->dev;
1196 	struct drm_gem_object *obj = NULL;
1197 	struct drm_vma_offset_node *node;
1198 	int ret;
1199 
1200 	if (drm_dev_is_unplugged(dev))
1201 		return -ENODEV;
1202 
1203 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1204 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1205 						  vma->vm_pgoff,
1206 						  vma_pages(vma));
1207 	if (likely(node)) {
1208 		obj = container_of(node, struct drm_gem_object, vma_node);
1209 		/*
1210 		 * When the object is being freed, after it hits 0-refcnt it
1211 		 * proceeds to tear down the object. In the process it will
1212 		 * attempt to remove the VMA offset and so acquire this
1213 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1214 		 * that matches our range, we know it is in the process of being
1215 		 * destroyed and will be freed as soon as we release the lock -
1216 		 * so we have to check for the 0-refcnted object and treat it as
1217 		 * invalid.
1218 		 */
1219 		if (!kref_get_unless_zero(&obj->refcount))
1220 			obj = NULL;
1221 	}
1222 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1223 
1224 	if (!obj)
1225 		return -EINVAL;
1226 
1227 	if (!drm_vma_node_is_allowed(node, priv)) {
1228 		drm_gem_object_put_unlocked(obj);
1229 		return -EACCES;
1230 	}
1231 
1232 	if (node->readonly) {
1233 		if (vma->vm_flags & VM_WRITE) {
1234 			drm_gem_object_put_unlocked(obj);
1235 			return -EINVAL;
1236 		}
1237 
1238 		vma->vm_flags &= ~VM_MAYWRITE;
1239 	}
1240 
1241 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1242 			       vma);
1243 
1244 	drm_gem_object_put_unlocked(obj);
1245 
1246 	return ret;
1247 }
1248 EXPORT_SYMBOL(drm_gem_mmap);
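
/*
 * On Linux, drm_gem_mmap() is reached through the driver's
 * file_operations (a sketch under the usual conventions; whether a
 * particular driver uses exactly these hooks is an assumption):
 *
 *	static const struct file_operations my_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *	};
 */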
1249 
1250 #endif
1251 
1252 void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1253 			const struct drm_gem_object *obj)
1254 {
1255 	drm_printf_indent(p, indent, "name=%d\n", obj->name);
1256 	drm_printf_indent(p, indent, "refcount=%u\n",
1257 			  kref_read(&obj->refcount));
1258 	drm_printf_indent(p, indent, "start=%08lx\n",
1259 			  drm_vma_node_start(&obj->vma_node));
1260 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1261 	drm_printf_indent(p, indent, "imported=%s\n",
1262 			  obj->import_attach ? "yes" : "no");
1263 
1264 	if (obj->dev->driver->gem_print_info)
1265 		obj->dev->driver->gem_print_info(p, indent, obj);
1266 }
1267