/*	$NetBSD: drm_gem.c,v 1.25 2024/05/22 15:59:12 riastradh Exp $	*/

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem.c,v 1.25 2024/05/22 15:59:12 riastradh Exp $");

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

#ifdef __NetBSD__
#include <uvm/uvm_extern.h>
#include <linux/nbsd-namespace.h>
#endif

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;

	idr_destroy(&dev->object_name_idr);
	mutex_destroy(&dev->object_name_lock);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
#ifndef __NetBSD__
	struct file *filp;
#endif

	drm_gem_private_object_init(dev, obj, size);

#ifdef __NetBSD__
	/*
	 * A uao may not have size 0, but a gem object may.  Allocate a
	 * spurious page so we needn't teach uao how to have size 0.
	 */
	obj->filp = uao_create(MAX(size, PAGE_SIZE), 0);
	/*
	 * XXX This is gross.  We ought to do it the other way around:
	 * set the uao to have the main uvm object's lock.  However,
	 * uvm_obj_setlock is not safe on uvm_aobjs.
	 */
	rw_obj_hold(obj->filp->vmobjlock);
	uvm_obj_setlock(&obj->gemo_uvmobj, obj->filp->vmobjlock);
#else
	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
#endif

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __NetBSD__
	obj->filp = NULL;
	KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
	KASSERT(dev->driver->gem_uvm_ops != NULL);
	uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops,
	    /*allocate lock*/true, /*nrefs*/1);
#else
	obj->filp = NULL;
#endif

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

#ifdef __NetBSD__
	drm_vma_node_init(&obj->vma_node);
#else
	drm_vma_node_reset(&obj->vma_node);
#endif
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
#endif
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
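
/*
 * Illustrative sketch (not part of this file): a typical driver-side
 * buffer creation path combining drm_gem_object_init() with
 * drm_gem_handle_create().  example_bo_create() is hypothetical, and a
 * real driver would embed the GEM object in a driver-specific BO
 * structure rather than allocate it bare.
 */
#if 0
static int example_bo_create(struct drm_device *dev, struct drm_file *file,
			     size_t size, u32 *handle_out)
{
	struct drm_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, obj, round_up(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ret;
	}

	/* Publishing the handle must be last: it exposes obj to userspace. */
	ret = drm_gem_handle_create(file, obj, handle_out);

	/* Drop the creation reference; the handle now keeps obj alive. */
	drm_gem_object_put_unlocked(obj);
	return ret;
}
#endif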


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
#ifndef __NetBSD__
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}
#endif

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
#ifdef __NetBSD__
struct page **
drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct vm_page *vm_page;
	struct page **pages;
	unsigned i, npages;
	int ret;

	KASSERT((obj->size & (PAGE_SIZE - 1)) == 0);

	npages = obj->size >> PAGE_SHIFT;
	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto fail0;
	}

	/* XXX errno NetBSD->Linux */
	ret = -uvm_obj_wirepages(obj->filp, 0, obj->size, NULL);
	if (ret)
		goto fail1;

	rw_enter(obj->filp->vmobjlock, RW_READER);
	for (i = 0; i < npages; i++) {
		vm_page = uvm_pagelookup(obj->filp, ptoa(i));
		pages[i] = container_of(vm_page, struct page, p_vmp);
	}
	rw_exit(obj->filp->vmobjlock);

	return pages;

fail1:	kvfree(pages);
fail0:	return ERR_PTR(ret);
}
#else
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
#endif
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
#ifdef __NetBSD__
void
drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty,
    bool accessed __unused /* XXX */)
{
	unsigned i;

	for (i = 0; i < (obj->size >> PAGE_SHIFT); i++) {
		if (dirty) {
			rw_enter(obj->filp->vmobjlock, RW_WRITER);
			uvm_pagemarkdirty(&pages[i]->p_vmp,
			    UVM_PAGE_STATUS_DIRTY);
			rw_exit(obj->filp->vmobjlock);
		}
	}

	uvm_obj_unwirepages(obj->filp, 0, obj->size);
}
#else
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
#endif
EXPORT_SYMBOL(drm_gem_put_pages);
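
/*
 * Illustrative sketch (not part of this file): the usual pairing of
 * drm_gem_get_pages()/drm_gem_put_pages() around a CPU access.  The
 * helper name example_touch_pages() is hypothetical.
 */
#if 0
static int example_touch_pages(struct drm_gem_object *obj)
{
	struct page **pages;

	pages = drm_gem_get_pages(obj);	/* pins the whole object */
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... access the backing pages with the CPU here ... */

	/* Unpin; mark dirty so modified contents survive swap-out. */
	drm_gem_put_pages(obj, pages, true, true);
	return 0;
}
#endif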

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handle
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 *
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			     GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
	*objs_out = objs;

out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
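
/*
 * Illustrative sketch (not part of this file): the lookup/use/put
 * pattern that ioctl handlers built on this helper follow.
 * example_bo_op() is a hypothetical name.
 */
#if 0
static int example_bo_op(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL)
		return -ENOENT;

	/* ... operate on obj while holding the lookup reference ... */

	drm_gem_object_put_unlocked(obj);
	return 0;
}
#endif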

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or 0 on
 * success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
				    bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
						  true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
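
/*
 * Illustrative sketch (not part of this file): a driver wait ioctl
 * built on drm_gem_dma_resv_wait().  The inline argument struct and
 * the nsecs_to_jiffies() conversion are assumptions for illustration.
 */
#if 0
static int example_wait_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct example_wait_args { u32 handle; u64 timeout_ns; } *args = data;
	long ret;

	ret = drm_gem_dma_resv_wait(file_priv, args->handle, /*wait_all*/true,
				    nsecs_to_jiffies(args->timeout_ns));
	/* 0 on success, -ETIME on timeout, -ERESTARTSYS if interrupted. */
	return ret;
}
#endif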

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	idr_preload(GFP_KERNEL);
	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	idr_preload_end();
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
#ifdef __NetBSD__
	spin_lock_destroy(&file_private->table_lock);
#endif
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
	WARN_ON(obj->dma_buf);
#endif

#ifdef __NetBSD__
	if (obj->filp)
		uao_detach(obj->filp);
	uvm_obj_destroy(&obj->gemo_uvmobj, /*free lock*/true);
#else
	if (obj->filp)
		fput(obj->filp);
#endif

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
#ifdef __NetBSD__
	drm_vma_node_destroy(&obj->vma_node);
#endif
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs) {
		obj->funcs->free(obj);
	} else if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;

	if (dev->driver->gem_free_object) {
		might_lock(&dev->struct_mutex);
		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				&dev->struct_mutex))
			mutex_unlock(&dev->struct_mutex);
	} else {
		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

#ifndef __NetBSD__
/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put_unlocked(obj);
			return ret;
		}
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (obj->funcs && obj->funcs->vm_ops)
			vma->vm_ops = obj->funcs->vm_ops;
		else if (dev->driver->gem_vm_ops)
			vma->vm_ops = dev->driver->gem_vm_ops;
		else {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	vma->vm_private_data = obj;

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
#endif	/* !defined(__NetBSD__) */

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
	else if (obj->dev->driver->gem_print_info)
		obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
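
/*
 * Illustrative sketch (not part of this file): bracketing a kernel CPU
 * access with drm_gem_vmap()/drm_gem_vunmap().  example_clear_bo() is
 * a hypothetical name.
 */
#if 0
static int example_clear_bo(struct drm_gem_object *obj)
{
	void *vaddr;

	vaddr = drm_gem_vmap(obj);	/* ERR_PTR on failure, never NULL */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->size);

	drm_gem_vunmap(obj, vaddr);
	return 0;
}
#endif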

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
								 acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
							    acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
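
/*
 * Illustrative sketch (not part of this file): the lock/submit/unlock
 * shape described in the kernel-doc above.  example_submit() and
 * example_push_job() are hypothetical.
 */
#if 0
static int example_submit(struct drm_gem_object **objs, int count)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	/* All reservations held: queue the job and attach its fences. */
	ret = example_push_job(objs, count);

	drm_gem_unlock_reservations(objs, count, &ctx);
	return ret;
}
#endif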

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
						&fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
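
/*
 * Illustrative sketch (not part of this file): gathering the implicit
 * dependencies of a job's buffers into an xarray before submission.
 * example_gather_deps() is a hypothetical name; the xarray is assumed
 * to have been initialized with xa_init_flags(deps, XA_FLAGS_ALLOC),
 * which drm_gem_fence_array_add() needs for xa_alloc().
 */
#if 0
static int example_gather_deps(struct xarray *deps,
			       struct drm_gem_object **objs, int count,
			       bool write)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = drm_gem_fence_array_add_implicit(deps, objs[i], write);
		if (ret)
			return ret;
	}
	return 0;
}
#endif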