xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c (revision f84b1df5a16cdd762c93854218de246e79975d3b)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/ktime.h>
29 #include <linux/module.h>
30 #include <linux/pagemap.h>
31 #include <linux/pci.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/drm_gem_ttm_helper.h>
37 
38 #include "amdgpu.h"
39 #include "amdgpu_display.h"
40 #include "amdgpu_dma_buf.h"
41 #include "amdgpu_xgmi.h"
42 
43 static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
44 
45 #ifdef __linux__
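/*
 * GEM/TTM page fault handler: reserve the BO, give the driver a chance to
 * move it somewhere CPU-accessible in amdgpu_bo_fault_reserve_notify(), then
 * let TTM fill in the page tables via ttm_bo_vm_fault_reserved().  If the
 * device has been unplugged, a dummy page is mapped instead.
 */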
46 static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
47 {
48 	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
49 	struct drm_device *ddev = bo->base.dev;
50 	vm_fault_t ret;
51 	int idx;
52 
53 	ret = ttm_bo_vm_reserve(bo, vmf);
54 	if (ret)
55 		return ret;
56 
57 	if (drm_dev_enter(ddev, &idx)) {
58 		ret = amdgpu_bo_fault_reserve_notify(bo);
59 		if (ret) {
60 			drm_dev_exit(idx);
61 			goto unlock;
62 		}
63 
64 		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
65 						TTM_BO_VM_NUM_PREFAULT);
66 
67 		drm_dev_exit(idx);
68 	} else {
69 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
70 	}
71 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
72 		return ret;
73 
74 unlock:
75 	dma_resv_unlock(bo->base.resv);
76 	return ret;
77 }
78 
79 static const struct vm_operations_struct amdgpu_gem_vm_ops = {
80 	.fault = amdgpu_gem_fault,
81 	.open = ttm_bo_vm_open,
82 	.close = ttm_bo_vm_close,
83 	.access = ttm_bo_vm_access
84 };
85 #else /* !__linux__ */
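/*
 * OpenBSD: UVM pager fault hook.  Mirrors the Linux handler above, but
 * translates the VM_FAULT_* results into the VM_PAGER_* codes UVM expects
 * and releases the UVM fault locks before returning.
 */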
86 int
87 amdgpu_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
88     int npages, int centeridx, vm_fault_t fault_type,
89     vm_prot_t access_type, int flags)
90 {
91 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
92 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
93 	struct drm_device *ddev = bo->base.dev;
94 	vm_fault_t ret;
95 	int idx;
96 
97 	ret = ttm_bo_vm_reserve(bo);
98 	if (ret) {
99 		switch (ret) {
100 		case VM_FAULT_NOPAGE:
101 			ret = VM_PAGER_OK;
102 			break;
103 		case VM_FAULT_RETRY:
104 			ret = VM_PAGER_REFAULT;
105 			break;
106 		default:
107 			ret = VM_PAGER_BAD;
108 			break;
109 		}
110 		uvmfault_unlockall(ufi, NULL, uobj);
111 		return ret;
112 	}
113 
114 	if (drm_dev_enter(ddev, &idx)) {
115 		ret = amdgpu_bo_fault_reserve_notify(bo);
116 		if (ret) {
117 			drm_dev_exit(idx);
118 			goto unlock;
119 		}
120 
121 		ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
122 						TTM_BO_VM_NUM_PREFAULT, 1);
123 
124 		drm_dev_exit(idx);
125 	} else {
126 		STUB();
127 #ifdef notyet
128 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
129 #endif
130 	}
131 #ifdef __linux__
132 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
133 		return ret;
134 #endif
135 
136 unlock:
137 	switch (ret) {
138 	case VM_FAULT_NOPAGE:
139 		ret = VM_PAGER_OK;
140 		break;
141 	case VM_FAULT_RETRY:
142 		ret = VM_PAGER_REFAULT;
143 		break;
144 	default:
145 		ret = VM_PAGER_BAD;
146 		break;
147 	}
148 	dma_resv_unlock(bo->base.resv);
149 	uvmfault_unlockall(ufi, NULL, uobj);
150 	return ret;
151 }
152 
153 void
154 amdgpu_gem_vm_reference(struct uvm_object *uobj)
155 {
156 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
157 
158 	ttm_bo_get(bo);
159 }
160 
161 void
162 amdgpu_gem_vm_detach(struct uvm_object *uobj)
163 {
164 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
165 
166 	ttm_bo_put(bo);
167 }
168 
169 static const struct uvm_pagerops amdgpu_gem_vm_ops = {
170 	.pgo_fault = amdgpu_gem_fault,
171 	.pgo_reference = amdgpu_gem_vm_reference,
172 	.pgo_detach = amdgpu_gem_vm_detach
173 };
174 #endif /* !__linux__ */
175 
176 static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
177 {
178 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
179 
180 	if (robj) {
181 		amdgpu_mn_unregister(robj);
182 		amdgpu_bo_unref(&robj);
183 	}
184 }
185 
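/*
 * Create a user BO with the requested size, alignment, domain and flags and
 * return the GEM object embedded in it, wired up to amdgpu_gem_object_funcs.
 */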
186 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
187 			     int alignment, u32 initial_domain,
188 			     u64 flags, enum ttm_bo_type type,
189 			     struct dma_resv *resv,
190 			     struct drm_gem_object **obj)
191 {
192 	struct amdgpu_bo *bo;
193 	struct amdgpu_bo_user *ubo;
194 	struct amdgpu_bo_param bp;
195 	int r;
196 
197 	memset(&bp, 0, sizeof(bp));
198 	*obj = NULL;
199 
200 	bp.size = size;
201 	bp.byte_align = alignment;
202 	bp.type = type;
203 	bp.resv = resv;
204 	bp.preferred_domain = initial_domain;
205 	bp.flags = flags;
206 	bp.domain = initial_domain;
207 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
208 
209 	r = amdgpu_bo_create_user(adev, &bp, &ubo);
210 	if (r)
211 		return r;
212 
213 	bo = &ubo->bo;
214 	*obj = &bo->tbo.base;
215 	(*obj)->funcs = &amdgpu_gem_object_funcs;
216 
217 	return 0;
218 }
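/*
 * Typical call pattern (a sketch modelled on amdgpu_gem_create_ioctl() and
 * amdgpu_mode_dumb_create() below): the new GEM object carries the only
 * reference, which the caller turns into a userspace handle and then drops:
 *
 *	r = amdgpu_gem_object_create(adev, size, align, domain, flags,
 *				     ttm_bo_type_device, NULL, &gobj);
 *	if (r)
 *		return r;
 *	r = drm_gem_handle_create(filp, gobj, &handle);
 *	drm_gem_object_put(gobj);
 *
 * after which the handle holds the remaining reference.
 */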
219 
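/*
 * OpenBSD keeps the open DRM files in a splay tree (ddev->files) rather than
 * the Linux filelist, so pull in the comparator needed to walk it below.
 */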
220 int	drm_file_cmp(struct drm_file *, struct drm_file *);
221 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
222 
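/*
 * Warn about user space clients that still have the device open and forcibly
 * drop every GEM handle they are still holding.
 */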
223 void amdgpu_gem_force_release(struct amdgpu_device *adev)
224 {
225 	struct drm_device *ddev = adev_to_drm(adev);
226 	struct drm_file *file;
227 
228 	mutex_lock(&ddev->filelist_mutex);
229 
230 #ifdef __linux__
231 	list_for_each_entry(file, &ddev->filelist, lhead) {
232 #else
233 	SPLAY_FOREACH(file, drm_file_tree, &ddev->files) {
234 #endif
235 		struct drm_gem_object *gobj;
236 		int handle;
237 
238 		WARN_ONCE(1, "Still active user space clients!\n");
239 		spin_lock(&file->table_lock);
240 		idr_for_each_entry(&file->object_idr, gobj, handle) {
241 			WARN_ONCE(1, "And also active allocations!\n");
242 			drm_gem_object_put(gobj);
243 		}
244 		idr_destroy(&file->object_idr);
245 		spin_unlock(&file->table_lock);
246 	}
247 
248 	mutex_unlock(&ddev->filelist_mutex);
249 }
250 
251 /*
252  * Called from drm_gem_handle_create(), which appears in both the new and
253  * open ioctl paths.
254  */
255 static int amdgpu_gem_object_open(struct drm_gem_object *obj,
256 				  struct drm_file *file_priv)
257 {
258 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
259 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
260 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
261 	struct amdgpu_vm *vm = &fpriv->vm;
262 	struct amdgpu_bo_va *bo_va;
263 #ifdef notyet
264 	struct mm_struct *mm;
265 #endif
266 	int r;
267 
268 #ifdef notyet
269 	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
270 	if (mm && mm != current->mm)
271 		return -EPERM;
272 #endif
273 
274 	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
275 	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
276 		return -EPERM;
277 
278 	r = amdgpu_bo_reserve(abo, false);
279 	if (r)
280 		return r;
281 
282 	bo_va = amdgpu_vm_bo_find(vm, abo);
283 	if (!bo_va) {
284 		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
285 	} else {
286 		++bo_va->ref_count;
287 	}
288 	amdgpu_bo_unreserve(abo);
289 	return 0;
290 }
291 
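/*
 * Called when a GEM handle is closed: reserve the BO together with the VM
 * page directory, drop the bo_va once its reference count reaches zero and
 * fence the clearing of the freed page table entries against the BO.
 */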
292 static void amdgpu_gem_object_close(struct drm_gem_object *obj,
293 				    struct drm_file *file_priv)
294 {
295 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
296 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
297 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
298 	struct amdgpu_vm *vm = &fpriv->vm;
299 
300 	struct amdgpu_bo_list_entry vm_pd;
301 	struct list_head list, duplicates;
302 	struct dma_fence *fence = NULL;
303 	struct ttm_validate_buffer tv;
304 	struct ww_acquire_ctx ticket;
305 	struct amdgpu_bo_va *bo_va;
306 	long r;
307 
308 	INIT_LIST_HEAD(&list);
309 	INIT_LIST_HEAD(&duplicates);
310 
311 	tv.bo = &bo->tbo;
312 	tv.num_shared = 2;
313 	list_add(&tv.head, &list);
314 
315 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
316 
317 	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
318 	if (r) {
319 		dev_err(adev->dev, "leaking bo va because "
320 			"we failed to reserve the bo (%ld)\n", r);
321 		return;
322 	}
323 	bo_va = amdgpu_vm_bo_find(vm, bo);
324 	if (!bo_va || --bo_va->ref_count)
325 		goto out_unlock;
326 
327 	amdgpu_vm_bo_rmv(adev, bo_va);
328 	if (!amdgpu_vm_ready(vm))
329 		goto out_unlock;
330 
331 	fence = dma_resv_excl_fence(bo->tbo.base.resv);
332 	if (fence) {
333 		amdgpu_bo_fence(bo, fence, true);
334 		fence = NULL;
335 	}
336 
337 	r = amdgpu_vm_clear_freed(adev, vm, &fence);
338 	if (r || !fence)
339 		goto out_unlock;
340 
341 	amdgpu_bo_fence(bo, fence, true);
342 	dma_fence_put(fence);
343 
344 out_unlock:
345 	if (unlikely(r < 0))
346 		dev_err(adev->dev, "failed to clear page "
347 			"tables on GEM object close (%ld)\n", r);
348 	ttm_eu_backoff_reservation(&ticket, &list);
349 }
350 
351 #ifdef __linux__
352 static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
353 {
354 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
355 
356 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
357 		return -EPERM;
358 	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
359 		return -EPERM;
360 
361 	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
362 	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
363 	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
364 	 * becoming writable and makes is_cow_mapping(vm_flags) false.
365 	 */
366 	if (is_cow_mapping(vma->vm_flags) &&
367 	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
368 		vma->vm_flags &= ~VM_MAYWRITE;
369 
370 	return drm_gem_ttm_mmap(obj, vma);
371 }
372 #else
373 static int amdgpu_gem_object_mmap(struct drm_gem_object *obj,
374     vm_prot_t accessprot, voff_t off, vsize_t size)
375 {
376 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
377 
378 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
379 		return -EPERM;
380 	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
381 		return -EPERM;
382 
383 	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
384 	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
385 	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
386 	 * becoming writable and makes is_cow_mapping(vm_flags) false.
387 	 */
388 #ifdef notyet
389 	if (is_cow_mapping(vma->vm_flags) &&
390 	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
391 		vma->vm_flags &= ~VM_MAYWRITE;
392 #endif
393 
394 	return drm_gem_ttm_mmap(obj, accessprot, off, size);
395 }
396 #endif
397 
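/* GEM object callbacks; mmap and vm_ops dispatch to the per-OS handlers above. */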
398 static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
399 	.free = amdgpu_gem_object_free,
400 	.open = amdgpu_gem_object_open,
401 	.close = amdgpu_gem_object_close,
402 	.export = amdgpu_gem_prime_export,
403 	.vmap = drm_gem_ttm_vmap,
404 	.vunmap = drm_gem_ttm_vunmap,
405 	.mmap = amdgpu_gem_object_mmap,
406 	.vm_ops = &amdgpu_gem_vm_ops,
407 };
408 
409 /*
410  * GEM ioctls.
411  */
412 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
413 			    struct drm_file *filp)
414 {
415 	struct amdgpu_device *adev = drm_to_adev(dev);
416 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
417 	struct amdgpu_vm *vm = &fpriv->vm;
418 	union drm_amdgpu_gem_create *args = data;
419 	uint64_t flags = args->in.domain_flags;
420 	uint64_t size = args->in.bo_size;
421 	struct dma_resv *resv = NULL;
422 	struct drm_gem_object *gobj;
423 	uint32_t handle, initial_domain;
424 	int r;
425 
426 	/* reject invalid gem flags */
427 	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
428 		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
429 		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
430 		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
431 		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
432 		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
433 		      AMDGPU_GEM_CREATE_ENCRYPTED))
434 
435 		return -EINVAL;
436 
437 	/* reject invalid gem domains */
438 	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
439 		return -EINVAL;
440 
441 	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
442 		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
443 		return -EINVAL;
444 	}
445 
446 	/* create a GEM object to contain this buffer object */
447 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
448 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
449 		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
450 			/* if gds bo is created from user space, it must be
451 			 * passed to bo list
452 			 */
453 			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
454 			return -EINVAL;
455 		}
456 		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
457 	}
458 
459 	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
460 		r = amdgpu_bo_reserve(vm->root.bo, false);
461 		if (r)
462 			return r;
463 
464 		resv = vm->root.bo->tbo.base.resv;
465 	}
466 
467 	initial_domain = (u32)(0xffffffff & args->in.domains);
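	/*
	 * Allocation fallback ladder: if creation fails, first retry without
	 * CPU_ACCESS_REQUIRED, then allow GTT as an additional domain for a
	 * VRAM-only request.
	 */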
468 retry:
469 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
470 				     initial_domain,
471 				     flags, ttm_bo_type_device, resv, &gobj);
472 	if (r && r != -ERESTARTSYS) {
473 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
474 			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
475 			goto retry;
476 		}
477 
478 		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
479 			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
480 			goto retry;
481 		}
482 		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
483 				size, initial_domain, args->in.alignment, r);
484 	}
485 
486 	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
487 		if (!r) {
488 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
489 
490 			abo->parent = amdgpu_bo_ref(vm->root.bo);
491 		}
492 		amdgpu_bo_unreserve(vm->root.bo);
493 	}
494 	if (r)
495 		return r;
496 
497 	r = drm_gem_handle_create(filp, gobj, &handle);
498 	/* drop reference from allocate - handle holds it now */
499 	drm_gem_object_put(gobj);
500 	if (r)
501 		return r;
502 
503 	memset(args, 0, sizeof(*args));
504 	args->out.handle = handle;
505 	return 0;
506 }
507 
508 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
509 			     struct drm_file *filp)
510 {
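	/*
	 * Userptr BOs are not supported by this port yet: the Linux
	 * implementation below depends on MMU notifiers and user page
	 * pinning and is kept under "notyet" for reference.
	 */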
511 	return -ENOSYS;
512 #ifdef notyet
513 	struct ttm_operation_ctx ctx = { true, false };
514 	struct amdgpu_device *adev = drm_to_adev(dev);
515 	struct drm_amdgpu_gem_userptr *args = data;
516 	struct drm_gem_object *gobj;
517 	struct amdgpu_bo *bo;
518 	uint32_t handle;
519 	int r;
520 
521 	args->addr = untagged_addr(args->addr);
522 
523 	if (offset_in_page(args->addr | args->size))
524 		return -EINVAL;
525 
526 	/* reject unknown flag values */
527 	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
528 	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
529 	    AMDGPU_GEM_USERPTR_REGISTER))
530 		return -EINVAL;
531 
532 	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
533 	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
534 
535 		/* if we want to write to it, we must install an MMU notifier */
536 		return -EACCES;
537 	}
538 
539 	/* create a GEM object to contain this buffer object */
540 	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
541 				     0, ttm_bo_type_device, NULL, &gobj);
542 	if (r)
543 		return r;
544 
545 	bo = gem_to_amdgpu_bo(gobj);
546 	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
547 	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
548 	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
549 	if (r)
550 		goto release_object;
551 
552 	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
553 		r = amdgpu_mn_register(bo, args->addr);
554 		if (r)
555 			goto release_object;
556 	}
557 
558 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
559 		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
560 		if (r)
561 			goto release_object;
562 
563 		r = amdgpu_bo_reserve(bo, true);
564 		if (r)
565 			goto user_pages_done;
566 
567 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
568 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
569 		amdgpu_bo_unreserve(bo);
570 		if (r)
571 			goto user_pages_done;
572 	}
573 
574 	r = drm_gem_handle_create(filp, gobj, &handle);
575 	if (r)
576 		goto user_pages_done;
577 
578 	args->handle = handle;
579 
580 user_pages_done:
581 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
582 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
583 
584 release_object:
585 	drm_gem_object_put(gobj);
586 
587 	return r;
588 #endif
589 }
590 
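/*
 * Look up a handle and report the fake mmap offset of its BO; user pointer
 * BOs and BOs created with NO_CPU_ACCESS cannot be mapped from the CPU.
 */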
591 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
592 			  struct drm_device *dev,
593 			  uint32_t handle, uint64_t *offset_p)
594 {
595 	struct drm_gem_object *gobj;
596 	struct amdgpu_bo *robj;
597 
598 	gobj = drm_gem_object_lookup(filp, handle);
599 	if (gobj == NULL) {
600 		return -ENOENT;
601 	}
602 	robj = gem_to_amdgpu_bo(gobj);
603 	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
604 	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
605 		drm_gem_object_put(gobj);
606 		return -EPERM;
607 	}
608 	*offset_p = amdgpu_bo_mmap_offset(robj);
609 	drm_gem_object_put(gobj);
610 	return 0;
611 }
612 
613 int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
614 			  struct drm_file *filp)
615 {
616 	union drm_amdgpu_gem_mmap *args = data;
617 	uint32_t handle = args->in.handle;
618 	memset(args, 0, sizeof(*args));
619 	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
620 }
621 
622 /**
623  * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
624  *
625  * @timeout_ns: timeout in ns
626  *
627  * Calculate the timeout in jiffies from an absolute timeout in ns.
628  */
629 unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
630 {
631 	unsigned long timeout_jiffies;
632 	ktime_t timeout;
633 
634 	/* clamp timeout if it's too large */
635 	if (((int64_t)timeout_ns) < 0)
636 		return MAX_SCHEDULE_TIMEOUT;
637 
638 	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
639 	if (ktime_to_ns(timeout) < 0)
640 		return 0;
641 
642 	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
643 	/* clamp timeout to avoid unsigned -> signed overflow */
644 	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
645 		return MAX_SCHEDULE_TIMEOUT - 1;
646 
647 	return timeout_jiffies;
648 }
649 
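/*
 * Wait for all fences on the BO's reservation object, up to the absolute
 * timeout supplied by user space; out.status is 1 if the BO is still busy
 * when the timeout expires and 0 once it is idle.
 */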
650 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
651 			      struct drm_file *filp)
652 {
653 	union drm_amdgpu_gem_wait_idle *args = data;
654 	struct drm_gem_object *gobj;
655 	struct amdgpu_bo *robj;
656 	uint32_t handle = args->in.handle;
657 	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
658 	int r = 0;
659 	long ret;
660 
661 	gobj = drm_gem_object_lookup(filp, handle);
662 	if (gobj == NULL) {
663 		return -ENOENT;
664 	}
665 	robj = gem_to_amdgpu_bo(gobj);
666 	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
667 
668 	/* ret == 0 means not signaled,
669 	 * ret > 0 means signaled,
670 	 * ret < 0 means interrupted before timeout
671 	 */
672 	if (ret >= 0) {
673 		memset(args, 0, sizeof(*args));
674 		args->out.status = (ret == 0);
675 	} else
676 		r = ret;
677 
678 	drm_gem_object_put(gobj);
679 	return r;
680 }
681 
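/*
 * Get or set the tiling flags and the opaque metadata blob attached to a BO,
 * depending on args->op.
 */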
682 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
683 				struct drm_file *filp)
684 {
685 	struct drm_amdgpu_gem_metadata *args = data;
686 	struct drm_gem_object *gobj;
687 	struct amdgpu_bo *robj;
688 	int r = -1;
689 
690 	DRM_DEBUG("%d\n", args->handle);
691 	gobj = drm_gem_object_lookup(filp, args->handle);
692 	if (gobj == NULL)
693 		return -ENOENT;
694 	robj = gem_to_amdgpu_bo(gobj);
695 
696 	r = amdgpu_bo_reserve(robj, false);
697 	if (unlikely(r != 0))
698 		goto out;
699 
700 	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
701 		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
702 		r = amdgpu_bo_get_metadata(robj, args->data.data,
703 					   sizeof(args->data.data),
704 					   &args->data.data_size_bytes,
705 					   &args->data.flags);
706 	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
707 		if (args->data.data_size_bytes > sizeof(args->data.data)) {
708 			r = -EINVAL;
709 			goto unreserve;
710 		}
711 		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
712 		if (!r)
713 			r = amdgpu_bo_set_metadata(robj, args->data.data,
714 						   args->data.data_size_bytes,
715 						   args->data.flags);
716 	}
717 
718 unreserve:
719 	amdgpu_bo_unreserve(robj);
720 out:
721 	drm_gem_object_put(gobj);
722 	return r;
723 }
724 
725 /**
726  * amdgpu_gem_va_update_vm - update the bo_va in its VM
727  *
728  * @adev: amdgpu_device pointer
729  * @vm: vm to update
730  * @bo_va: bo_va to update
731  * @operation: map, unmap or clear
732  *
733  * Update the bo_va directly after setting its address. Errors are not
734  * vital here, so they are not reported back to userspace.
735  */
736 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
737 				    struct amdgpu_vm *vm,
738 				    struct amdgpu_bo_va *bo_va,
739 				    uint32_t operation)
740 {
741 	int r;
742 
743 	if (!amdgpu_vm_ready(vm))
744 		return;
745 
746 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
747 	if (r)
748 		goto error;
749 
750 	if (operation == AMDGPU_VA_OP_MAP ||
751 	    operation == AMDGPU_VA_OP_REPLACE) {
752 		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
753 		if (r)
754 			goto error;
755 	}
756 
757 	r = amdgpu_vm_update_pdes(adev, vm, false);
758 
759 error:
760 	if (r && r != -ERESTARTSYS)
761 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
762 }
763 
764 /**
765  * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
766  *
767  * @adev: amdgpu_device pointer
768  * @flags: GEM UAPI flags
769  *
770  * Returns the GEM UAPI flags translated into hardware PTE flags for this ASIC.
771  */
772 uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
773 {
774 	uint64_t pte_flag = 0;
775 
776 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
777 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
778 	if (flags & AMDGPU_VM_PAGE_READABLE)
779 		pte_flag |= AMDGPU_PTE_READABLE;
780 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
781 		pte_flag |= AMDGPU_PTE_WRITEABLE;
782 	if (flags & AMDGPU_VM_PAGE_PRT)
783 		pte_flag |= AMDGPU_PTE_PRT;
784 
785 	if (adev->gmc.gmc_funcs->map_mtype)
786 		pte_flag |= amdgpu_gmc_map_mtype(adev,
787 						 flags & AMDGPU_VM_MTYPE_MASK);
788 
789 	return pte_flag;
790 }
791 
792 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
793 			  struct drm_file *filp)
794 {
795 	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
796 		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
797 		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
798 	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
799 		AMDGPU_VM_PAGE_PRT;
800 
801 	struct drm_amdgpu_gem_va *args = data;
802 	struct drm_gem_object *gobj;
803 	struct amdgpu_device *adev = drm_to_adev(dev);
804 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
805 	struct amdgpu_bo *abo;
806 	struct amdgpu_bo_va *bo_va;
807 	struct amdgpu_bo_list_entry vm_pd;
808 	struct ttm_validate_buffer tv;
809 	struct ww_acquire_ctx ticket;
810 	struct list_head list, duplicates;
811 	uint64_t va_flags;
812 	uint64_t vm_size;
813 	int r = 0;
814 
815 	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
816 		dev_dbg(dev->dev,
817 			"va_address 0x%llX is in reserved area 0x%llX\n",
818 			args->va_address, AMDGPU_VA_RESERVED_SIZE);
819 		return -EINVAL;
820 	}
821 
822 	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
823 	    args->va_address < AMDGPU_GMC_HOLE_END) {
824 		dev_dbg(dev->dev,
825 			"va_address 0x%llX is in VA hole 0x%llX-0x%llX\n",
826 			args->va_address, AMDGPU_GMC_HOLE_START,
827 			AMDGPU_GMC_HOLE_END);
828 		return -EINVAL;
829 	}
830 
831 	args->va_address &= AMDGPU_GMC_HOLE_MASK;
832 
833 	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
834 	vm_size -= AMDGPU_VA_RESERVED_SIZE;
835 	if (args->va_address + args->map_size > vm_size) {
836 		dev_dbg(dev->dev,
837 			"va_address 0x%llx is in top reserved area 0x%llx\n",
838 			args->va_address + args->map_size, vm_size);
839 		return -EINVAL;
840 	}
841 
842 	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
843 		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
844 			args->flags);
845 		return -EINVAL;
846 	}
847 
848 	switch (args->operation) {
849 	case AMDGPU_VA_OP_MAP:
850 	case AMDGPU_VA_OP_UNMAP:
851 	case AMDGPU_VA_OP_CLEAR:
852 	case AMDGPU_VA_OP_REPLACE:
853 		break;
854 	default:
855 		dev_dbg(dev->dev, "unsupported operation %d\n",
856 			args->operation);
857 		return -EINVAL;
858 	}
859 
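	/*
	 * Reserve the target BO (when one is involved) together with the VM
	 * page directory under a single ww ticket before touching mappings.
	 */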
860 	INIT_LIST_HEAD(&list);
861 	INIT_LIST_HEAD(&duplicates);
862 	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
863 	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
864 		gobj = drm_gem_object_lookup(filp, args->handle);
865 		if (gobj == NULL)
866 			return -ENOENT;
867 		abo = gem_to_amdgpu_bo(gobj);
868 		tv.bo = &abo->tbo;
869 		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
870 			tv.num_shared = 1;
871 		else
872 			tv.num_shared = 0;
873 		list_add(&tv.head, &list);
874 	} else {
875 		gobj = NULL;
876 		abo = NULL;
877 	}
878 
879 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
880 
881 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
882 	if (r)
883 		goto error_unref;
884 
885 	if (abo) {
886 		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
887 		if (!bo_va) {
888 			r = -ENOENT;
889 			goto error_backoff;
890 		}
891 	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
892 		bo_va = fpriv->prt_va;
893 	} else {
894 		bo_va = NULL;
895 	}
896 
897 	switch (args->operation) {
898 	case AMDGPU_VA_OP_MAP:
899 		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
900 		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
901 				     args->offset_in_bo, args->map_size,
902 				     va_flags);
903 		break;
904 	case AMDGPU_VA_OP_UNMAP:
905 		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
906 		break;
907 
908 	case AMDGPU_VA_OP_CLEAR:
909 		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
910 						args->va_address,
911 						args->map_size);
912 		break;
913 	case AMDGPU_VA_OP_REPLACE:
914 		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
915 		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
916 					     args->offset_in_bo, args->map_size,
917 					     va_flags);
918 		break;
919 	default:
920 		break;
921 	}
922 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
923 		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
924 					args->operation);
925 
926 error_backoff:
927 	ttm_eu_backoff_reservation(&ticket, &list);
928 
929 error_unref:
930 	drm_gem_object_put(gobj);
931 	return r;
932 }
933 
934 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
935 			struct drm_file *filp)
936 {
937 	struct amdgpu_device *adev = drm_to_adev(dev);
938 	struct drm_amdgpu_gem_op *args = data;
939 	struct drm_gem_object *gobj;
940 	struct amdgpu_vm_bo_base *base;
941 	struct amdgpu_bo *robj;
942 	int r;
943 
944 	gobj = drm_gem_object_lookup(filp, args->handle);
945 	if (gobj == NULL) {
946 		return -ENOENT;
947 	}
948 	robj = gem_to_amdgpu_bo(gobj);
949 
950 	r = amdgpu_bo_reserve(robj, false);
951 	if (unlikely(r))
952 		goto out;
953 
954 	switch (args->op) {
955 	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
956 		struct drm_amdgpu_gem_create_in info;
957 		void __user *out = u64_to_user_ptr(args->value);
958 
959 		info.bo_size = robj->tbo.base.size;
960 		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
961 		info.domains = robj->preferred_domains;
962 		info.domain_flags = robj->flags;
963 		amdgpu_bo_unreserve(robj);
964 		if (copy_to_user(out, &info, sizeof(info)))
965 			r = -EFAULT;
966 		break;
967 	}
968 	case AMDGPU_GEM_OP_SET_PLACEMENT:
969 		if (robj->tbo.base.import_attach &&
970 		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
971 			r = -EINVAL;
972 			amdgpu_bo_unreserve(robj);
973 			break;
974 		}
975 		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
976 			r = -EPERM;
977 			amdgpu_bo_unreserve(robj);
978 			break;
979 		}
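		/*
		 * Refuse to change the placement of a BO that is mapped into
		 * a VM whose page directory lives on another GPU of the same
		 * XGMI hive.
		 */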
980 		for (base = robj->vm_bo; base; base = base->next)
981 			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
982 				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
983 				r = -EINVAL;
984 				amdgpu_bo_unreserve(robj);
985 				goto out;
986 			}
987 
988 
989 		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
990 							AMDGPU_GEM_DOMAIN_GTT |
991 							AMDGPU_GEM_DOMAIN_CPU);
992 		robj->allowed_domains = robj->preferred_domains;
993 		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
994 			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
995 
996 		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
997 			amdgpu_vm_bo_invalidate(adev, robj, true);
998 
999 		amdgpu_bo_unreserve(robj);
1000 		break;
1001 	default:
1002 		amdgpu_bo_unreserve(robj);
1003 		r = -EINVAL;
1004 	}
1005 
1006 out:
1007 	drm_gem_object_put(gobj);
1008 	return r;
1009 }
1010 
1011 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
1012 			    struct drm_device *dev,
1013 			    struct drm_mode_create_dumb *args)
1014 {
1015 	struct amdgpu_device *adev = drm_to_adev(dev);
1016 	struct drm_gem_object *gobj;
1017 	uint32_t handle;
1018 	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1019 		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
1020 	u32 domain;
1021 	int r;
1022 
1023 	/*
1024 	 * The buffer returned from this function should be cleared, but
1025 	 * it can only be done if the ring is enabled or we'll fail to
1026 	 * create the buffer.
1027 	 */
1028 	if (adev->mman.buffer_funcs_enabled)
1029 		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
1030 
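	/*
	 * Compute a display-aligned pitch for the requested width and bpp and
	 * round the total size up to a whole page.
	 */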
1031 	args->pitch = amdgpu_align_pitch(adev, args->width,
1032 					 DIV_ROUND_UP(args->bpp, 8), 0);
1033 	args->size = (u64)args->pitch * args->height;
1034 	args->size = roundup2(args->size, PAGE_SIZE);
1035 	domain = amdgpu_bo_get_preferred_domain(adev,
1036 				amdgpu_display_supported_domains(adev, flags));
1037 	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
1038 				     ttm_bo_type_device, NULL, &gobj);
1039 	if (r)
1040 		return -ENOMEM;
1041 
1042 	r = drm_gem_handle_create(file_priv, gobj, &handle);
1043 	/* drop reference from allocate - handle holds it now */
1044 	drm_gem_object_put(gobj);
1045 	if (r) {
1046 		return r;
1047 	}
1048 	args->handle = handle;
1049 	return 0;
1050 }
1051 
1052 #if defined(CONFIG_DEBUG_FS)
1053 static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
1054 {
1055 	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
1056 	struct drm_device *dev = adev_to_drm(adev);
1057 	struct drm_file *file;
1058 	int r;
1059 
1060 	r = mutex_lock_interruptible(&dev->filelist_mutex);
1061 	if (r)
1062 		return r;
1063 
1064 	list_for_each_entry(file, &dev->filelist, lhead) {
1065 		struct task_struct *task;
1066 		struct drm_gem_object *gobj;
1067 		int id;
1068 
1069 		/*
1070 		 * Although we have a valid reference on file->pid, that does
1071 		 * not guarantee that the task_struct who called get_pid() is
1072 		 * still alive (e.g. get_pid(current) => fork() => exit()).
1073 		 * Therefore, we need to protect this ->comm access using RCU.
1074 		 */
1075 		rcu_read_lock();
1076 		task = pid_task(file->pid, PIDTYPE_PID);
1077 		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
1078 			   task ? task->comm : "<unknown>");
1079 		rcu_read_unlock();
1080 
1081 		spin_lock(&file->table_lock);
1082 		idr_for_each_entry(&file->object_idr, gobj, id) {
1083 			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
1084 
1085 			amdgpu_bo_print_info(id, bo, m);
1086 		}
1087 		spin_unlock(&file->table_lock);
1088 	}
1089 
1090 	mutex_unlock(&dev->filelist_mutex);
1091 	return 0;
1092 }
1093 
1094 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);
1095 
1096 #endif
1097 
1098 void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
1099 {
1100 #if defined(CONFIG_DEBUG_FS)
1101 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1102 	struct dentry *root = minor->debugfs_root;
1103 
1104 	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
1105 			    &amdgpu_debugfs_gem_info_fops);
1106 #endif
1107 }
1108