xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c (revision 5c389b79544373bccfce668b646e62e7ba9802a3)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;

#ifdef __linux__
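/*
 * amdgpu_gem_fault - TTM page fault handler (Linux path)
 *
 * Reserves the buffer object, gives the driver a chance to migrate it out
 * of inaccessible VRAM via amdgpu_bo_fault_reserve_notify(), then lets TTM
 * populate the faulting range. If the device has been unplugged, a dummy
 * page is mapped instead.
 */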
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);

		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct amdgpu_gem_vm_ops = {
	.fault = amdgpu_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
#else /* !__linux__ */
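/*
 * amdgpu_gem_fault - UVM page fault handler (OpenBSD path)
 *
 * Same flow as the Linux handler above, but driven by uvm_fault(9):
 * TTM's VM_FAULT_* results are translated to VM_PAGER_* codes and the
 * UVM maps are unlocked with uvmfault_unlockall() before returning.
 */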
int
amdgpu_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo);
	if (ret) {
		switch (ret) {
		case VM_FAULT_NOPAGE:
			ret = VM_PAGER_OK;
			break;
		case VM_FAULT_RETRY:
			ret = VM_PAGER_REFAULT;
			break;
		default:
			ret = VM_PAGER_BAD;
			break;
		}
		uvmfault_unlockall(ufi, NULL, uobj);
		return ret;
	}

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
					       TTM_BO_VM_NUM_PREFAULT, 1);

		drm_dev_exit(idx);
	} else {
		STUB();
#ifdef notyet
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
#endif
	}
#ifdef __linux__
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;
#endif

unlock:
	switch (ret) {
	case VM_FAULT_NOPAGE:
		ret = VM_PAGER_OK;
		break;
	case VM_FAULT_RETRY:
		ret = VM_PAGER_REFAULT;
		break;
	default:
		ret = VM_PAGER_BAD;
		break;
	}
	dma_resv_unlock(bo->base.resv);
	uvmfault_unlockall(ufi, NULL, uobj);
	return ret;
}

void
amdgpu_gem_vm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_get(bo);
}

void
amdgpu_gem_vm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_put(bo);
}

static const struct uvm_pagerops amdgpu_gem_vm_ops = {
	.pgo_fault = amdgpu_gem_fault,
	.pgo_reference = amdgpu_gem_vm_reference,
	.pgo_detach = amdgpu_gem_vm_detach
};
#endif /* !__linux__ */

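/*
 * amdgpu_gem_object_free - called when the GEM object's refcount hits
 * zero; unregisters any MMU notifier and drops the driver's BO reference.
 */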
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

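/*
 * amdgpu_gem_object_create - allocate a user BO and wrap it in a GEM object
 *
 * Fills in an amdgpu_bo_param from the arguments, creates the backing
 * amdgpu_bo_user and hands back its embedded GEM object with this file's
 * object_funcs installed. On success *obj holds the only reference.
 */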
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r)
		return r;

	bo = &ubo->bo;
	*obj = &bo->tbo.base;
	(*obj)->funcs = &amdgpu_gem_object_funcs;

	return 0;
}

int	drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);

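/*
 * amdgpu_gem_force_release - drop all GEM handles still held by userspace
 *
 * Used when tearing down the device while clients are still alive; walks
 * every open DRM file (a list on Linux, a splay tree on OpenBSD) and
 * forcibly releases the objects left in its handle table, warning about
 * each leak.
 */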
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

#ifdef __linux__
	list_for_each_entry(file, &ddev->filelist, lhead) {
#else
	SPLAY_FOREACH(file, drm_file_tree, &ddev->files) {
#endif
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is reached from both the
 * create and open ioctl paths.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
#ifdef notyet
	struct mm_struct *mm;
#endif
	int r;

#ifdef notyet
	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;
#endif

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

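/*
 * amdgpu_gem_object_close - tear down the per-VM mapping of a GEM object
 *
 * Drops this file's bo_va reference and, once it reaches zero, removes
 * the bo_va, clears the freed mappings from the page tables and fences
 * the BO so the hardware is done with it before the memory is reused.
 */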
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	long r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%ld)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page "
			"tables on GEM object close (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);
}

#ifdef __linux__
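/*
 * amdgpu_gem_object_mmap - sanity-check a BO before mapping it into
 * userspace. Userptr BOs and BOs created with NO_CPU_ACCESS may never be
 * mmap()ed; everything else is handed to the TTM GEM mmap helper.
 */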
static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
		vma->vm_flags &= ~VM_MAYWRITE;

	return drm_gem_ttm_mmap(obj, vma);
}
#else
static int amdgpu_gem_object_mmap(struct drm_gem_object *obj,
    vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
#ifdef notyet
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
		vma->vm_flags &= ~VM_MAYWRITE;
#endif

	return drm_gem_ttm_mmap(obj, accessprot, off, size);
}
#endif

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
	.close = amdgpu_gem_object_close,
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = amdgpu_gem_object_mmap,
	.vm_ops = &amdgpu_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
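/*
 * amdgpu_gem_create_ioctl - DRM_IOCTL_AMDGPU_GEM_CREATE
 *
 * Validates the requested flags and domains, allocates the BO (falling
 * back from VRAM to GTT and dropping CPU_ACCESS_REQUIRED on failure) and
 * returns a handle to userspace.
 *
 * Hypothetical userspace sketch using libdrm (not part of this file):
 *
 *	union drm_amdgpu_gem_create req = { 0 };
 *	req.in.bo_size = 4096;
 *	req.in.alignment = 4096;
 *	req.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE,
 *				&req, sizeof(req)) == 0)
 *		handle = req.out.handle;
 */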
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle, initial_domain;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
		      AMDGPU_GEM_CREATE_ENCRYPTED |
		      AMDGPU_GEM_CREATE_DISCARDABLE))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}

	/* create a gem object to contain this object */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		resv = vm->root.bo->tbo.base.resv;
	}

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain,
				     flags, ttm_bo_type_device, resv, &gobj);
	if (r && r != -ERESTARTSYS) {
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			goto retry;
		}

		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
			goto retry;
		}
		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
				size, initial_domain, args->in.alignment, r);
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.bo);
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

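/*
 * amdgpu_gem_userptr_ioctl - DRM_IOCTL_AMDGPU_GEM_USERPTR
 *
 * Wraps anonymous user memory in a GTT BO. Not yet wired up on OpenBSD
 * (no HMM/MMU-notifier support), so it returns -ENOSYS ahead of the
 * Linux implementation kept under "#ifdef notyet".
 */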
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	return -ENOSYS;
#ifdef notyet
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct hmm_range *range;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
	if (r)
		goto release_object;

	r = amdgpu_mn_register(bo, args->addr);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
						 &range);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);

release_object:
	drm_gem_object_put(gobj);

	return r;
#endif
}

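/*
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a handle
 *
 * Userptr BOs and NO_CPU_ACCESS BOs are refused. The returned offset is
 * what userspace passes to mmap(2) on the DRM fd.
 *
 * Hypothetical userspace sketch for the dumb-buffer path (not part of
 * this file):
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */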
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

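/*
 * amdgpu_gem_wait_idle_ioctl - DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE
 *
 * Waits (with an absolute timeout) for all readers and writers on the
 * BO's reservation object. args->out.status is 0 when the BO went idle
 * and 1 when the timeout expired first.
 */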
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put(gobj);
	return r;
}

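/*
 * amdgpu_gem_metadata_ioctl - DRM_IOCTL_AMDGPU_GEM_METADATA
 *
 * Gets or sets the tiling flags and the opaque metadata blob that
 * userspace attaches to a BO.
 */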
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;
	if (flags & AMDGPU_VM_PAGE_NOALLOC)
		pte_flag |= AMDGPU_PTE_NOALLOC;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

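/*
 * amdgpu_gem_va_ioctl - DRM_IOCTL_AMDGPU_GEM_VA
 *
 * Maps, unmaps, replaces or clears GPU virtual address ranges for a BO
 * in the caller's VM. The requested range is validated against the
 * reserved area, the GMC hole and the VM size before the page tables
 * are touched.
 */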
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
		AMDGPU_VM_PAGE_NOALLOC;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(dev->dev,
			"va_address 0x%llX is in reserved area 0x%llX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(dev->dev,
			"va_address 0x%llX is in VA hole 0x%llX-0x%llX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_SIZE;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(dev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put(gobj);
	return r;
}

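/*
 * amdgpu_gem_op_ioctl - DRM_IOCTL_AMDGPU_GEM_OP
 *
 * Two sub-operations: GET_GEM_CREATE_INFO copies the BO's creation
 * parameters back to userspace; SET_PLACEMENT rewrites the preferred
 * domains, which is refused for imported, userptr and XGMI-shared BOs.
 */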
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->tbo.base.import_attach &&
		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put(gobj);
	return r;
}

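/*
 * amdgpu_gem_align_pitch - pad a scanout pitch to the alignment required
 * for the given bytes-per-pixel, returning the aligned pitch in bytes.
 */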
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
				  int width,
				  int cpp,
				  bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

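/*
 * amdgpu_mode_dumb_create - DRM "dumb buffer" allocation entry point
 *
 * Computes an aligned pitch and page-rounded size for the requested
 * width/height/bpp, picks a CPU-accessible domain the display engine
 * supports and creates the BO, returning its handle in args->handle.
 */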
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
					     DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = roundup2(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		int id;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

			amdgpu_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);

#endif

void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
			    &amdgpu_debugfs_gem_info_fops);
#endif
}