xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c (revision 46035553bfdd96e63c94e32da0210227ec2e3cf1)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"

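/**
 * amdgpu_gem_object_free - free the GEM object backing store
 *
 * @gobj: GEM object to free
 *
 * Unregisters any MMU notifier and drops the driver reference on the
 * underlying amdgpu buffer object.
 */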
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

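/**
 * amdgpu_gem_object_create - create an amdgpu BO and wrap it in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment in bytes
 * @initial_domain: preferred memory domain (VRAM, GTT, ...)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @type: TTM buffer object type
 * @resv: optional reservation object to share with the new BO
 * @obj: resulting GEM object, NULL on failure
 *
 * On allocation failure the CPU-access-required flag is dropped and a
 * VRAM request is allowed to fall back to GTT before giving up.
 * Returns 0 on success or a negative error code.
 */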
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->tbo.base;

	return 0;
}

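/**
 * amdgpu_gem_force_release - forcibly release all GEM objects
 *
 * @adev: amdgpu device
 *
 * Walks every open DRM file and drops any GEM handles that are still
 * allocated.  Not yet implemented on OpenBSD; the body is stubbed out.
 */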
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	STUB();
#ifdef notyet
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
#endif
}

/*
 * Called from drm_gem_handle_create(), which appears in both the GEM
 * create and open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
#ifdef notyet
	struct mm_struct *mm;
#endif
	int r;

#ifdef notyet
	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;
#endif

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

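/**
 * amdgpu_gem_object_close - clean up a BO's VM mapping on handle close
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file of the closing client
 *
 * Drops the bo_va reference for this client's VM and, when that was the
 * last reference, removes the mapping and clears the freed page table
 * entries, fencing the BO against the clear operation.
 */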
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	long r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%ld)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_rmv(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	fence = dma_resv_get_excl(bo->tbo.base.resv);
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page "
			"tables on GEM object close (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
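/**
 * amdgpu_gem_create_ioctl - create a GEM object from userspace
 *
 * Validates the requested flags and domains, creates the buffer object
 * (optionally sharing the VM root reservation for per-VM BOs) and
 * returns a handle to userspace.
 */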
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))

		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.base.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

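/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * Not supported on OpenBSD; returns -ENOSYS.  The Linux implementation
 * is kept below under "notyet" for reference.
 */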
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
#endif
}

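/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset of a BO
 *
 * @filp: DRM file
 * @dev: DRM device
 * @handle: GEM handle of the buffer
 * @offset_p: returned mmap offset
 *
 * Rejects userptr BOs and BOs created with NO_CPU_ACCESS.
 */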
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

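/**
 * amdgpu_gem_mmap_ioctl - return the mmap offset for a GEM handle
 *
 * Thin ioctl wrapper around amdgpu_mode_dumb_mmap().
 */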
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;
	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

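/**
 * amdgpu_gem_wait_idle_ioctl - wait for a BO to become idle
 *
 * Waits on all fences attached to the BO's reservation object for at
 * most the requested timeout and reports back whether it signaled.
 */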
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

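/**
 * amdgpu_gem_metadata_ioctl - get or set tiling flags and opaque metadata
 *
 * Reserves the BO and either copies out the current tiling/metadata or
 * installs the new values supplied by userspace.
 */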
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

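/**
 * amdgpu_gem_va_ioctl - manage GPU virtual address mappings
 *
 * Handles the MAP, UNMAP, CLEAR and REPLACE operations on a BO's VA
 * range after validating the address against the reserved area and the
 * GMC hole, then optionally flushes the update to the page tables.
 */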
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

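/**
 * amdgpu_gem_op_ioctl - query or change per-BO properties
 *
 * Supports querying the original creation parameters and changing the
 * preferred placement domains of a buffer object.
 */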
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}


		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

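/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * Computes pitch and size from the requested geometry, picks a
 * display-capable domain and returns a GEM handle for the new BO.
 */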
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = roundup2(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
		seq_printf((m), " " #flag);		\
	}

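/*
 * Print one line of placement, pin count, dma-buf and flag information
 * for a single GEM object; used as an idr_for_each() callback.
 */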
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}

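/*
 * debugfs file: dump the GEM objects of every open DRM file, one line
 * per BO, prefixed with the owning process.
 */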
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

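/*
 * Register the amdgpu_gem_info debugfs entry when debugfs support is
 * compiled in; otherwise this is a no-op.
 */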
int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}