/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

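/*
 * Page-fault handling for mmap'ed GEM objects.  The native Linux handler
 * and the OpenBSD UVM handler below do the same work: take pm.mclk_lock
 * to serialize faults against power-management reclocking, reserve the
 * BO, give the driver a chance to migrate it out of CPU-inaccessible
 * VRAM (radeon_bo_fault_reserve_notify()), then fill in the faulting
 * page range.
 */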
#ifdef __linux__
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
#else /* !__linux__ */
int
radeon_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
				       TTM_BO_VM_NUM_PREFAULT, 1);
#ifdef notyet
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;
#endif

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	switch (ret) {
	case VM_FAULT_NOPAGE:
		ret = VM_PAGER_OK;
		break;
	case VM_FAULT_RETRY:
		ret = VM_PAGER_REFAULT;
		break;
	default:
		ret = VM_PAGER_BAD;
		break;
	}
	up_read(&rdev->pm.mclk_lock);
	uvmfault_unlockall(ufi, NULL, uobj);
	return ret;
}

void
radeon_gem_vm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_get(bo);
}

void
radeon_gem_vm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_put(bo);
}

static const struct uvm_pagerops radeon_gem_vm_ops = {
	.pgo_fault = radeon_gem_fault,
	.pgo_reference = radeon_gem_vm_reference,
	.pgo_detach = radeon_gem_vm_detach
};
#endif /* !__linux__ */

static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

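/**
 * radeon_gem_object_create - create a GEM object backed by a radeon BO
 *
 * @rdev: radeon_device pointer
 * @size: requested size in bytes (callers round up to page size)
 * @alignment: requested alignment, forced to at least PAGE_SIZE
 * @initial_domain: preferred placement (VRAM, GTT or CPU)
 * @flags: RADEON_GEM_* creation flags
 * @kernel: whether the BO is for kernel-internal use
 * @obj: where to store the new GEM object on success
 *
 * Rejects allocations larger than the unpinned GTT size and falls back
 * from VRAM-only to VRAM|GTT placement when VRAM is exhausted.  Returns
 * 0 on success, negative errno on failure.
 */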
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
#ifdef __linux__
	robj->pid = task_pid_nr(current);
#else
	robj->pid = curproc->p_p->ps_pid;
#endif

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

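/**
 * radeon_gem_set_domain - validate a GEM object into a domain
 *
 * @gobj: GEM object to validate
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * The write domain takes precedence over the read domains.  For the CPU
 * domain this only waits (up to 30 seconds) for the object to go idle;
 * a request to move a prime-shared BO to VRAM is rejected with -EINVAL.
 */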
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access; wait for the object to go idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv,
					  DMA_RESV_USAGE_BOOKKEEP,
					  true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

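/*
 * Translate a lockup (-EDEADLK) reported by the fence code into a GPU
 * reset attempt; on a successful reset the caller gets -EAGAIN so that
 * userspace can retry the ioctl.
 */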
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

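/*
 * Both mmap implementations refuse to map userptr BOs: those pages are
 * already part of the process's address space, so mapping them a second
 * time through the GEM object is denied with -EPERM.
 */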
#ifdef __linux__
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}
#else
static int
radeon_gem_object_mmap(struct drm_gem_object *obj,
    vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, accessprot, off, size);
}
#endif

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
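
/*
 * For reference, userspace typically reaches these handlers through the
 * generic ioctl path in libdrm.  A minimal sketch, assuming fd is an
 * open radeon DRM file descriptor:
 *
 *	struct drm_radeon_gem_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
 *	    &info, sizeof(info)) == 0)
 *		printf("vram %llu gart %llu\n",
 *		    (unsigned long long)info.vram_size,
 *		    (unsigned long long)info.gart_size);
 */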
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	return -ENOSYS;
#ifdef notyet
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
#endif
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

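/*
 * Non-blocking busy check: report -EBUSY while fences are still pending
 * on the BO's reservation object, and tell userspace which memory
 * domain the BO currently lives in.
 */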
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

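/*
 * Blocking variant of the busy check: wait up to 30 seconds for the BO
 * to go idle, then flush the HDP cache through MMIO if the BO sits in
 * VRAM and the ASIC provides a flush callback.
 */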
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet; to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value. Thus,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

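/*
 * Dumb buffers are unaccelerated, CPU-mappable scanout buffers.  The
 * pitch is aligned to the hardware's requirements, the size is the
 * aligned pitch times the height rounded up to a whole page, and the
 * BO is placed in VRAM.
 */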
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = roundup2(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

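/*
 * A minimal userspace sketch of the dumb-buffer path above, assuming fd
 * is an open DRM file descriptor; this uses the generic dumb ioctl,
 * which the DRM core routes to radeon_mode_dumb_create():
 *
 *	struct drm_mode_create_dumb creq;
 *
 *	memset(&creq, 0, sizeof(creq));
 *	creq.width = 1024;
 *	creq.height = 768;
 *	creq.bpp = 32;
 *	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) == 0)
 *		printf("handle %u pitch %u size %llu\n", creq.handle,
 *		    creq.pitch, (unsigned long long)creq.size);
 */
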
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}
995