/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMiB bigger than %ldMiB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = curproc->p_p->ps_pid;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
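
/*
 * Illustrative in-kernel usage (a sketch, not a call site in this
 * file): callers typically request VRAM and rely on the GTT fallback
 * taken in the retry path above when VRAM is exhausted.
 *
 *	struct drm_gem_object *gobj;
 *	int r;
 *
 *	r = radeon_gem_object_create(rdev, 64 * 1024, 0,
 *				     RADEON_GEM_DOMAIN_VRAM, 0,
 *				     false, &gobj);
 *	// on success the BO may have been placed in GTT instead of VRAM
 */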

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access; wait for the object to be idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
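
/*
 * Illustrative userspace pattern (a sketch, not driver code): because a
 * successful GPU reset is converted to -EAGAIN above, callers of the
 * GEM ioctls are expected to simply retry, e.g. with libdrm:
 *
 *	do {
 *		r = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *					&args, sizeof(args));
 *	} while (r == -EAGAIN);
 */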

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}
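
/*
 * Illustrative userspace query (a sketch; assumes an open DRM fd and
 * libdrm's drmCommandWriteRead()):
 *
 *	struct drm_radeon_gem_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
 *				&info, sizeof(info)) == 0)
 *		printf("vram %llu visible %llu gart %llu\n",
 *		       (unsigned long long)info.vram_size,
 *		       (unsigned long long)info.vram_visible,
 *		       (unsigned long long)info.gart_size);
 */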

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this buffer */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
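
/*
 * Illustrative userspace allocation (a sketch; the size is page-aligned
 * above, so any byte count is accepted):
 *
 *	struct drm_radeon_gem_create args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.size = 1024 * 1024;
 *	args.alignment = 4096;
 *	args.initial_domain = RADEON_GEM_DOMAIN_VRAM;
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				&args, sizeof(args)) == 0)
 *		bo_handle = args.handle;	// hypothetical consumer
 */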

int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	return -ENOSYS;
#ifdef notyet
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this buffer */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
#endif
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	/* use rdev directly; robj must not be dereferenced after the put above */
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
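
/*
 * Illustrative userspace mapping flow (a sketch): the ioctl only
 * returns a fake offset; the actual mapping is made with mmap(2) on
 * the DRM fd.
 *
 *	struct drm_radeon_gem_mmap args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.handle = bo_handle;
 *	args.size = bo_size;
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP,
 *				&args, sizeof(args)) == 0)
 *		ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, args.addr_ptr);
 */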

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
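
/*
 * Illustrative userspace wait (a sketch): the kernel blocks for up to
 * 30 seconds (30 * HZ above) and reports -EBUSY on timeout; as with
 * the other GEM ioctls, -EAGAIN after a GPU reset means "retry".
 *
 *	struct drm_radeon_gem_wait_idle args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.handle = bo_handle;
 *	do {
 *		r = drmCommandWrite(fd, DRM_RADEON_GEM_WAIT_IDLE,
 *				    &args, sizeof(args));
 *	} while (r == -EBUSY || r == -EAGAIN);
 */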

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out, don't swap it in here;
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
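
/*
 * Illustrative userspace mapping into the per-file GPU VM (a sketch;
 * RADEON_VM_PAGE_VALID and RADEON_VM_PAGE_SYSTEM are kernel-managed and
 * rejected above, and the snooped flag is expected from userspace):
 *
 *	struct drm_radeon_gem_va args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.handle = bo_handle;
 *	args.operation = RADEON_VA_MAP;
 *	args.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE |
 *		     RADEON_VM_PAGE_SNOOPED;
 *	args.offset = va;	// must be >= RADEON_VA_RESERVED_SIZE
 *	r = drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &args, sizeof(args));
 *	if (r == 0 && args.operation == RADEON_VA_RESULT_VA_EXIST)
 *		va = args.offset;	// already mapped; reuse that VA
 */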

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = roundup2(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
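
/*
 * Illustrative userspace dumb-buffer creation (a sketch using the
 * generic DRM interface that lands in the function above):
 *
 *	struct drm_mode_create_dumb args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.width = 1024;
 *	args.height = 768;
 *	args.bpp = 32;
 *	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &args) == 0) {
 *		// args.pitch and args.size now reflect the aligned,
 *		// page-rounded layout computed above
 *		scanout_handle = args.handle;
 *	}
 */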

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}