/*	$NetBSD: radeon_gem.c,v 1.8 2020/02/14 04:38:24 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_gem.c,v 1.8 2020/02/14 04:38:24 riastradh Exp $");

#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

#include <linux/nbsd-namespace.h>

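/*
 * radeon_gem_object_free - free a GEM object
 *
 * Tears down any dma-buf (prime) attachment, unregisters the MMU
 * notifier, and drops the final buffer-object reference.
 */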
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

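/*
 * radeon_gem_object_create - allocate a radeon BO and wrap it in a GEM object
 *
 * The alignment is clamped up to at least one page.  Sizes larger than
 * the unpinned GTT are rejected, and a failed VRAM allocation is retried
 * with GTT added to the allowed domains before giving up.
 */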
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* Align to at least page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* The maximum BO size is the unpinned GTT size, since we use the
	 * GTT to handle VRAM-to-system-pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
#ifndef __NetBSD__
	robj->pid = task_pid_nr(current);
#endif

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

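/*
 * radeon_gem_set_domain - honor a requested read/write domain change
 *
 * The write domain takes precedence over the read domain.  For a
 * CPU-domain request the object is simply waited on until idle (with a
 * 30 second timeout); no actual migration is performed here.
 */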
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access: wait for the object to go idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

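/*
 * radeon_gem_object_close - drop the per-file VM mapping of a GEM object
 *
 * Decrements the bo_va reference count taken in radeon_gem_object_open()
 * and removes the mapping once it reaches zero.
 */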
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

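/*
 * radeon_gem_handle_lockup - translate a GPU lockup into a reset + retry
 *
 * -EDEADLK signals a detected GPU lockup; attempt a GPU reset and, if it
 * succeeds, ask the caller to retry with -EAGAIN.
 */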
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
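/*
 * radeon_gem_info_ioctl - report VRAM and GTT sizes to userspace, with
 * the pinned portions subtracted from the visible/available figures.
 */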
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

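/*
 * radeon_gem_create_ioctl - allocate a new GEM object for userspace
 *
 * Rounds the requested size up to a whole page, creates the BO, and
 * returns a handle to it; the ioctl's own reference is dropped once the
 * handle exists.  Errors are funneled through radeon_gem_handle_lockup()
 * so a hung GPU is reset and userspace can retry.
 */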
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a GEM object to contain the new BO */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

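/*
 * radeon_gem_userptr_ioctl - wrap an existing userspace mapping in a BO
 *
 * The address and size must be page-aligned.  Read-only access is
 * limited to R600 and newer; writable mappings must be restricted to
 * anonymous memory and registered with an MMU notifier.  With
 * RADEON_GEM_USERPTR_VALIDATE set, the pages are additionally validated
 * into the GTT up front, under the user address space lock.
 */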
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a GEM object to contain the user pages */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
#ifdef __NetBSD__
		vm_map_lock_read(&curproc->p_vmspace->vm_map);
#else
		down_read(&current->mm->mmap_sem);
#endif
		r = radeon_bo_reserve(bo, true);
		if (r) {
#ifdef __NetBSD__
			vm_map_unlock_read(&curproc->p_vmspace->vm_map);
#else
			up_read(&current->mm->mmap_sem);
#endif
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
#ifdef __NetBSD__
		vm_map_unlock_read(&curproc->p_vmspace->vm_map);
#else
		up_read(&current->mm->mmap_sem);
#endif
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

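/*
 * radeon_gem_set_domain_ioctl - move a BO toward the requested domain
 *
 * For now this only waits for the BO to become idle when the CPU domain
 * is requested; see radeon_gem_set_domain() above.
 */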
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now, if someone requests the CPU domain, just make sure
	 * the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

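/*
 * radeon_mode_dumb_mmap - look up the mmap offset for a BO handle
 *
 * Userptr objects cannot be mapped through this path, since their pages
 * already belong to the user address space.
 */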
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

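/*
 * radeon_gem_busy_ioctl - non-blocking busy check
 *
 * Returns -EBUSY while any fence on the BO's reservation object is
 * unsignaled, and reports the BO's current placement domain either way.
 */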
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

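/*
 * radeon_gem_wait_idle_ioctl - block until a BO is idle
 *
 * Waits up to 30 seconds on all fences of the BO, then flushes the HDP
 * cache via MMIO if the BO lives in VRAM and the ASIC requires it.
 */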
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

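/*
 * radeon_gem_set_tiling_ioctl - record tiling flags and pitch for a BO
 */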
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

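/*
 * radeon_gem_get_tiling_ioctl - report a BO's tiling flags and pitch
 */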
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address.  Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out, don't swap it in here;
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

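/*
 * radeon_gem_va_ioctl - map or unmap a BO in the per-file GPU VM
 *
 * Rejects reserved offsets, nonzero vm_ids, and flags that userspace
 * may not set directly, then performs the requested map/unmap and
 * flushes the change via radeon_gem_va_update_vm().
 */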
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet.  To be sure we don't have broken
	 * userspace, reject anyone trying to use a nonzero value; that
	 * way, moving forward, we can use those fields without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove: we need to enforce that userspace sets the snooped
	 * flag, otherwise we will end up with broken userspace and won't
	 * be able to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

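/*
 * radeon_gem_op_ioctl - get or set a BO's initial placement domain
 *
 * Userptr BOs are rejected, and on set the domain is masked down to the
 * valid VRAM/GTT/CPU bits.
 */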
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

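/*
 * radeon_mode_dumb_create - allocate a dumb scanout buffer in VRAM
 *
 * The pitch is the hardware-aligned width times the bytes per pixel
 * ((bpp + 1) / 8, which rounds bit depths such as 15 up to whole
 * bytes), and the total size is rounded up to a whole page before the
 * BO is created.
 */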
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

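/*
 * radeon_debugfs_gem_info - dump every GEM BO with its size, current
 * placement (VRAM/GTT/CPU), and owning pid.
 */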
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}