/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

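/*
 * GEM free callback: drop the driver's reference to the backing
 * radeon_bo (and its PRIME attachment, where supported).
 */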
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifndef __NetBSD__		/* XXX drm prime */
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif
		radeon_bo_unref(&robj);
	}
}

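/*
 * Allocate a radeon_bo and expose it as a GEM object.  If a VRAM
 * allocation fails, retry with GTT allowed as well; newly created
 * objects are tracked on rdev->gem.objects.
 */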
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* maximum bo size is the minimum of visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		printk(KERN_WARNING "%s:%d alloc size %dMB bigger than %luMB limit\n",
		       __func__, __LINE__, size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
#ifndef __NetBSD__
	robj->pid = task_pid_nr(current);
#endif

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

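/*
 * Move/validate a BO into the requested domain.  For now this only
 * waits for the BO to become idle when CPU access is requested.
 */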
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * the open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

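/*
 * Per-file close callback: drop the VM mapping reference taken in
 * radeon_gem_object_open and remove the mapping on the last drop.
 */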
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

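/*
 * -EDEADLK from a wait means a GPU lockup was detected; try to reset
 * the GPU and, if the reset succeeds, return -EAGAIN so the caller
 * retries the operation.
 */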
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
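/*
 * Report usable VRAM/GTT sizes to userspace, excluding space already
 * reserved by the driver (stolen VGA memory, the fbdev framebuffer,
 * and the ring buffers).
 */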
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

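/*
 * Create a BO on behalf of userspace and return a GEM handle to it.
 */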
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a GEM object to contain this allocation */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
					args->initial_domain, false,
					false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	/* use rdev here, not the BO: the GEM reference has been dropped */
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

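/*
 * Look up a GEM handle and return the fake mmap offset userspace must
 * pass to mmap(2) to map the BO.
 */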
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

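/*
 * Non-blocking busy check: radeon_bo_wait() with no_wait set returns
 * -EBUSY while the BO is still in use, and also reports the BO's
 * current placement domain.
 */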
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

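/*
 * Block until the BO is idle; some ASICs provide an extra
 * ioctl_wait_idle hook to flush caches for coherent CPU access.
 */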
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* call back the hw-specific function, if any */
	if (rdev->asic->ioctl_wait_idle)
		rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

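/* Set a BO's tiling flags and pitch. */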
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

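/* Read back a BO's tiling flags and pitch. */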
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

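/*
 * Map or unmap a BO in this file's GPU virtual address space (CAYMAN
 * and newer).  The offset and flags are sanity-checked before the VM
 * is touched.
 */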
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet.  To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag;
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(dev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		/* don't leak the reservation on the error path */
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

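/* Get or set a BO's initial placement domain. */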
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

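/*
 * Create a dumb scanout buffer in VRAM for the generic KMS
 * dumb-buffer interface.
 */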
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
#ifdef __NetBSD__		/* XXX ALIGN means something else.  */
	args->size = round_up(args->size, PAGE_SIZE);
#else
	args->size = ALIGN(args->size, PAGE_SIZE);
#endif

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
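/* Dump every GEM object with its size, placement and owner pid. */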
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}