/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_gem.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_gem.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
#ifdef DUMBBELL_WIP
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif /* DUMBBELL_WIP */
                radeon_bo_unref(&robj);
        }
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
                             int alignment, int initial_domain,
                             bool discardable, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        /* maximum bo size is the minimum of the visible vram and gtt sizes */
        max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
        if (size > max_size) {
                DRM_ERROR("%s:%d alloc size %dMb bigger than %ldMb limit\n",
                          __func__, __LINE__, size >> 20, max_size >> 20);
                return -ENOMEM;
        }

retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;

        spin_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        spin_unlock(&rdev->gem.mutex);

        return 0;
}
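
/*
 * For illustration (not part of the original source): when a VRAM-only
 * request fails for any reason other than a pending signal (-ERESTARTSYS),
 * radeon_gem_object_create() widens the domain to VRAM|GTT and retries,
 * so a call such as
 *
 *      r = radeon_gem_object_create(rdev, size, 0, RADEON_GEM_DOMAIN_VRAM,
 *                                   false, false, &gobj);
 *
 * may hand back a buffer that is actually backed by GTT when VRAM is full.
 */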

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
                                 uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        int r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                DRM_ERROR("Set domain without a domain!\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access; wait for the object to be idle */
                r = radeon_bo_wait(robj, NULL, false);
                if (r) {
                        DRM_ERROR("Failed to wait for object!\n");
                        return r;
                }
        }
        return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * the open ioctl case.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return 0;
        }

        r = radeon_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        radeon_bo_unreserve(rbo);

        return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return;
        }

        r = radeon_bo_reserve(rbo, true);
        if (r) {
                dev_err(rdev->dev, "leaking bo va because "
                        "we failed to reserve the bo (%d)\n", r);
                return;
        }
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        radeon_vm_bo_rmv(rdev, bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}
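
/*
 * For illustration (not part of the original source): the GEM ioctls that
 * follow funnel their result through radeon_gem_handle_lockup(), so a
 * detected GPU lockup (-EDEADLK) triggers a reset and is reported to
 * userspace as -EAGAIN:
 *
 *      r = radeon_bo_wait(robj, NULL, false);
 *      r = radeon_gem_handle_lockup(rdev, r);
 *      return r;   // -EAGAIN asks userspace to retry after the reset
 */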

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;
        unsigned i;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = rdev->mc.real_vram_size;
        args->vram_visible = (u64)man->size << PAGE_SHIFT;
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
        args->vram_visible -= radeon_fbdev_total_size(rdev);
        args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                args->gart_size -= rdev->ring[i].ring_size;
        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        lockmgr(&rdev->exclusive_lock, LK_SHARED);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, false,
                                     false, &gobj);
        if (r) {
                if (r == -ERESTARTSYS)
                        r = -EINTR;
                lockmgr(&rdev->exclusive_lock, LK_RELEASE);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        handle = 0;
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                lockmgr(&rdev->exclusive_lock, LK_RELEASE);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        lockmgr(&rdev->exclusive_lock, LK_RELEASE);
        return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        lockmgr(&rdev->exclusive_lock, LK_SHARED);

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                lockmgr(&rdev->exclusive_lock, LK_RELEASE);
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_unreference_unlocked(gobj);
        lockmgr(&rdev->exclusive_lock, LK_RELEASE);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}
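
/*
 * Userspace-side sketch (illustrative only, assumes libdrm's
 * drmCommandWriteRead() wrapper): allocating a buffer through the create
 * ioctl handled above.
 *
 *      struct drm_radeon_gem_create req = {
 *              .size = 1024 * 1024,
 *              .alignment = 4096,
 *              .initial_domain = RADEON_GEM_DOMAIN_VRAM,
 *      };
 *      if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *                              &req, sizeof(req)) == 0)
 *              // req.handle now names the new BO for this file descriptor
 */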

int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, true);
        switch (cur_placement) {
        case TTM_PL_VRAM:
                args->domain = RADEON_GEM_DOMAIN_VRAM;
                break;
        case TTM_PL_TT:
                args->domain = RADEON_GEM_DOMAIN_GTT;
                break;
        case TTM_PL_SYSTEM:
                args->domain = RADEON_GEM_DOMAIN_CPU;
                /* fall through */
        default:
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* call back hw-specific functions, if any */
        if (rdev->asic->ioctl_wait_idle)
                robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
        if (r == -ERESTARTSYS)
                r = -EINTR;
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        DRM_DEBUG("\n");
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
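
/*
 * Userspace-side sketch (illustrative only, assumes libdrm): CPU access to
 * a BO goes through the mmap ioctl above, which returns a fake offset into
 * the DRM device node that is then handed to mmap(2).
 *
 *      struct drm_radeon_gem_mmap mreq = { .handle = handle, .size = size };
 *      drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP, &mreq, sizeof(mreq));
 *      void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, mreq.addr_ptr);
 */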

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DONT REMOVE !!
         * We don't support vm_id yet. To be sure we don't have broken
         * userspace, reject anyone trying to use a non-zero value; that
         * way, moving forward, we can use those fields without breaking
         * existing userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(dev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to enforce userspace setting the snooped
         * flag, otherwise we will end up with broken userspace and we won't
         * be able to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }
        if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
                dev_err(dev->dev, "only snooped mappings are supported for now\n");
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(dev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                /* drop the reservation taken above before bailing out */
                args->operation = RADEON_VA_RESULT_ERROR;
                radeon_bo_unreserve(rbo);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->soffset) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->soffset;
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        default:
                break;
        }
        args->operation = RADEON_VA_RESULT_OK;
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        radeon_bo_unreserve(rbo);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
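
/*
 * Userspace-side sketch (illustrative only, assumes libdrm): mapping a BO
 * into the per-file virtual address space on CHIP_CAYMAN and newer parts.
 * Note that the handler above insists on RADEON_VM_PAGE_SNOOPED and a zero
 * vm_id.
 *
 *      struct drm_radeon_gem_va va = {
 *              .handle = handle,
 *              .operation = RADEON_VA_MAP,
 *              .flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE |
 *                       RADEON_VM_PAGE_SNOOPED,
 *              .offset = virtual_address,
 *      };
 *      drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
 *      // va.operation returns RADEON_VA_RESULT_OK, _ERROR or _VA_EXIST
 */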

int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = roundup2(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM,
                                     false, ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
                             struct drm_device *dev,
                             uint32_t handle)
{
        return drm_gem_handle_delete(file_priv, handle);
}
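
/*
 * Userspace-side sketch (illustrative only, assumes libdrm's drmIoctl()):
 * the dumb-buffer entry points above back the generic KMS ioctls, e.g. a
 * plain scanout buffer created without any acceleration-aware userspace.
 *
 *      struct drm_mode_create_dumb creq = {
 *              .width = 1024, .height = 768, .bpp = 32,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *      // creq.handle, creq.pitch and creq.size are filled in by
 *      // radeon_mode_dumb_create() above
 */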