/*	$OpenBSD: radeon_gem.c,v 1.4 2013/12/05 13:29:56 kettenis Exp $	*/
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"

int	radeon_gem_object_init(struct drm_gem_object *);
void	radeon_gem_object_free(struct drm_gem_object *);
int	radeon_gem_set_domain(struct drm_gem_object *, uint32_t, uint32_t);
int	radeon_gem_object_open(struct drm_gem_object *, struct drm_file *);
void	radeon_gem_object_close(struct drm_gem_object *, struct drm_file *);

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* BOs are created through radeon_gem_object_create(); this
	 * generic init path must never be reached. */
	BUG();

	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifdef notyet
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* the maximum bo size is the minimum of the visible vram and gtt sizes */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		printf("%s:%d alloc size %dMb bigger than %ldMb limit\n",
		       __func__, __LINE__, size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			/* if VRAM allocation failed, retry with GTT allowed */
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	rw_enter_write(&rdev->gem.rwlock);
	list_add_tail(&robj->list, &rdev->gem.objects);
	rw_exit_write(&rdev->gem.rwlock);

	return 0;
}

int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		DRM_ERROR("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			DRM_ERROR("Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the GEM
 * create and open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		DRM_ERROR("leaking bo va because "
			  "we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
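
/*
 * Usage note (a sketch, not driver code): radeon_gem_handle_lockup() is the
 * common error tail of the GEM ioctls below.  A wait that fails with
 * -EDEADLK is taken to mean the GPU is hung; the handler then attempts a
 * reset and, if the reset succeeds, fails the ioctl with -EAGAIN so
 * userspace can simply retry.  A hypothetical libdrm-side caller would
 * therefore loop roughly like this (illustrative only):
 *
 *	int ret;
 *	do {
 *		ret = drmCommandWriteRead(fd, DRM_RADEON_GEM_BUSY,
 *		    &busy, sizeof(busy));
 *	} while (ret == -EAGAIN);
 */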

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	rw_enter_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		rw_exit_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		rw_exit_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	rw_exit_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	rw_enter_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		rw_exit_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	rw_exit_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}
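
/*
 * Example (a minimal userspace sketch, assuming the libdrm
 * drmCommandWriteRead() wrapper and an already-open device fd; the sizes and
 * domain are illustrative): allocating a BO and obtaining its mmap offset
 * goes through DRM_RADEON_GEM_CREATE and DRM_RADEON_GEM_MMAP, handled by
 * radeon_gem_create_ioctl() above and radeon_gem_mmap_ioctl() below:
 *
 *	struct drm_radeon_gem_create create = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	struct drm_radeon_gem_mmap map = { 0 };
 *
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *	    &create, sizeof(create));
 *	map.handle = create.handle;
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP, &map, sizeof(map));
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, map.addr_ptr);
 */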

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		/* FALLTHROUGH */
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* callback hw specific functions if any */
	if (rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
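
/*
 * Example (sketch only, illustrative values): on Cayman and newer, userspace
 * assigns a per-process GPU virtual address to a BO via DRM_RADEON_GEM_VA,
 * handled by radeon_gem_va_ioctl() below.  Note the restrictions the handler
 * enforces: vm_id must be 0 and the mapping must be snooped.
 *
 *	struct drm_radeon_gem_va va = {
 *		.handle = create.handle,
 *		.operation = RADEON_VA_MAP,
 *		.vm_id = 0,
 *		.flags = RADEON_VM_PAGE_READABLE |
 *			 RADEON_VM_PAGE_WRITEABLE |
 *			 RADEON_VM_PAGE_SNOOPED,
 *		.offset = gpu_va,
 *	};
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
 *
 * On return, va.operation holds RADEON_VA_RESULT_OK, RADEON_VA_RESULT_ERROR,
 * or RADEON_VA_RESULT_VA_EXIST (with va.offset set to the existing address).
 */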
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet.  To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way
	 * we can start using this field later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		DRM_ERROR("offset 0x%lX is in reserved area 0x%X\n",
			  (unsigned long)args->offset,
			  RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove: we need to force userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be
	 * able to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		DRM_ERROR("invalid flags 0x%08X vs 0x%08X\n",
			  args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		DRM_ERROR("only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		DRM_ERROR("unsupported operation %d\n",
			  args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = PAGE_ALIGN(args->size);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}
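
/*
 * Example (a sketch with illustrative values): the dumb-buffer entry points
 * above back the generic KMS dumb-buffer ioctls, so a modesetting client
 * never calls the radeon-specific paths directly:
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1024,
 *		.height = 768,
 *		.bpp = 32,
 *	};
 *	struct drm_mode_map_dumb mreq = { 0 };
 *	struct drm_mode_destroy_dumb dreq = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);	(radeon_mode_dumb_create)
 *	mreq.handle = creq.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);	(radeon_mode_dumb_mmap)
 *	dreq.handle = creq.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);	(radeon_mode_dumb_destroy)
 */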