/*	$OpenBSD: radeon_gem.c,v 1.8 2015/11/22 15:35:49 kettenis Exp $	*/
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
#ifdef notyet
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif
                radeon_bo_unref(&robj);
        }
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
                             int alignment, int initial_domain,
                             bool discardable, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        /* maximum bo size is the minimum of visible vram and gtt size */
        max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
        if (size > max_size) {
                printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
                       __func__, __LINE__, size >> 20, max_size >> 20);
                return -ENOMEM;
        }

retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}

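/*
 * Validate a read/write domain request from userspace.  Only the CPU
 * domain needs any work here: wait for the buffer to go idle so the
 * CPU can access it safely.
 */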
int radeon_gem_set_domain(struct drm_gem_object *gobj,
                          uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        int r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                printk(KERN_WARNING "Set domain without domain !\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access, wait for the object to go idle */
                r = radeon_bo_wait(robj, NULL, false);
                if (r) {
                        printk(KERN_ERR "Failed to wait for object !\n");
                        return r;
                }
        }
        return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return 0;
        }

        r = radeon_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        radeon_bo_unreserve(rbo);

        return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return;
        }

        r = radeon_bo_reserve(rbo, true);
        if (r) {
                dev_err(rdev->dev, "leaking bo va because "
                        "we failed to reserve the bo (%d)\n", r);
                return;
        }
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        radeon_vm_bo_rmv(rdev, bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}

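/*
 * A -EDEADLK return from the fence code means the GPU locked up; try a
 * GPU reset and, if it succeeds, return -EAGAIN so the caller retries.
 */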
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;
        unsigned i;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = rdev->mc.real_vram_size;
        args->vram_visible = (u64)man->size << PAGE_SHIFT;
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
        args->vram_visible -= radeon_fbdev_total_size(rdev);
        args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
        for(i = 0; i < RADEON_NUM_RINGS; ++i)
                args->gart_size -= rdev->ring[i].ring_size;
        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, false,
                                     false, &gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        down_read(&rdev->exclusive_lock);

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_unreference_unlocked(gobj);
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}

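/*
 * Translate a GEM handle into the fake mmap offset that userspace
 * passes to mmap(2) in order to map the buffer object.
 */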
int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, true);
        switch (cur_placement) {
        case TTM_PL_VRAM:
                args->domain = RADEON_GEM_DOMAIN_VRAM;
                break;
        case TTM_PL_TT:
                args->domain = RADEON_GEM_DOMAIN_GTT;
                break;
        case TTM_PL_SYSTEM:
                args->domain = RADEON_GEM_DOMAIN_CPU;
                /* FALLTHROUGH */
        default:
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* callback hw specific functions if any */
        if (rdev->asic->ioctl_wait_idle)
                robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d \n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        DRM_DEBUG("\n");
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

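/*
 * Map or unmap a buffer object at a virtual address in the per-file
 * GPU address space.  The outcome is reported back to userspace in
 * args->operation.
 */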
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DONT REMOVE !!
         * We don't support vm_id yet.  To be sure we don't have broken
         * userspace, reject anyone trying to use a non-zero value; that way,
         * moving forward we can use those fields without breaking existing
         * userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to force userspace to set the snooped flag,
         * otherwise we will end up with broken userspace and we won't be able
         * to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }
        if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
                dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->soffset) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->soffset;
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        default:
                break;
        }
        args->operation = RADEON_VA_RESULT_OK;
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        radeon_bo_unreserve(rbo);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

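/*
 * Create a "dumb" (unaccelerated scanout) buffer in VRAM and return a
 * GEM handle for it; pitch and size are derived from width, height and
 * bpp.
 */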
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = PAGE_ALIGN(args->size);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM,
                                     false, ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
                             struct drm_device *dev,
                             uint32_t handle)
{
        return drm_gem_handle_delete(file_priv, handle);
}