/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

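/*
 * Two page-fault paths are provided below: the Linux build plugs a
 * vm_operations_struct fault handler into TTM, while the OpenBSD build
 * services UVM pager faults through the same TTM reserve/fault helpers.
 */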
#ifdef __linux__
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
#else /* !__linux__ */
int
radeon_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
				       TTM_BO_VM_NUM_PREFAULT, 1);
#ifdef notyet
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;
#endif

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	switch (ret) {
	case VM_FAULT_NOPAGE:
		ret = VM_PAGER_OK;
		break;
	case VM_FAULT_RETRY:
		ret = VM_PAGER_REFAULT;
		break;
	default:
		ret = VM_PAGER_BAD;
		break;
	}
	up_read(&rdev->pm.mclk_lock);
	uvmfault_unlockall(ufi, NULL, uobj);
	return ret;
}

void
radeon_gem_vm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_get(bo);
}

void
radeon_gem_vm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_put(bo);
}

static const struct uvm_pagerops radeon_gem_vm_ops = {
	.pgo_fault = radeon_gem_fault,
	.pgo_reference = radeon_gem_vm_reference,
	.pgo_detach = radeon_gem_vm_detach
};
#endif /* !__linux__ */

static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
#ifdef __linux__
	robj->pid = task_pid_nr(current);
#else
	robj->pid = curproc->p_p->ps_pid;
#endif

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

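/*
 * Handle a set-domain request on a BO: prefer the write domain over the
 * read domain, wait for the BO to go idle when CPU access is requested,
 * and refuse to migrate a prime-shared BO into VRAM. The BO itself is not
 * moved here (see the FIXME below).
 */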
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly
		 * migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

#ifdef __linux__
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}
#else
static int
radeon_gem_object_mmap(struct drm_gem_object *obj,
    vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, accessprot, off, size);
}
#endif

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

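/*
 * Userptr BOs are not supported in this port: the ioctl always returns
 * -ENOSYS and the Linux implementation below is kept under "notyet" for
 * reference only.
 */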
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	return -ENOSYS;
#ifdef notyet
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install a MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
#endif
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

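/*
 * Wait (for up to 30 seconds) on all fences attached to the BO's
 * reservation object, then flush the HDP cache via MMIO when the BO is
 * resident in VRAM.
 */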
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

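/*
 * Map or unmap a BO in the caller's per-file GPU virtual address space.
 * The request is rejected when the VM manager is disabled, when a non-zero
 * vm_id is passed, when the offset falls into the reserved range, or when
 * the flags are invalid; on success the page tables are updated right away
 * via radeon_gem_va_update_vm().
 */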
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; this way
	 * we can start using those fields later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

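/*
 * Allocate a dumb scanout buffer in VRAM sized for the requested
 * width/height/bpp and return a GEM handle to it.
 */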
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = roundup2(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}