/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	drm_gem_object_put_unlocked(gobj);

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&p->uf_entry.robj);
	return r;
}

static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info = NULL;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}

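/**
 * amdgpu_cs_parser_init - copy the CS chunks in from userspace
 * @p: parser structure holding parsing context
 * @cs: ioctl data from userspace
 *
 * Looks up the submission context, copies the chunk array and the data of
 * every chunk into kernel memory (p->chunks[].kdata) and allocates the job
 * with one IB per AMDGPU_CHUNK_ID_IB chunk. Returns 0 on success or a
 * negative error code; on error all partially copied chunks are freed.
 */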
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

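/* Worked example of the conversion above, with hypothetical numbers:
 * if log2_max_MBps is 8, the link moves 2^8 = 256 MB/s, i.e. roughly
 * 256 bytes per microsecond. An accum_us balance of 100000 us (0.1 s)
 * then allows us_to_bytes() = 100000 << 8 = 25600000 bytes (~25.6 MB)
 * of migrations, which matches 256 MB/s sustained for 0.1 s, and
 * bytes_to_us() inverts this: 25600000 >> 8 = 100000 us.
 */
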
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

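/* A minimal sketch of how the two helpers above cooperate per submission
 * (this is what amdgpu_cs_parser_bos() does further down; the variable
 * names here are illustrative only): the threshold is sampled once, every
 * validated BO adds ctx.bytes_moved to the running totals, and the totals
 * are reported back so that any overshoot becomes debt for the next
 * submission.
 *
 *	amdgpu_cs_get_threshold_for_moves(adev, &thresh, &vis_thresh);
 *	// ... validate BOs, accumulating bytes_moved / bytes_moved_vis ...
 *	amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved_vis);
 */
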
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };
	int r;

	if (!p->evictable)
		return false;

	for (; &p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)
			break;

		/* We can't move pinned BOs here */
		if (bo->pin_count)
			continue;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good, we can try to move this BO somewhere else */
		update_bytes_moved_vis =
			!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			amdgpu_bo_in_cpu_visible_vram(bo);
		amdgpu_bo_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += ctx.bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

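/**
 * amdgpu_cs_list_validate - validate a list of BOs
 * @p: parser structure holding parsing context
 * @validated: list of amdgpu_bo_list_entry to validate
 *
 * Validates every BO on the list with amdgpu_cs_validate(), binding pending
 * userptr pages first where that support is compiled in (see the "notyet"
 * block below). Returns 0 on success or a negative error code.
 */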
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
#ifdef notyet
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
		    lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}
#endif

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

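/**
 * amdgpu_cs_parser_bos - reserve and validate all BOs of a submission
 * @p: parser structure holding parsing context
 * @cs: ioctl data from userspace
 *
 * Builds the validation list from the BO list (handle, BO_HANDLES chunk, or
 * a newly created empty list), reserves all buffers, resolves userptr pages,
 * validates everything against the current move thresholds and fills in the
 * GDS/GWS/OA job parameters. Returns 0 on success or a negative error code.
 */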
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	unsigned tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	if (p->bo_list->first_userptr != p->bo_list->num_entries)
		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	while (1) {
		struct list_head need_pages;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		INIT_LIST_HEAD(&need_pages);
		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = e->robj;

			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
#ifdef notyet
				release_pages(e->user_pages,
					      bo->tbo.ttm->num_pages);
#endif
				kvfree(e->user_pages);
				e->user_pages = NULL;
			}

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
						       sizeof(struct vm_page*),
						       GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("kvmalloc_array failure in %s\n",
					  __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->bo_va = amdgpu_vm_bo_find(vm, e->robj);

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds);
		p->job->gds_size = amdgpu_bo_size(gds);
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws);
		p->job->gws_size = amdgpu_bo_size(gws);
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa);
		p->job->oa_size = amdgpu_bo_size(oa);
	}

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		if (!e->user_pages)
			continue;

#ifdef notyet
		release_pages(e->user_pages,
			      e->robj->tbo.ttm->num_pages);
#endif
		kvfree(e->user_pages);
	}

	return r;
}

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;

		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(e->robj));
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to back off the buffer reservations
 *
 * If error is set, then unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
		drm_syncobj_put(parser->post_dep_syncobjs[i]);
	kfree(parser->post_dep_syncobjs);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}

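/**
 * amdgpu_bo_vm_update_pte - update the page tables for a submission
 * @p: parser structure holding parsing context
 *
 * Clears freed mappings, updates the bo_va of every BO on the validation
 * list and makes the job wait for the resulting page table updates.
 * Returns 0 on success or a negative error code.
 */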
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct dma_fence *f;

		/* ignore duplicates */
		bo = e->robj;
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
	if (r)
		return r;

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			/* ignore duplicates */
			if (!e->robj)
				continue;

			amdgpu_vm_bo_invalidate(adev, e->robj, false);
		}
	}

	return r;
}

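/**
 * amdgpu_cs_ib_vm_chunk - patch or parse the IBs and update the VM
 * @adev: amdgpu device
 * @p: parser structure holding parsing context
 *
 * For rings that need VM emulation (UVD/VCE) the IBs are mapped and either
 * copied and parsed or patched in place. Afterwards the page tables are
 * updated and the rings are synchronized. Returns 0 on success or a
 * negative error code.
 */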
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->ring;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (p->ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (p->job->vm) {
		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);

		r = amdgpu_bo_vm_update_pte(p);
		if (r)
			return r;

		r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
		if (r)
			return r;
	}

	return amdgpu_cs_sync_rings(p);
}

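/**
 * amdgpu_cs_ib_fill - set up the job IBs from the IB chunks
 * @adev: amdgpu device
 * @parser: parser structure holding parsing context
 *
 * Maps every AMDGPU_CHUNK_ID_IB chunk to a ring, allocates the matching
 * amdgpu_ib and copies over the GPU address, size and flags. All IBs of a
 * submission must target the same ring. Returns 0 on success or a negative
 * error code.
 */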
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r, ce_preempt = 0, de_preempt = 0;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
					 chunk_ib->ip_instance, chunk_ib->ring, &ring);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->ring && parser->ring != ring)
			return -EINVAL;

		parser->ring = ring;

		r = amdgpu_ib_get(adev, vm,
				  ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
				  ib);
		if (r) {
			DRM_ERROR("Failed to get ib!\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
}

static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ring *ring;
		struct amdgpu_ctx *ctx;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
					 deps[i].ip_type,
					 deps[i].ip_instance,
					 deps[i].ring, &ring);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, ring,
					     deps[i].handle);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
					      true);
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, &fence);
	if (r)
		return r;

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_dep_syncobjs = kmalloc_array(num_deps,
					     sizeof(struct drm_syncobj *),
					     GFP_KERNEL);
	p->num_post_dep_syncobjs = 0;

	if (!p->post_dep_syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_dep_syncobjs[i])
			return -EINVAL;
		p->num_post_dep_syncobjs++;
	}
	return 0;
}

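/**
 * amdgpu_cs_dependencies - process all dependency chunks
 * @adev: amdgpu device
 * @p: parser structure holding parsing context
 *
 * Walks the chunks and adds fence dependencies and syncobj waits to the
 * job's sync object, and collects the syncobjs to signal on completion.
 * Returns 0 on success or a negative error code.
 */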
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}

static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_ring *ring = p->ring;
	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	enum drm_sched_priority priority;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, p->filp);
	if (r)
		goto error_unlock;

	/* No memory allocation is allowed while holding the mn lock */
	amdgpu_mn_lock(p->mn);
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = e->robj;

		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
			r = -ERESTARTSYS;
			goto error_abort;
		}
	}

	job->owner = p->filp;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
	if (r) {
		dma_fence_put(p->fence);
		dma_fence_put(&job->base.s_fence->finished);
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);
		return r;
	}

	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;

error_abort:
	dma_fence_put(&job->base.s_fence->finished);
	job->base.s_fence = NULL;
	amdgpu_mn_unlock(p->mn);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

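/**
 * amdgpu_cs_ioctl - process a command submission from userspace
 * @dev: drm device
 * @data: ioctl data (union drm_amdgpu_cs)
 * @filp: file private
 *
 * Top level of the CS path: initializes the parser, fills the IBs, reserves
 * and validates the buffers, resolves dependencies and finally submits the
 * job to the scheduler.
 */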
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser!\n");
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 wait->in.ip_type, wait->in.ip_instance,
				 wait->in.ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
				 user->ip_instance, user->ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

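/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a fence into a user handle
 * @dev: drm device
 * @data: ioctl data (union drm_amdgpu_fence_to_handle)
 * @filp: file private
 *
 * Wraps the fence of a previous submission into a syncobj handle, a syncobj
 * fd or a sync_file fd, depending on info->in.what.
 */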
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* Check the fence error before dropping our reference to
		 * avoid a use-after-free on the fence.
		 */
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	STUB();
	return -ENOSYS;
#if 0
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
#endif
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find the bo_va mapping for a VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when the
 * mapping is found, a negative error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}