1 /* 2 * Copyright 2008 Jerome Glisse. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: 25 * Jerome Glisse <glisse@freedesktop.org> 26 */ 27 28 #include <linux/file.h> 29 #include <linux/pagemap.h> 30 #include <linux/sync_file.h> 31 #include <linux/dma-buf.h> 32 33 #include <drm/amdgpu_drm.h> 34 #include <drm/drm_syncobj.h> 35 #include "amdgpu_cs.h" 36 #include "amdgpu.h" 37 #include "amdgpu_trace.h" 38 #include "amdgpu_gmc.h" 39 #include "amdgpu_gem.h" 40 #include "amdgpu_ras.h" 41 42 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, 43 struct amdgpu_device *adev, 44 struct drm_file *filp, 45 union drm_amdgpu_cs *cs) 46 { 47 struct amdgpu_fpriv *fpriv = filp->driver_priv; 48 49 if (cs->in.num_chunks == 0) 50 return -EINVAL; 51 52 memset(p, 0, sizeof(*p)); 53 p->adev = adev; 54 p->filp = filp; 55 56 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); 57 if (!p->ctx) 58 return -EINVAL; 59 60 if (atomic_read(&p->ctx->guilty)) { 61 amdgpu_ctx_put(p->ctx); 62 return -ECANCELED; 63 } 64 return 0; 65 } 66 67 static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p, 68 struct drm_amdgpu_cs_chunk_ib *chunk_ib) 69 { 70 struct drm_sched_entity *entity; 71 unsigned int i; 72 int r; 73 74 r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type, 75 chunk_ib->ip_instance, 76 chunk_ib->ring, &entity); 77 if (r) 78 return r; 79 80 /* 81 * Abort if there is no run queue associated with this entity. 82 * Possibly because of disabled HW IP. 83 */ 84 if (entity->rq == NULL) 85 return -EINVAL; 86 87 /* Check if we can add this IB to some existing job */ 88 for (i = 0; i < p->gang_size; ++i) 89 if (p->entities[i] == entity) 90 return i; 91 92 /* If not increase the gang size if possible */ 93 if (i == AMDGPU_CS_GANG_SIZE) 94 return -EINVAL; 95 96 p->entities[i] = entity; 97 p->gang_size = i + 1; 98 return i; 99 } 100 101 static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p, 102 struct drm_amdgpu_cs_chunk_ib *chunk_ib, 103 unsigned int *num_ibs) 104 { 105 int r; 106 107 r = amdgpu_cs_job_idx(p, chunk_ib); 108 if (r < 0) 109 return r; 110 111 ++(num_ibs[r]); 112 p->gang_leader_idx = r; 113 return 0; 114 } 115 116 static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p, 117 struct drm_amdgpu_cs_chunk_fence *data, 118 uint32_t *offset) 119 { 120 struct drm_gem_object *gobj; 121 struct amdgpu_bo *bo; 122 unsigned long size; 123 int r; 124 125 gobj = drm_gem_object_lookup(p->filp, data->handle); 126 if (gobj == NULL) 127 return -EINVAL; 128 129 bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 130 p->uf_entry.priority = 0; 131 p->uf_entry.tv.bo = &bo->tbo; 132 /* One for TTM and two for the CS job */ 133 p->uf_entry.tv.num_shared = 3; 134 135 drm_gem_object_put(gobj); 136 137 size = amdgpu_bo_size(bo); 138 if (size != PAGE_SIZE || (data->offset + 8) > size) { 139 r = -EINVAL; 140 goto error_unref; 141 } 142 143 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 144 r = -EINVAL; 145 goto error_unref; 146 } 147 148 *offset = data->offset; 149 150 return 0; 151 152 error_unref: 153 amdgpu_bo_unref(&bo); 154 return r; 155 } 156 157 static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p, 158 struct drm_amdgpu_bo_list_in *data) 159 { 160 struct drm_amdgpu_bo_list_entry *info; 161 int r; 162 163 r = amdgpu_bo_create_list_entry_array(data, &info); 164 if (r) 165 return r; 166 167 r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number, 168 &p->bo_list); 169 if (r) 170 goto error_free; 171 172 kvfree(info); 173 return 0; 174 175 error_free: 176 kvfree(info); 177 178 return r; 179 } 180 181 /* Copy the data from userspace and go over it the first time */ 182 static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, 183 union drm_amdgpu_cs *cs) 184 { 185 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 186 unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { }; 187 struct amdgpu_vm *vm = &fpriv->vm; 188 uint64_t *chunk_array_user; 189 uint64_t *chunk_array; 190 uint32_t uf_offset = 0; 191 unsigned int size; 192 int ret; 193 int i; 194 195 chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), 196 GFP_KERNEL); 197 if (!chunk_array) 198 return -ENOMEM; 199 200 /* get chunks */ 201 chunk_array_user = u64_to_user_ptr(cs->in.chunks); 202 if (copy_from_user(chunk_array, chunk_array_user, 203 sizeof(uint64_t)*cs->in.num_chunks)) { 204 ret = -EFAULT; 205 goto free_chunk; 206 } 207 208 p->nchunks = cs->in.num_chunks; 209 p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), 210 GFP_KERNEL); 211 if (!p->chunks) { 212 ret = -ENOMEM; 213 goto free_chunk; 214 } 215 216 for (i = 0; i < p->nchunks; i++) { 217 struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL; 218 struct drm_amdgpu_cs_chunk user_chunk; 219 uint32_t __user *cdata; 220 221 chunk_ptr = u64_to_user_ptr(chunk_array[i]); 222 if (copy_from_user(&user_chunk, chunk_ptr, 223 sizeof(struct drm_amdgpu_cs_chunk))) { 224 ret = -EFAULT; 225 i--; 226 goto free_partial_kdata; 227 } 228 p->chunks[i].chunk_id = user_chunk.chunk_id; 229 p->chunks[i].length_dw = user_chunk.length_dw; 230 231 size = p->chunks[i].length_dw; 232 cdata = u64_to_user_ptr(user_chunk.chunk_data); 233 234 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), 235 GFP_KERNEL); 236 if (p->chunks[i].kdata == NULL) { 237 ret = -ENOMEM; 238 i--; 239 goto free_partial_kdata; 240 } 241 size *= sizeof(uint32_t); 242 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 243 ret = -EFAULT; 244 goto free_partial_kdata; 245 } 246 247 /* Assume the worst on the following checks */ 248 ret = -EINVAL; 249 switch (p->chunks[i].chunk_id) { 250 case AMDGPU_CHUNK_ID_IB: 251 if (size < sizeof(struct drm_amdgpu_cs_chunk_ib)) 252 goto free_partial_kdata; 253 254 ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs); 255 if (ret) 256 goto free_partial_kdata; 257 break; 258 259 case AMDGPU_CHUNK_ID_FENCE: 260 if (size < sizeof(struct drm_amdgpu_cs_chunk_fence)) 261 goto free_partial_kdata; 262 263 ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata, 264 &uf_offset); 265 if (ret) 266 goto free_partial_kdata; 267 break; 268 269 case AMDGPU_CHUNK_ID_BO_HANDLES: 270 if (size < sizeof(struct drm_amdgpu_bo_list_in)) 271 goto free_partial_kdata; 272 273 ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata); 274 if (ret) 275 goto free_partial_kdata; 276 break; 277 278 case AMDGPU_CHUNK_ID_DEPENDENCIES: 279 case AMDGPU_CHUNK_ID_SYNCOBJ_IN: 280 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: 281 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: 282 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: 283 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: 284 break; 285 286 default: 287 goto free_partial_kdata; 288 } 289 } 290 291 if (!p->gang_size) { 292 ret = -EINVAL; 293 goto free_partial_kdata; 294 } 295 296 for (i = 0; i < p->gang_size; ++i) { 297 ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm); 298 if (ret) 299 goto free_all_kdata; 300 301 ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i], 302 &fpriv->vm); 303 if (ret) 304 goto free_all_kdata; 305 } 306 p->gang_leader = p->jobs[p->gang_leader_idx]; 307 308 if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) { 309 ret = -ECANCELED; 310 goto free_all_kdata; 311 } 312 313 if (p->uf_entry.tv.bo) 314 p->gang_leader->uf_addr = uf_offset; 315 kvfree(chunk_array); 316 317 /* Use this opportunity to fill in task info for the vm */ 318 amdgpu_vm_set_task_info(vm); 319 320 return 0; 321 322 free_all_kdata: 323 i = p->nchunks - 1; 324 free_partial_kdata: 325 for (; i >= 0; i--) 326 kvfree(p->chunks[i].kdata); 327 kvfree(p->chunks); 328 p->chunks = NULL; 329 p->nchunks = 0; 330 free_chunk: 331 kvfree(chunk_array); 332 333 return ret; 334 } 335 336 static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p, 337 struct amdgpu_cs_chunk *chunk, 338 unsigned int *ce_preempt, 339 unsigned int *de_preempt) 340 { 341 struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata; 342 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 343 struct amdgpu_vm *vm = &fpriv->vm; 344 struct amdgpu_ring *ring; 345 struct amdgpu_job *job; 346 struct amdgpu_ib *ib; 347 int r; 348 349 r = amdgpu_cs_job_idx(p, chunk_ib); 350 if (r < 0) 351 return r; 352 353 job = p->jobs[r]; 354 ring = amdgpu_job_ring(job); 355 ib = &job->ibs[job->num_ibs++]; 356 357 /* MM engine doesn't support user fences */ 358 if (p->uf_entry.tv.bo && ring->funcs->no_user_fence) 359 return -EINVAL; 360 361 if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && 362 chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) { 363 if (chunk_ib->flags & AMDGPU_IB_FLAG_CE) 364 (*ce_preempt)++; 365 else 366 (*de_preempt)++; 367 368 /* Each GFX command submit allows only 1 IB max 369 * preemptible for CE & DE */ 370 if (*ce_preempt > 1 || *de_preempt > 1) 371 return -EINVAL; 372 } 373 374 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) 375 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; 376 377 r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ? 378 chunk_ib->ib_bytes : 0, 379 AMDGPU_IB_POOL_DELAYED, ib); 380 if (r) { 381 DRM_ERROR("Failed to get ib !\n"); 382 return r; 383 } 384 385 ib->gpu_addr = chunk_ib->va_start; 386 ib->length_dw = chunk_ib->ib_bytes / 4; 387 ib->flags = chunk_ib->flags; 388 return 0; 389 } 390 391 static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p, 392 struct amdgpu_cs_chunk *chunk) 393 { 394 struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata; 395 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 396 unsigned num_deps; 397 int i, r; 398 399 num_deps = chunk->length_dw * 4 / 400 sizeof(struct drm_amdgpu_cs_chunk_dep); 401 402 for (i = 0; i < num_deps; ++i) { 403 struct amdgpu_ctx *ctx; 404 struct drm_sched_entity *entity; 405 struct dma_fence *fence; 406 407 ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id); 408 if (ctx == NULL) 409 return -EINVAL; 410 411 r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type, 412 deps[i].ip_instance, 413 deps[i].ring, &entity); 414 if (r) { 415 amdgpu_ctx_put(ctx); 416 return r; 417 } 418 419 fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); 420 amdgpu_ctx_put(ctx); 421 422 if (IS_ERR(fence)) 423 return PTR_ERR(fence); 424 else if (!fence) 425 continue; 426 427 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { 428 struct drm_sched_fence *s_fence; 429 struct dma_fence *old = fence; 430 431 s_fence = to_drm_sched_fence(fence); 432 fence = dma_fence_get(&s_fence->scheduled); 433 dma_fence_put(old); 434 } 435 436 r = amdgpu_sync_fence(&p->gang_leader->sync, fence); 437 dma_fence_put(fence); 438 if (r) 439 return r; 440 } 441 return 0; 442 } 443 444 static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p, 445 uint32_t handle, u64 point, 446 u64 flags) 447 { 448 struct dma_fence *fence; 449 int r; 450 451 r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence); 452 if (r) { 453 DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n", 454 handle, point, r); 455 return r; 456 } 457 458 r = amdgpu_sync_fence(&p->gang_leader->sync, fence); 459 dma_fence_put(fence); 460 461 return r; 462 } 463 464 static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p, 465 struct amdgpu_cs_chunk *chunk) 466 { 467 struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata; 468 unsigned num_deps; 469 int i, r; 470 471 num_deps = chunk->length_dw * 4 / 472 sizeof(struct drm_amdgpu_cs_chunk_sem); 473 for (i = 0; i < num_deps; ++i) { 474 r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0); 475 if (r) 476 return r; 477 } 478 479 return 0; 480 } 481 482 static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p, 483 struct amdgpu_cs_chunk *chunk) 484 { 485 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata; 486 unsigned num_deps; 487 int i, r; 488 489 num_deps = chunk->length_dw * 4 / 490 sizeof(struct drm_amdgpu_cs_chunk_syncobj); 491 for (i = 0; i < num_deps; ++i) { 492 r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle, 493 syncobj_deps[i].point, 494 syncobj_deps[i].flags); 495 if (r) 496 return r; 497 } 498 499 return 0; 500 } 501 502 static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p, 503 struct amdgpu_cs_chunk *chunk) 504 { 505 struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata; 506 unsigned num_deps; 507 int i; 508 509 num_deps = chunk->length_dw * 4 / 510 sizeof(struct drm_amdgpu_cs_chunk_sem); 511 512 if (p->post_deps) 513 return -EINVAL; 514 515 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 516 GFP_KERNEL); 517 p->num_post_deps = 0; 518 519 if (!p->post_deps) 520 return -ENOMEM; 521 522 523 for (i = 0; i < num_deps; ++i) { 524 p->post_deps[i].syncobj = 525 drm_syncobj_find(p->filp, deps[i].handle); 526 if (!p->post_deps[i].syncobj) 527 return -EINVAL; 528 p->post_deps[i].chain = NULL; 529 p->post_deps[i].point = 0; 530 p->num_post_deps++; 531 } 532 533 return 0; 534 } 535 536 static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p, 537 struct amdgpu_cs_chunk *chunk) 538 { 539 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata; 540 unsigned num_deps; 541 int i; 542 543 num_deps = chunk->length_dw * 4 / 544 sizeof(struct drm_amdgpu_cs_chunk_syncobj); 545 546 if (p->post_deps) 547 return -EINVAL; 548 549 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 550 GFP_KERNEL); 551 p->num_post_deps = 0; 552 553 if (!p->post_deps) 554 return -ENOMEM; 555 556 for (i = 0; i < num_deps; ++i) { 557 struct amdgpu_cs_post_dep *dep = &p->post_deps[i]; 558 559 dep->chain = NULL; 560 if (syncobj_deps[i].point) { 561 dep->chain = dma_fence_chain_alloc(); 562 if (!dep->chain) 563 return -ENOMEM; 564 } 565 566 dep->syncobj = drm_syncobj_find(p->filp, 567 syncobj_deps[i].handle); 568 if (!dep->syncobj) { 569 dma_fence_chain_free(dep->chain); 570 return -EINVAL; 571 } 572 dep->point = syncobj_deps[i].point; 573 p->num_post_deps++; 574 } 575 576 return 0; 577 } 578 579 static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p) 580 { 581 unsigned int ce_preempt = 0, de_preempt = 0; 582 int i, r; 583 584 for (i = 0; i < p->nchunks; ++i) { 585 struct amdgpu_cs_chunk *chunk; 586 587 chunk = &p->chunks[i]; 588 589 switch (chunk->chunk_id) { 590 case AMDGPU_CHUNK_ID_IB: 591 r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt); 592 if (r) 593 return r; 594 break; 595 case AMDGPU_CHUNK_ID_DEPENDENCIES: 596 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: 597 r = amdgpu_cs_p2_dependencies(p, chunk); 598 if (r) 599 return r; 600 break; 601 case AMDGPU_CHUNK_ID_SYNCOBJ_IN: 602 r = amdgpu_cs_p2_syncobj_in(p, chunk); 603 if (r) 604 return r; 605 break; 606 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: 607 r = amdgpu_cs_p2_syncobj_out(p, chunk); 608 if (r) 609 return r; 610 break; 611 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: 612 r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk); 613 if (r) 614 return r; 615 break; 616 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: 617 r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk); 618 if (r) 619 return r; 620 break; 621 } 622 } 623 624 return 0; 625 } 626 627 /* Convert microseconds to bytes. */ 628 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us) 629 { 630 if (us <= 0 || !adev->mm_stats.log2_max_MBps) 631 return 0; 632 633 /* Since accum_us is incremented by a million per second, just 634 * multiply it by the number of MB/s to get the number of bytes. 635 */ 636 return us << adev->mm_stats.log2_max_MBps; 637 } 638 639 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes) 640 { 641 if (!adev->mm_stats.log2_max_MBps) 642 return 0; 643 644 return bytes >> adev->mm_stats.log2_max_MBps; 645 } 646 647 /* Returns how many bytes TTM can move right now. If no bytes can be moved, 648 * it returns 0. If it returns non-zero, it's OK to move at least one buffer, 649 * which means it can go over the threshold once. If that happens, the driver 650 * will be in debt and no other buffer migrations can be done until that debt 651 * is repaid. 652 * 653 * This approach allows moving a buffer of any size (it's important to allow 654 * that). 655 * 656 * The currency is simply time in microseconds and it increases as the clock 657 * ticks. The accumulated microseconds (us) are converted to bytes and 658 * returned. 659 */ 660 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, 661 u64 *max_bytes, 662 u64 *max_vis_bytes) 663 { 664 s64 time_us, increment_us; 665 u64 free_vram, total_vram, used_vram; 666 /* Allow a maximum of 200 accumulated ms. This is basically per-IB 667 * throttling. 668 * 669 * It means that in order to get full max MBps, at least 5 IBs per 670 * second must be submitted and not more than 200ms apart from each 671 * other. 672 */ 673 const s64 us_upper_bound = 200000; 674 675 if (!adev->mm_stats.log2_max_MBps) { 676 *max_bytes = 0; 677 *max_vis_bytes = 0; 678 return; 679 } 680 681 total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size); 682 used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager); 683 free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram; 684 685 spin_lock(&adev->mm_stats.lock); 686 687 /* Increase the amount of accumulated us. */ 688 time_us = ktime_to_us(ktime_get()); 689 increment_us = time_us - adev->mm_stats.last_update_us; 690 adev->mm_stats.last_update_us = time_us; 691 adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us, 692 us_upper_bound); 693 694 /* This prevents the short period of low performance when the VRAM 695 * usage is low and the driver is in debt or doesn't have enough 696 * accumulated us to fill VRAM quickly. 697 * 698 * The situation can occur in these cases: 699 * - a lot of VRAM is freed by userspace 700 * - the presence of a big buffer causes a lot of evictions 701 * (solution: split buffers into smaller ones) 702 * 703 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting 704 * accum_us to a positive number. 705 */ 706 if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) { 707 s64 min_us; 708 709 /* Be more aggressive on dGPUs. Try to fill a portion of free 710 * VRAM now. 711 */ 712 if (!(adev->flags & AMD_IS_APU)) 713 min_us = bytes_to_us(adev, free_vram / 4); 714 else 715 min_us = 0; /* Reset accum_us on APUs. */ 716 717 adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us); 718 } 719 720 /* This is set to 0 if the driver is in debt to disallow (optional) 721 * buffer moves. 722 */ 723 *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); 724 725 /* Do the same for visible VRAM if half of it is free */ 726 if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) { 727 u64 total_vis_vram = adev->gmc.visible_vram_size; 728 u64 used_vis_vram = 729 amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); 730 731 if (used_vis_vram < total_vis_vram) { 732 u64 free_vis_vram = total_vis_vram - used_vis_vram; 733 adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis + 734 increment_us, us_upper_bound); 735 736 if (free_vis_vram >= total_vis_vram / 2) 737 adev->mm_stats.accum_us_vis = 738 max(bytes_to_us(adev, free_vis_vram / 2), 739 adev->mm_stats.accum_us_vis); 740 } 741 742 *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis); 743 } else { 744 *max_vis_bytes = 0; 745 } 746 747 spin_unlock(&adev->mm_stats.lock); 748 } 749 750 /* Report how many bytes have really been moved for the last command 751 * submission. This can result in a debt that can stop buffer migrations 752 * temporarily. 753 */ 754 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, 755 u64 num_vis_bytes) 756 { 757 spin_lock(&adev->mm_stats.lock); 758 adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); 759 adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes); 760 spin_unlock(&adev->mm_stats.lock); 761 } 762 763 static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo) 764 { 765 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 766 struct amdgpu_cs_parser *p = param; 767 struct ttm_operation_ctx ctx = { 768 .interruptible = true, 769 .no_wait_gpu = false, 770 .resv = bo->tbo.base.resv 771 }; 772 uint32_t domain; 773 int r; 774 775 if (bo->tbo.pin_count) 776 return 0; 777 778 /* Don't move this buffer if we have depleted our allowance 779 * to move it. Don't move anything if the threshold is zero. 780 */ 781 if (p->bytes_moved < p->bytes_moved_threshold && 782 (!bo->tbo.base.dma_buf || 783 list_empty(&bo->tbo.base.dma_buf->attachments))) { 784 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 785 (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { 786 /* And don't move a CPU_ACCESS_REQUIRED BO to limited 787 * visible VRAM if we've depleted our allowance to do 788 * that. 789 */ 790 if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) 791 domain = bo->preferred_domains; 792 else 793 domain = bo->allowed_domains; 794 } else { 795 domain = bo->preferred_domains; 796 } 797 } else { 798 domain = bo->allowed_domains; 799 } 800 801 retry: 802 amdgpu_bo_placement_from_domain(bo, domain); 803 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 804 805 p->bytes_moved += ctx.bytes_moved; 806 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 807 amdgpu_bo_in_cpu_visible_vram(bo)) 808 p->bytes_moved_vis += ctx.bytes_moved; 809 810 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { 811 domain = bo->allowed_domains; 812 goto retry; 813 } 814 815 return r; 816 } 817 818 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, 819 struct list_head *validated) 820 { 821 struct ttm_operation_ctx ctx = { true, false }; 822 struct amdgpu_bo_list_entry *lobj; 823 int r; 824 825 list_for_each_entry(lobj, validated, tv.head) { 826 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo); 827 struct mm_struct *usermm; 828 829 #ifdef notyet 830 usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); 831 if (usermm && usermm != current->mm) 832 return -EPERM; 833 #endif 834 835 if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) && 836 lobj->user_invalidated && lobj->user_pages) { 837 amdgpu_bo_placement_from_domain(bo, 838 AMDGPU_GEM_DOMAIN_CPU); 839 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 840 if (r) 841 return r; 842 843 amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, 844 lobj->user_pages); 845 } 846 847 r = amdgpu_cs_bo_validate(p, bo); 848 if (r) 849 return r; 850 851 kvfree(lobj->user_pages); 852 lobj->user_pages = NULL; 853 } 854 return 0; 855 } 856 857 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, 858 union drm_amdgpu_cs *cs) 859 { 860 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 861 struct amdgpu_vm *vm = &fpriv->vm; 862 struct amdgpu_bo_list_entry *e; 863 struct list_head duplicates; 864 unsigned int i; 865 int r; 866 867 INIT_LIST_HEAD(&p->validated); 868 869 /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */ 870 if (cs->in.bo_list_handle) { 871 if (p->bo_list) 872 return -EINVAL; 873 874 r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle, 875 &p->bo_list); 876 if (r) 877 return r; 878 } else if (!p->bo_list) { 879 /* Create a empty bo_list when no handle is provided */ 880 r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0, 881 &p->bo_list); 882 if (r) 883 return r; 884 } 885 886 mutex_lock(&p->bo_list->bo_list_mutex); 887 888 /* One for TTM and one for the CS job */ 889 amdgpu_bo_list_for_each_entry(e, p->bo_list) 890 e->tv.num_shared = 2; 891 892 amdgpu_bo_list_get_list(p->bo_list, &p->validated); 893 894 INIT_LIST_HEAD(&duplicates); 895 amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); 896 897 if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent) 898 list_add(&p->uf_entry.tv.head, &p->validated); 899 900 /* Get userptr backing pages. If pages are updated after registered 901 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do 902 * amdgpu_ttm_backend_bind() to flush and invalidate new pages 903 */ 904 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 905 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 906 bool userpage_invalidated = false; 907 int i; 908 909 e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages, 910 sizeof(struct vm_page *), 911 GFP_KERNEL | __GFP_ZERO); 912 if (!e->user_pages) { 913 DRM_ERROR("kvmalloc_array failure\n"); 914 r = -ENOMEM; 915 goto out_free_user_pages; 916 } 917 918 r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range); 919 if (r) { 920 kvfree(e->user_pages); 921 e->user_pages = NULL; 922 goto out_free_user_pages; 923 } 924 925 for (i = 0; i < bo->tbo.ttm->num_pages; i++) { 926 if (bo->tbo.ttm->pages[i] != e->user_pages[i]) { 927 userpage_invalidated = true; 928 break; 929 } 930 } 931 e->user_invalidated = userpage_invalidated; 932 } 933 934 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, 935 &duplicates); 936 if (unlikely(r != 0)) { 937 if (r != -ERESTARTSYS) 938 DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); 939 goto out_free_user_pages; 940 } 941 942 amdgpu_bo_list_for_each_entry(e, p->bo_list) { 943 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 944 945 e->bo_va = amdgpu_vm_bo_find(vm, bo); 946 } 947 948 amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, 949 &p->bytes_moved_vis_threshold); 950 p->bytes_moved = 0; 951 p->bytes_moved_vis = 0; 952 953 r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, 954 amdgpu_cs_bo_validate, p); 955 if (r) { 956 DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); 957 goto error_validate; 958 } 959 960 r = amdgpu_cs_list_validate(p, &duplicates); 961 if (r) 962 goto error_validate; 963 964 r = amdgpu_cs_list_validate(p, &p->validated); 965 if (r) 966 goto error_validate; 967 968 if (p->uf_entry.tv.bo) { 969 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo); 970 971 r = amdgpu_ttm_alloc_gart(&uf->tbo); 972 if (r) 973 goto error_validate; 974 975 p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf); 976 } 977 978 amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, 979 p->bytes_moved_vis); 980 981 for (i = 0; i < p->gang_size; ++i) 982 amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj, 983 p->bo_list->gws_obj, 984 p->bo_list->oa_obj); 985 return 0; 986 987 error_validate: 988 ttm_eu_backoff_reservation(&p->ticket, &p->validated); 989 990 out_free_user_pages: 991 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 992 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 993 994 if (!e->user_pages) 995 continue; 996 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); 997 kvfree(e->user_pages); 998 e->user_pages = NULL; 999 e->range = NULL; 1000 } 1001 mutex_unlock(&p->bo_list->bo_list_mutex); 1002 return r; 1003 } 1004 1005 static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p) 1006 { 1007 int i, j; 1008 1009 if (!trace_amdgpu_cs_enabled()) 1010 return; 1011 1012 for (i = 0; i < p->gang_size; ++i) { 1013 struct amdgpu_job *job = p->jobs[i]; 1014 1015 for (j = 0; j < job->num_ibs; ++j) 1016 trace_amdgpu_cs(p, job, &job->ibs[j]); 1017 } 1018 } 1019 1020 static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p, 1021 struct amdgpu_job *job) 1022 { 1023 struct amdgpu_ring *ring = amdgpu_job_ring(job); 1024 unsigned int i; 1025 int r; 1026 1027 /* Only for UVD/VCE VM emulation */ 1028 if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place) 1029 return 0; 1030 1031 for (i = 0; i < job->num_ibs; ++i) { 1032 struct amdgpu_ib *ib = &job->ibs[i]; 1033 struct amdgpu_bo_va_mapping *m; 1034 struct amdgpu_bo *aobj; 1035 uint64_t va_start; 1036 uint8_t *kptr; 1037 1038 va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK; 1039 r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m); 1040 if (r) { 1041 DRM_ERROR("IB va_start is invalid\n"); 1042 return r; 1043 } 1044 1045 if ((va_start + ib->length_dw * 4) > 1046 (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) { 1047 DRM_ERROR("IB va_start+ib_bytes is invalid\n"); 1048 return -EINVAL; 1049 } 1050 1051 /* the IB should be reserved at this point */ 1052 r = amdgpu_bo_kmap(aobj, (void **)&kptr); 1053 if (r) { 1054 return r; 1055 } 1056 1057 kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE); 1058 1059 if (ring->funcs->parse_cs) { 1060 memcpy(ib->ptr, kptr, ib->length_dw * 4); 1061 amdgpu_bo_kunmap(aobj); 1062 1063 r = amdgpu_ring_parse_cs(ring, p, job, ib); 1064 if (r) 1065 return r; 1066 } else { 1067 ib->ptr = (uint32_t *)kptr; 1068 r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib); 1069 amdgpu_bo_kunmap(aobj); 1070 if (r) 1071 return r; 1072 } 1073 } 1074 1075 return 0; 1076 } 1077 1078 static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p) 1079 { 1080 unsigned int i; 1081 int r; 1082 1083 for (i = 0; i < p->gang_size; ++i) { 1084 r = amdgpu_cs_patch_ibs(p, p->jobs[i]); 1085 if (r) 1086 return r; 1087 } 1088 return 0; 1089 } 1090 1091 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) 1092 { 1093 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 1094 struct amdgpu_job *job = p->gang_leader; 1095 struct amdgpu_device *adev = p->adev; 1096 struct amdgpu_vm *vm = &fpriv->vm; 1097 struct amdgpu_bo_list_entry *e; 1098 struct amdgpu_bo_va *bo_va; 1099 struct amdgpu_bo *bo; 1100 unsigned int i; 1101 int r; 1102 1103 r = amdgpu_vm_clear_freed(adev, vm, NULL); 1104 if (r) 1105 return r; 1106 1107 r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); 1108 if (r) 1109 return r; 1110 1111 r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update); 1112 if (r) 1113 return r; 1114 1115 if (fpriv->csa_va) { 1116 bo_va = fpriv->csa_va; 1117 BUG_ON(!bo_va); 1118 r = amdgpu_vm_bo_update(adev, bo_va, false); 1119 if (r) 1120 return r; 1121 1122 r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); 1123 if (r) 1124 return r; 1125 } 1126 1127 amdgpu_bo_list_for_each_entry(e, p->bo_list) { 1128 /* ignore duplicates */ 1129 bo = ttm_to_amdgpu_bo(e->tv.bo); 1130 if (!bo) 1131 continue; 1132 1133 bo_va = e->bo_va; 1134 if (bo_va == NULL) 1135 continue; 1136 1137 r = amdgpu_vm_bo_update(adev, bo_va, false); 1138 if (r) 1139 return r; 1140 1141 r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); 1142 if (r) 1143 return r; 1144 } 1145 1146 r = amdgpu_vm_handle_moved(adev, vm); 1147 if (r) 1148 return r; 1149 1150 r = amdgpu_vm_update_pdes(adev, vm, false); 1151 if (r) 1152 return r; 1153 1154 r = amdgpu_sync_fence(&job->sync, vm->last_update); 1155 if (r) 1156 return r; 1157 1158 for (i = 0; i < p->gang_size; ++i) { 1159 job = p->jobs[i]; 1160 1161 if (!job->vm) 1162 continue; 1163 1164 job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo); 1165 } 1166 1167 if (amdgpu_vm_debug) { 1168 /* Invalidate all BOs to test for userspace bugs */ 1169 amdgpu_bo_list_for_each_entry(e, p->bo_list) { 1170 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 1171 1172 /* ignore duplicates */ 1173 if (!bo) 1174 continue; 1175 1176 amdgpu_vm_bo_invalidate(adev, bo, false); 1177 } 1178 } 1179 1180 return 0; 1181 } 1182 1183 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) 1184 { 1185 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 1186 struct amdgpu_job *leader = p->gang_leader; 1187 struct amdgpu_bo_list_entry *e; 1188 unsigned int i; 1189 int r; 1190 1191 list_for_each_entry(e, &p->validated, tv.head) { 1192 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 1193 struct dma_resv *resv = bo->tbo.base.resv; 1194 enum amdgpu_sync_mode sync_mode; 1195 1196 sync_mode = amdgpu_bo_explicit_sync(bo) ? 1197 AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER; 1198 r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode, 1199 &fpriv->vm); 1200 if (r) 1201 return r; 1202 } 1203 1204 for (i = 0; i < p->gang_size; ++i) { 1205 if (p->jobs[i] == leader) 1206 continue; 1207 1208 r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync); 1209 if (r) 1210 return r; 1211 } 1212 1213 r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]); 1214 if (r && r != -ERESTARTSYS) 1215 DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n"); 1216 return r; 1217 } 1218 1219 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) 1220 { 1221 int i; 1222 1223 for (i = 0; i < p->num_post_deps; ++i) { 1224 if (p->post_deps[i].chain && p->post_deps[i].point) { 1225 drm_syncobj_add_point(p->post_deps[i].syncobj, 1226 p->post_deps[i].chain, 1227 p->fence, p->post_deps[i].point); 1228 p->post_deps[i].chain = NULL; 1229 } else { 1230 drm_syncobj_replace_fence(p->post_deps[i].syncobj, 1231 p->fence); 1232 } 1233 } 1234 } 1235 1236 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, 1237 union drm_amdgpu_cs *cs) 1238 { 1239 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 1240 struct amdgpu_job *leader = p->gang_leader; 1241 struct amdgpu_bo_list_entry *e; 1242 unsigned int i; 1243 uint64_t seq; 1244 int r; 1245 1246 for (i = 0; i < p->gang_size; ++i) 1247 drm_sched_job_arm(&p->jobs[i]->base); 1248 1249 for (i = 0; i < p->gang_size; ++i) { 1250 struct dma_fence *fence; 1251 1252 if (p->jobs[i] == leader) 1253 continue; 1254 1255 fence = &p->jobs[i]->base.s_fence->scheduled; 1256 r = amdgpu_sync_fence(&leader->sync, fence); 1257 if (r) 1258 goto error_cleanup; 1259 } 1260 1261 if (p->gang_size > 1) { 1262 for (i = 0; i < p->gang_size; ++i) 1263 amdgpu_job_set_gang_leader(p->jobs[i], leader); 1264 } 1265 1266 /* No memory allocation is allowed while holding the notifier lock. 1267 * The lock is held until amdgpu_cs_submit is finished and fence is 1268 * added to BOs. 1269 */ 1270 mutex_lock(&p->adev->notifier_lock); 1271 1272 /* If userptr are invalidated after amdgpu_cs_parser_bos(), return 1273 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl. 1274 */ 1275 r = 0; 1276 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 1277 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 1278 1279 r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); 1280 e->range = NULL; 1281 } 1282 if (r) { 1283 r = -EAGAIN; 1284 goto error_unlock; 1285 } 1286 1287 p->fence = dma_fence_get(&leader->base.s_fence->finished); 1288 list_for_each_entry(e, &p->validated, tv.head) { 1289 1290 /* Everybody except for the gang leader uses READ */ 1291 for (i = 0; i < p->gang_size; ++i) { 1292 if (p->jobs[i] == leader) 1293 continue; 1294 1295 dma_resv_add_fence(e->tv.bo->base.resv, 1296 &p->jobs[i]->base.s_fence->finished, 1297 DMA_RESV_USAGE_READ); 1298 } 1299 1300 /* The gang leader is remembered as writer */ 1301 e->tv.num_shared = 0; 1302 } 1303 1304 seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx], 1305 p->fence); 1306 amdgpu_cs_post_dependencies(p); 1307 1308 if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && 1309 !p->ctx->preamble_presented) { 1310 leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; 1311 p->ctx->preamble_presented = true; 1312 } 1313 1314 cs->out.handle = seq; 1315 leader->uf_sequence = seq; 1316 1317 amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket); 1318 for (i = 0; i < p->gang_size; ++i) { 1319 amdgpu_job_free_resources(p->jobs[i]); 1320 trace_amdgpu_cs_ioctl(p->jobs[i]); 1321 drm_sched_entity_push_job(&p->jobs[i]->base); 1322 p->jobs[i] = NULL; 1323 } 1324 1325 amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); 1326 ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); 1327 1328 mutex_unlock(&p->adev->notifier_lock); 1329 mutex_unlock(&p->bo_list->bo_list_mutex); 1330 return 0; 1331 1332 error_unlock: 1333 mutex_unlock(&p->adev->notifier_lock); 1334 1335 error_cleanup: 1336 for (i = 0; i < p->gang_size; ++i) 1337 drm_sched_job_cleanup(&p->jobs[i]->base); 1338 return r; 1339 } 1340 1341 /* Cleanup the parser structure */ 1342 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser) 1343 { 1344 unsigned i; 1345 1346 for (i = 0; i < parser->num_post_deps; i++) { 1347 drm_syncobj_put(parser->post_deps[i].syncobj); 1348 kfree(parser->post_deps[i].chain); 1349 } 1350 kfree(parser->post_deps); 1351 1352 dma_fence_put(parser->fence); 1353 1354 if (parser->ctx) 1355 amdgpu_ctx_put(parser->ctx); 1356 if (parser->bo_list) 1357 amdgpu_bo_list_put(parser->bo_list); 1358 1359 for (i = 0; i < parser->nchunks; i++) 1360 kvfree(parser->chunks[i].kdata); 1361 kvfree(parser->chunks); 1362 for (i = 0; i < parser->gang_size; ++i) { 1363 if (parser->jobs[i]) 1364 amdgpu_job_free(parser->jobs[i]); 1365 } 1366 if (parser->uf_entry.tv.bo) { 1367 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo); 1368 1369 amdgpu_bo_unref(&uf); 1370 } 1371 } 1372 1373 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 1374 { 1375 struct amdgpu_device *adev = drm_to_adev(dev); 1376 struct amdgpu_cs_parser parser; 1377 int r; 1378 1379 if (amdgpu_ras_intr_triggered()) 1380 return -EHWPOISON; 1381 1382 if (!adev->accel_working) 1383 return -EBUSY; 1384 1385 r = amdgpu_cs_parser_init(&parser, adev, filp, data); 1386 if (r) { 1387 if (printk_ratelimit()) 1388 DRM_ERROR("Failed to initialize parser %d!\n", r); 1389 return r; 1390 } 1391 1392 r = amdgpu_cs_pass1(&parser, data); 1393 if (r) 1394 goto error_fini; 1395 1396 r = amdgpu_cs_pass2(&parser); 1397 if (r) 1398 goto error_fini; 1399 1400 r = amdgpu_cs_parser_bos(&parser, data); 1401 if (r) { 1402 if (r == -ENOMEM) 1403 DRM_ERROR("Not enough memory for command submission!\n"); 1404 else if (r != -ERESTARTSYS && r != -EAGAIN) 1405 DRM_ERROR("Failed to process the buffer list %d!\n", r); 1406 goto error_fini; 1407 } 1408 1409 r = amdgpu_cs_patch_jobs(&parser); 1410 if (r) 1411 goto error_backoff; 1412 1413 r = amdgpu_cs_vm_handling(&parser); 1414 if (r) 1415 goto error_backoff; 1416 1417 r = amdgpu_cs_sync_rings(&parser); 1418 if (r) 1419 goto error_backoff; 1420 1421 trace_amdgpu_cs_ibs(&parser); 1422 1423 r = amdgpu_cs_submit(&parser, data); 1424 if (r) 1425 goto error_backoff; 1426 1427 amdgpu_cs_parser_fini(&parser); 1428 return 0; 1429 1430 error_backoff: 1431 ttm_eu_backoff_reservation(&parser.ticket, &parser.validated); 1432 mutex_unlock(&parser.bo_list->bo_list_mutex); 1433 1434 error_fini: 1435 amdgpu_cs_parser_fini(&parser); 1436 return r; 1437 } 1438 1439 /** 1440 * amdgpu_cs_wait_ioctl - wait for a command submission to finish 1441 * 1442 * @dev: drm device 1443 * @data: data from userspace 1444 * @filp: file private 1445 * 1446 * Wait for the command submission identified by handle to finish. 1447 */ 1448 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, 1449 struct drm_file *filp) 1450 { 1451 union drm_amdgpu_wait_cs *wait = data; 1452 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); 1453 struct drm_sched_entity *entity; 1454 struct amdgpu_ctx *ctx; 1455 struct dma_fence *fence; 1456 long r; 1457 1458 ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); 1459 if (ctx == NULL) 1460 return -EINVAL; 1461 1462 r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance, 1463 wait->in.ring, &entity); 1464 if (r) { 1465 amdgpu_ctx_put(ctx); 1466 return r; 1467 } 1468 1469 fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle); 1470 if (IS_ERR(fence)) 1471 r = PTR_ERR(fence); 1472 else if (fence) { 1473 r = dma_fence_wait_timeout(fence, true, timeout); 1474 if (r > 0 && fence->error) 1475 r = fence->error; 1476 dma_fence_put(fence); 1477 } else 1478 r = 1; 1479 1480 amdgpu_ctx_put(ctx); 1481 if (r < 0) 1482 return r; 1483 1484 memset(wait, 0, sizeof(*wait)); 1485 wait->out.status = (r == 0); 1486 1487 return 0; 1488 } 1489 1490 /** 1491 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence 1492 * 1493 * @adev: amdgpu device 1494 * @filp: file private 1495 * @user: drm_amdgpu_fence copied from user space 1496 */ 1497 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, 1498 struct drm_file *filp, 1499 struct drm_amdgpu_fence *user) 1500 { 1501 struct drm_sched_entity *entity; 1502 struct amdgpu_ctx *ctx; 1503 struct dma_fence *fence; 1504 int r; 1505 1506 ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id); 1507 if (ctx == NULL) 1508 return ERR_PTR(-EINVAL); 1509 1510 r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance, 1511 user->ring, &entity); 1512 if (r) { 1513 amdgpu_ctx_put(ctx); 1514 return ERR_PTR(r); 1515 } 1516 1517 fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no); 1518 amdgpu_ctx_put(ctx); 1519 1520 return fence; 1521 } 1522 1523 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, 1524 struct drm_file *filp) 1525 { 1526 struct amdgpu_device *adev = drm_to_adev(dev); 1527 union drm_amdgpu_fence_to_handle *info = data; 1528 struct dma_fence *fence; 1529 struct drm_syncobj *syncobj; 1530 struct sync_file *sync_file; 1531 int fd, r; 1532 1533 fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence); 1534 if (IS_ERR(fence)) 1535 return PTR_ERR(fence); 1536 1537 if (!fence) 1538 fence = dma_fence_get_stub(); 1539 1540 switch (info->in.what) { 1541 case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ: 1542 r = drm_syncobj_create(&syncobj, 0, fence); 1543 dma_fence_put(fence); 1544 if (r) 1545 return r; 1546 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle); 1547 drm_syncobj_put(syncobj); 1548 return r; 1549 1550 case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD: 1551 r = drm_syncobj_create(&syncobj, 0, fence); 1552 dma_fence_put(fence); 1553 if (r) 1554 return r; 1555 r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle); 1556 drm_syncobj_put(syncobj); 1557 return r; 1558 1559 case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD: 1560 fd = get_unused_fd_flags(O_CLOEXEC); 1561 if (fd < 0) { 1562 dma_fence_put(fence); 1563 return fd; 1564 } 1565 1566 sync_file = sync_file_create(fence); 1567 dma_fence_put(fence); 1568 if (!sync_file) { 1569 put_unused_fd(fd); 1570 return -ENOMEM; 1571 } 1572 1573 fd_install(fd, sync_file->file); 1574 info->out.handle = fd; 1575 return 0; 1576 1577 default: 1578 dma_fence_put(fence); 1579 return -EINVAL; 1580 } 1581 } 1582 1583 /** 1584 * amdgpu_cs_wait_all_fences - wait on all fences to signal 1585 * 1586 * @adev: amdgpu device 1587 * @filp: file private 1588 * @wait: wait parameters 1589 * @fences: array of drm_amdgpu_fence 1590 */ 1591 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, 1592 struct drm_file *filp, 1593 union drm_amdgpu_wait_fences *wait, 1594 struct drm_amdgpu_fence *fences) 1595 { 1596 uint32_t fence_count = wait->in.fence_count; 1597 unsigned int i; 1598 long r = 1; 1599 1600 for (i = 0; i < fence_count; i++) { 1601 struct dma_fence *fence; 1602 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); 1603 1604 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); 1605 if (IS_ERR(fence)) 1606 return PTR_ERR(fence); 1607 else if (!fence) 1608 continue; 1609 1610 r = dma_fence_wait_timeout(fence, true, timeout); 1611 dma_fence_put(fence); 1612 if (r < 0) 1613 return r; 1614 1615 if (r == 0) 1616 break; 1617 1618 if (fence->error) 1619 return fence->error; 1620 } 1621 1622 memset(wait, 0, sizeof(*wait)); 1623 wait->out.status = (r > 0); 1624 1625 return 0; 1626 } 1627 1628 /** 1629 * amdgpu_cs_wait_any_fence - wait on any fence to signal 1630 * 1631 * @adev: amdgpu device 1632 * @filp: file private 1633 * @wait: wait parameters 1634 * @fences: array of drm_amdgpu_fence 1635 */ 1636 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev, 1637 struct drm_file *filp, 1638 union drm_amdgpu_wait_fences *wait, 1639 struct drm_amdgpu_fence *fences) 1640 { 1641 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); 1642 uint32_t fence_count = wait->in.fence_count; 1643 uint32_t first = ~0; 1644 struct dma_fence **array; 1645 unsigned int i; 1646 long r; 1647 1648 /* Prepare the fence array */ 1649 array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL); 1650 1651 if (array == NULL) 1652 return -ENOMEM; 1653 1654 for (i = 0; i < fence_count; i++) { 1655 struct dma_fence *fence; 1656 1657 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); 1658 if (IS_ERR(fence)) { 1659 r = PTR_ERR(fence); 1660 goto err_free_fence_array; 1661 } else if (fence) { 1662 array[i] = fence; 1663 } else { /* NULL, the fence has been already signaled */ 1664 r = 1; 1665 first = i; 1666 goto out; 1667 } 1668 } 1669 1670 r = dma_fence_wait_any_timeout(array, fence_count, true, timeout, 1671 &first); 1672 if (r < 0) 1673 goto err_free_fence_array; 1674 1675 out: 1676 memset(wait, 0, sizeof(*wait)); 1677 wait->out.status = (r > 0); 1678 wait->out.first_signaled = first; 1679 1680 if (first < fence_count && array[first]) 1681 r = array[first]->error; 1682 else 1683 r = 0; 1684 1685 err_free_fence_array: 1686 for (i = 0; i < fence_count; i++) 1687 dma_fence_put(array[i]); 1688 kfree(array); 1689 1690 return r; 1691 } 1692 1693 /** 1694 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish 1695 * 1696 * @dev: drm device 1697 * @data: data from userspace 1698 * @filp: file private 1699 */ 1700 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, 1701 struct drm_file *filp) 1702 { 1703 struct amdgpu_device *adev = drm_to_adev(dev); 1704 union drm_amdgpu_wait_fences *wait = data; 1705 uint32_t fence_count = wait->in.fence_count; 1706 struct drm_amdgpu_fence *fences_user; 1707 struct drm_amdgpu_fence *fences; 1708 int r; 1709 1710 /* Get the fences from userspace */ 1711 fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), 1712 GFP_KERNEL); 1713 if (fences == NULL) 1714 return -ENOMEM; 1715 1716 fences_user = u64_to_user_ptr(wait->in.fences); 1717 if (copy_from_user(fences, fences_user, 1718 sizeof(struct drm_amdgpu_fence) * fence_count)) { 1719 r = -EFAULT; 1720 goto err_free_fences; 1721 } 1722 1723 if (wait->in.wait_all) 1724 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); 1725 else 1726 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); 1727 1728 err_free_fences: 1729 kfree(fences); 1730 1731 return r; 1732 } 1733 1734 /** 1735 * amdgpu_cs_find_mapping - find bo_va for VM address 1736 * 1737 * @parser: command submission parser context 1738 * @addr: VM address 1739 * @bo: resulting BO of the mapping found 1740 * @map: Placeholder to return found BO mapping 1741 * 1742 * Search the buffer objects in the command submission context for a certain 1743 * virtual memory address. Returns allocation structure when found, NULL 1744 * otherwise. 1745 */ 1746 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, 1747 uint64_t addr, struct amdgpu_bo **bo, 1748 struct amdgpu_bo_va_mapping **map) 1749 { 1750 struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; 1751 struct ttm_operation_ctx ctx = { false, false }; 1752 struct amdgpu_vm *vm = &fpriv->vm; 1753 struct amdgpu_bo_va_mapping *mapping; 1754 int r; 1755 1756 addr /= AMDGPU_GPU_PAGE_SIZE; 1757 1758 mapping = amdgpu_vm_bo_lookup_mapping(vm, addr); 1759 if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo) 1760 return -EINVAL; 1761 1762 *bo = mapping->bo_va->base.bo; 1763 *map = mapping; 1764 1765 /* Double check that the BO is reserved by this CS */ 1766 if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) 1767 return -EINVAL; 1768 1769 if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { 1770 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 1771 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); 1772 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); 1773 if (r) 1774 return r; 1775 } 1776 1777 return amdgpu_ttm_alloc_gart(&(*bo)->tbo); 1778 } 1779