Lines Matching defs:ret
169 int ret = 0;
217 ret = -ENOMEM;
237 return ret;
310 int ret;
313 ret = amdgpu_bo_reserve(mem->bo, false);
314 if (ret)
315 return ret;
321 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
327 if (ret) {
328 pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
334 return ret;
371 int ret;
393 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
397 return ret;
404 int ret;
416 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
417 if (ret)
423 return ret;
442 int ret;
444 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
445 if (ret) {
447 return ret;
459 int ret;
461 ret = amdgpu_vm_update_pdes(adev, vm, false);
462 if (ret)
463 return ret;
525 int ret;
535 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
539 if (unlikely(ret))
542 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
543 if (unlikely(ret))
547 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
548 if (ret)
556 pr_err("DMA map userptr failed: %d\n", ret);
561 return ret;
569 int ret;
572 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
573 if (ret)
574 return ret;
617 int ret;
633 ret = dma_mapping_error(adev->dev, dma_addr);
634 if (unlikely(ret))
635 return ret;
640 ret = -ENOMEM;
645 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
646 if (unlikely(ret))
649 return ret;
658 return ret;
781 struct dma_buf *ret = amdgpu_gem_prime_export(
785 if (IS_ERR(ret))
786 return PTR_ERR(ret);
787 mem->dmabuf = ret;
798 int ret;
800 ret = kfd_mem_export_dmabuf(mem);
801 if (ret)
802 return ret;
836 int i, ret;
864 ret = -ENOMEM;
889 ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
890 if (ret)
898 ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
899 if (ret)
905 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
906 if (ret)
911 ret = -EINVAL;
916 ret = amdgpu_bo_reserve(bo[i], false);
917 if (ret) {
924 ret = -ENOMEM;
925 pr_err("Failed to add BO object to VM. ret == %d\n",
926 ret);
953 return ret;
1008 int ret = 0;
1012 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
1013 if (ret) {
1014 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
1018 ret = amdgpu_hmm_register(bo, user_addr);
1019 if (ret) {
1021 __func__, ret);
1039 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
1040 if (ret) {
1041 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
1045 ret = amdgpu_bo_reserve(bo, true);
1046 if (ret) {
1051 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1052 if (ret)
1059 if (ret)
1063 return ret;
1097 int ret;
1105 ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1107 if (unlikely(ret))
1110 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1112 if (unlikely(ret))
1120 return ret;
1139 int ret;
1152 ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
1155 if (unlikely(ret))
1160 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1162 if (unlikely(ret))
1170 return ret;
1186 int ret = 0;
1189 ret = amdgpu_sync_wait(ctx->sync, intr);
1193 return ret;
1217 int ret;
1219 ret = kfd_mem_dmamap_attachment(mem, entry);
1220 if (ret)
1221 return ret;
1224 ret = amdgpu_vm_bo_update(adev, bo_va, false);
1225 if (ret) {
1227 return ret;
1238 int ret;
1241 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1244 if (ret) {
1245 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1246 entry->va, ret);
1247 return ret;
1253 ret = update_gpuvm_pte(mem, entry, sync);
1254 if (ret) {
1264 return ret;
1270 int ret;
1274 ret = vm_validate_pt_pd_bos(peer_vm);
1275 if (ret)
1276 return ret;
1286 int ret;
1292 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1295 if (ret)
1296 return ret;
1306 int ret;
1310 ret = vm_update_pds(peer_vm, sync);
1311 if (ret)
1312 return ret;
1322 int ret;
1342 ret = -ENOMEM;
1357 ret = amdgpu_bo_reserve(vm->root.bo, true);
1358 if (ret)
1360 ret = vm_validate_pt_pd_bos(vm);
1361 if (ret) {
1365 ret = amdgpu_bo_sync_wait(vm->root.bo,
1367 if (ret)
1369 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
1370 if (ret)
1404 return ret;
1420 int ret = 0;
1422 ret = amdgpu_bo_reserve(bo, false);
1423 if (unlikely(ret))
1424 return ret;
1426 ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1427 if (ret)
1433 return ret;
1446 int ret = 0;
1448 ret = amdgpu_bo_reserve(bo, false);
1449 if (unlikely(ret))
1460 int ret;
1470 ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1471 if (ret)
1472 return ret;
1482 int ret;
1489 ret = amdgpu_vm_make_compute(adev, avm);
1490 if (ret)
1491 return ret;
1494 ret = init_kfd_vm(avm, process_info, ef);
1495 if (ret)
1496 return ret;
1577 int ret = 0;
1586 ret = -EINVAL;
1594 return ret;
1651 int ret;
1702 ret = -ENOMEM;
1721 ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
1723 if (ret) {
1732 ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
1734 if (ret) {
1735 pr_debug("Failed to create BO on domain %s. ret %d\n",
1736 domain_string(alloc_domain), ret);
1739 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1740 if (ret) {
1741 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1763 ret = init_user_pages(*mem, user_addr, criu_resume);
1764 if (ret)
1768 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1769 if (ret) {
1803 return ret;
1816 int ret;
1854 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1855 if (unlikely(ret))
1856 return ret;
1873 ret = unreserve_bo_and_vms(&ctx, false, false);
1919 return ret;
1927 int ret;
1968 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1969 if (ret)
1973 ret = reserve_bo_and_vm(mem, avm, &ctx);
1974 if (unlikely(ret))
1986 ret = vm_validate_pt_pd_bos(avm);
1987 if (unlikely(ret))
1996 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1997 if (ret) {
2010 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
2012 if (ret) {
2017 ret = vm_update_pds(avm, ctx.sync);
2018 if (ret) {
2033 ret = unreserve_bo_and_vms(&ctx, false, false);
2042 return ret;
2049 int ret;
2055 ret = amdgpu_bo_reserve(mem->bo, true);
2056 if (ret)
2073 return ret;
2084 int ret;
2088 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
2089 if (unlikely(ret))
2093 ret = -EINVAL;
2097 ret = vm_validate_pt_pd_bos(avm);
2098 if (unlikely(ret))
2134 return ret;
2141 int ret;
2149 ret = amdgpu_sync_wait(&sync, intr);
2151 return ret;
2164 int ret;
2166 ret = amdgpu_bo_reserve(bo, true);
2167 if (ret) {
2168 pr_err("Failed to reserve bo. ret %d\n", ret);
2172 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2173 if (ret) {
2174 pr_err("Failed to pin bo. ret %d\n", ret);
2178 ret = amdgpu_ttm_alloc_gart(&bo->tbo);
2179 if (ret) {
2180 pr_err("Failed to bind bo to GART. ret %d\n", ret);
2199 return ret;
2218 int ret;
2228 ret = amdgpu_bo_reserve(bo, true);
2229 if (ret) {
2230 pr_err("Failed to reserve bo. ret %d\n", ret);
2234 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2235 if (ret) {
2236 pr_err("Failed to pin bo. ret %d\n", ret);
2240 ret = amdgpu_bo_kmap(bo, kptr);
2241 if (ret) {
2242 pr_err("Failed to map bo to kernel. ret %d\n", ret);
2264 return ret;
2305 int ret;
2315 ret = -EINVAL;
2321 ret = -ENOMEM;
2325 ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
2326 if (ret)
2363 return ret;
2369 int ret;
2372 ret = kfd_mem_export_dmabuf(mem);
2373 if (ret)
2380 return ret;
2436 int ret = 0;
2475 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2477 if (ret) {
2485 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
2487 if (ret) {
2488 pr_debug("Failed %d to get user pages\n", ret);
2497 if (ret != -EFAULT)
2498 return ret;
2500 ret = 0;
2509 ret = -EAGAIN;
2520 return ret;
2537 int ret;
2547 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2549 if (unlikely(ret))
2559 ret = drm_exec_prepare_obj(&exec, gobj, 1);
2561 if (unlikely(ret))
2566 ret = process_validate_vms(process_info);
2567 if (ret)
2581 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2582 if (ret) {
2599 ret = update_gpuvm_pte(mem, attachment, &sync);
2600 if (ret) {
2612 ret = process_update_pds(process_info, &sync);
2619 return ret;
2629 int ret = 0;
2647 ret = -EAGAIN;
2653 ret = -EAGAIN;
2661 return ret;
2776 int ret;
2786 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2788 if (unlikely(ret))
2800 ret = drm_exec_prepare_obj(&exec, gobj, 1);
2802 if (unlikely(ret))
2810 ret = process_validate_vms(process_info);
2811 if (ret)
2814 ret = process_sync_pds_resv(process_info, &sync_obj);
2815 if (ret) {
2832 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2833 if (ret) {
2836 ret = amdgpu_amdkfd_bo_validate(bo,
2838 if (ret) {
2845 ret = amdgpu_sync_fence(&sync_obj, fence);
2846 if (ret) {
2856 ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2857 if (ret) {
2868 ret = process_update_pds(process_info, &sync_obj);
2869 if (ret) {
2887 ret = -ENOMEM;
2918 return ret;
2925 int ret;
2945 ret = amdgpu_bo_reserve(gws_bo, false);
2946 if (unlikely(ret)) {
2947 pr_err("Reserve gws bo failed %d\n", ret);
2951 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2952 if (ret) {
2953 pr_err("GWS BO validate failed %d\n", ret);
2960 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
2961 if (ret)
2969 return ret;
2982 return ret;
2987 int ret;
2997 ret = amdgpu_bo_reserve(gws_bo, false);
2998 if (unlikely(ret)) {
2999 pr_err("Reserve gws bo failed %d\n", ret);
3001 return ret;
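
Taken together, the matches above all trace one recurring error-handling shape in this file: a local int ret is declared at the top of each function, every helper call (reserve, validate, map, sync) is assigned to it, failures are logged with pr_err()/pr_debug() and unwound (often via goto or an unreserve step), and every path ends in return ret. The following is a minimal sketch of that shape only, not the driver's real code: reserve_bo(), validate_bo() and unreserve_bo() are hypothetical stand-ins for helpers such as amdgpu_bo_reserve()/ttm_bo_validate()/amdgpu_bo_unreserve(), and fprintf() stands in for pr_err() so the sketch is self-contained in userspace.

    /*
     * Sketch of the "int ret" unwind pattern seen throughout the listing.
     * reserve_bo(), validate_bo() and unreserve_bo() are hypothetical
     * placeholders that only model "0 on success, negative errno on failure".
     */
    #include <stdio.h>
    #include <errno.h>

    static int reserve_bo(void *bo)    { return bo ? 0 : -EINVAL; }
    static int validate_bo(void *bo)   { (void)bo; return 0; }
    static void unreserve_bo(void *bo) { (void)bo; }

    static int pin_bo_sketch(void *bo)
    {
    	int ret;

    	/* Step 1: reserve; on failure report and return the errno as-is. */
    	ret = reserve_bo(bo);
    	if (ret) {
    		fprintf(stderr, "Failed to reserve bo. ret %d\n", ret);
    		return ret;
    	}

    	/* Step 2: validate; on failure fall through to the unwind label. */
    	ret = validate_bo(bo);
    	if (ret) {
    		fprintf(stderr, "Failed to validate bo. ret %d\n", ret);
    		goto out_unreserve;
    	}

    	/* ... further steps would go here, each one feeding ret ... */

    out_unreserve:
    	unreserve_bo(bo);
    	return ret;
    }

    int main(void)
    {
    	int dummy_bo = 0;

    	return pin_bo_sketch(&dummy_bo) ? 1 : 0;
    }

The point of the sketch is only to show why ret appears so densely in the listing: success and failure share the same exit path, so every intermediate result has to flow through the one local variable.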