Lines Matching defs:pdd

71 struct kfd_process_device *pdd;
74 pdd = kfd_process_device_data_by_id(p, gpu_id);
76 if (pdd)
77 return pdd;
83 static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
85 mutex_unlock(&pdd->process->mutex);
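
Throughout this listing, pdd is the per-GPU struct kfd_process_device hung off a kfd_process (see the p->pdds[] walks further down). The fragments at 71-85 belong to a small lock/unlock helper pair; a minimal reconstruction from the lines shown above, with the elided lines (the mutex_lock and the NULL return path) filled in as assumptions rather than quoted source:

static struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p,
						     __u32 gpu_id)
{
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, gpu_id);

	if (pdd)
		return pdd;		/* success: p->mutex stays held */

	mutex_unlock(&p->mutex);	/* lookup failed: drop the lock */
	return NULL;
}

static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
{
	mutex_unlock(&pdd->process->mutex);
}

A caller that gets a non-NULL pdd back therefore owns the process mutex until it calls kfd_unlock_pdd(); lines 1046-1052 show the one such caller in this listing.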
305 struct kfd_process_device *pdd;
322 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
323 if (!pdd) {
328 dev = pdd->dev;
330 pdd = kfd_bind_process_to_device(dev, p);
331 if (IS_ERR(pdd)) {
336 if (!pdd->qpd.proc_doorbells) {
337 err = kfd_alloc_process_doorbells(dev->kfd, pdd);
353 wptr_vm = drm_priv_to_vm(pdd->drm_priv);
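
Lines 305-353 show the lookup-then-bind skeleton that most handlers in this file repeat: resolve the pdd from the user-supplied gpu_id, bind the process to that device, and make sure process doorbells exist before touching queue state. A hedged sketch of that shape, with the label names, error codes and the handler-specific work treated as assumptions:

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;			/* assumed error code */
		goto err_unlock;		/* illustrative label */
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto err_unlock;
	}

	if (!pdd->qpd.proc_doorbells) {
		err = kfd_alloc_process_doorbells(dev->kfd, pdd);
		if (err)
			goto err_unlock;
	}

	/* handler-specific work follows, e.g. line 353:
	 * wptr_vm = drm_priv_to_vm(pdd->drm_priv);
	 */

Variants of the same shape recur at 576-611, 630-646, 902-927 and 1568-1599.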
576 struct kfd_process_device *pdd;
590 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
591 if (!pdd) {
597 pdd = kfd_bind_process_to_device(pdd->dev, p);
598 if (IS_ERR(pdd)) {
610 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
611 &pdd->qpd,
630 struct kfd_process_device *pdd;
634 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
635 if (!pdd) {
640 pdd = kfd_bind_process_to_device(pdd->dev, p);
641 if (IS_ERR(pdd)) {
646 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
684 struct kfd_process_device *pdd;
687 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
689 if (pdd)
691 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
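
Lines 684-691 look like the one lookup in this listing where a missing pdd is tolerated rather than failed outright: the GPU clock counter is filled in only when the pdd exists. Pieced together, with the locking and the fallthrough treated as assumptions:

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (pdd)
		args->gpu_clock_counter =
			amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
	mutex_unlock(&p->mutex);
	/* assumed: no early return, the handler continues either way */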
719 /* Run over all pdd of the process */
721 struct kfd_process_device *pdd = p->pdds[i];
725 pAperture->gpu_id = pdd->dev->id;
726 pAperture->lds_base = pdd->lds_base;
727 pAperture->lds_limit = pdd->lds_limit;
728 pAperture->gpuvm_base = pdd->gpuvm_base;
729 pAperture->gpuvm_limit = pdd->gpuvm_limit;
730 pAperture->scratch_base = pdd->scratch_base;
731 pAperture->scratch_limit = pdd->scratch_limit;
736 "gpu id %u\n", pdd->dev->id);
738 "lds_base %llX\n", pdd->lds_base);
740 "lds_limit %llX\n", pdd->lds_limit);
742 "gpuvm_base %llX\n", pdd->gpuvm_base);
744 "gpuvm_limit %llX\n", pdd->gpuvm_limit);
746 "scratch_base %llX\n", pdd->scratch_base);
748 "scratch_limit %llX\n", pdd->scratch_limit);
794 /* Run over all pdd of the process */
796 struct kfd_process_device *pdd = p->pdds[i];
798 pa[i].gpu_id = pdd->dev->id;
799 pa[i].lds_base = pdd->lds_base;
800 pa[i].lds_limit = pdd->lds_limit;
801 pa[i].gpuvm_base = pdd->gpuvm_base;
802 pa[i].gpuvm_limit = pdd->gpuvm_limit;
803 pa[i].scratch_base = pdd->scratch_base;
804 pa[i].scratch_limit = pdd->scratch_limit;
807 "gpu id %u\n", pdd->dev->id);
809 "lds_base %llX\n", pdd->lds_base);
811 "lds_limit %llX\n", pdd->lds_limit);
813 "gpuvm_base %llX\n", pdd->gpuvm_base);
815 "gpuvm_limit %llX\n", pdd->gpuvm_limit);
817 "scratch_base %llX\n", pdd->scratch_base);
819 "scratch_limit %llX\n", pdd->scratch_limit);
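
The two aperture queries (719-748 and 794-819) walk every pdd of the process and copy the same six aperture fields plus the gpu id; they differ mainly in the destination (a single pAperture struct vs. one array entry per device). The per-array variant, sketched from the fragments with the loop bounds assumed; each copy is mirrored by a dev_dbg line (807-819):

	/* Run over all pdd of the process */
	for (i = 0; i < p->n_pdds; i++) {	/* assumed loop bounds */
		struct kfd_process_device *pdd = p->pdds[i];

		pa[i].gpu_id = pdd->dev->id;
		pa[i].lds_base = pdd->lds_base;
		pa[i].lds_limit = pdd->lds_limit;
		pa[i].gpuvm_base = pdd->gpuvm_base;
		pa[i].gpuvm_limit = pdd->gpuvm_limit;
		pa[i].scratch_base = pdd->scratch_base;
		pa[i].scratch_limit = pdd->scratch_limit;
	}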
902 struct kfd_process_device *pdd;
907 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
908 if (!pdd) {
912 dev = pdd->dev;
914 pdd = kfd_bind_process_to_device(dev, p);
915 if (IS_ERR(pdd)) {
916 err = PTR_ERR(pdd);
920 pdd->qpd.sh_hidden_private_base = args->va_addr;
925 pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
927 dev->adev, args->va_addr, pdd->qpd.vmid);
941 struct kfd_process_device *pdd;
946 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
948 if (!pdd)
951 amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
985 struct kfd_process_device *pdd;
994 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
995 if (!pdd) {
1000 if (pdd->drm_file) {
1001 ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
1005 ret = kfd_process_device_init_vm(pdd, drm_file);
1046 struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
1048 if (!pdd)
1050 args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
1051 pdd->dev->node_id);
1052 kfd_unlock_pdd(pdd);
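
Lines 1046-1052 are the caller that uses the helper pair from 71-85 directly, which keeps the whole query inside one lock/unlock bracket; put back together, with the error code and return path as assumptions:

	struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);

	if (!pdd)
		return -EINVAL;			/* assumed error code */

	args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
							     pdd->dev->node_id);
	kfd_unlock_pdd(pdd);
	return 0;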
1060 struct kfd_process_device *pdd;
1103 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1104 if (!pdd) {
1109 dev = pdd->dev;
1119 pdd = kfd_bind_process_to_device(dev, p);
1120 if (IS_ERR(pdd)) {
1121 err = PTR_ERR(pdd);
1130 offset = kfd_get_process_doorbells(pdd);
1149 pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
1155 idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1167 atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
1186 pdd->drm_priv, NULL);
1198 struct kfd_process_device *pdd;
1214 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1215 if (!pdd) {
1222 pdd, GET_IDR_HANDLE(args->handle));
1228 ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
1229 (struct kgd_mem *)mem, pdd->drm_priv, &size);
1236 pdd, GET_IDR_HANDLE(args->handle));
1238 atomic64_sub(size, &pdd->vram_usage);
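
The allocate/free pair (1060-1186 and 1198-1238) also keeps per-device VRAM accounting on the pdd: the page-aligned size is added on a successful allocation and the size reported back by the free call is subtracted again. The two bookkeeping calls, side by side:

	/* allocation path, line 1167 */
	atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);

	/* free path, line 1238, where size was filled in by
	 * amdgpu_amdkfd_gpuvm_free_memory_of_gpu() at 1228-1229
	 */
	atomic64_sub(size, &pdd->vram_usage);

The asymmetry (PAGE_ALIGN on add, raw size on sub) presumably works out because the free call reports the full, already aligned allocation size.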
1250 struct kfd_process_device *pdd, *peer_pdd;
1280 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1281 if (!pdd) {
1285 dev = pdd->dev;
1287 pdd = kfd_bind_process_to_device(dev, p);
1288 if (IS_ERR(pdd)) {
1289 err = PTR_ERR(pdd);
1293 mem = kfd_process_device_translate_handle(pdd,
1368 struct kfd_process_device *pdd, *peer_pdd;
1397 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1398 if (!pdd) {
1403 mem = kfd_process_device_translate_handle(pdd,
1426 flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
1428 err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
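
The unmap path (1368-1428) checks whether a TLB flush is needed after the unmap and, in that case, synchronizes the memory first. Sketched from lines 1426-1428, with the guard around the sync, its last argument and the error label treated as assumptions:

	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
	if (flush_tlb) {			/* assumed guard */
		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
						      (struct kgd_mem *)mem,
						      true /* assumed flag */);
		if (err)
			goto sync_memory_failed;	/* illustrative label */
	}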
1568 struct kfd_process_device *pdd;
1580 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1581 if (!pdd) {
1586 pdd = kfd_bind_process_to_device(pdd->dev, p);
1587 if (IS_ERR(pdd)) {
1588 r = PTR_ERR(pdd);
1592 r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
1593 args->va_addr, pdd->drm_priv,
1599 idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1613 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
1614 pdd->drm_priv, NULL);
1625 struct kfd_process_device *pdd;
1637 pdd = kfd_get_process_device_data(dev, p);
1638 if (!pdd) {
1643 mem = kfd_process_device_translate_handle(pdd,
1678 struct kfd_process_device *pdd;
1682 pdd = kfd_process_device_data_by_id(p, args->gpuid);
1684 if (!pdd)
1687 return kfd_smi_event_open(pdd->dev, &args->anon_fd);
1806 struct kfd_process_device *pdd = p->pdds[i];
1808 device_buckets[i].user_gpu_id = pdd->user_gpu_id;
1809 device_buckets[i].actual_gpu_id = pdd->dev->id;
1846 struct kfd_process_device *pdd = p->pdds[i];
1850 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1853 if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
1908 struct kfd_process_device *pdd = p->pdds[pdd_index];
1912 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1929 if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
1935 bo_bucket->gpu_id = pdd->user_gpu_id;
1963 KFD_MMAP_GPU_ID(pdd->dev->id);
1967 KFD_MMAP_GPU_ID(pdd->dev->id);
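
The two CRIU checkpoint walks (1846-1853 and 1908-1967) iterate the same per-pdd allocation IDR with complementary filters on the BO's VA relative to the GPUVM base: the first pass only counts qualifying BOs, the second skips the complementary set and fills one bucket per BO, re-encoding doorbell and MMIO offsets with KFD_MMAP_GPU_ID(pdd->dev->id). The counting pass, sketched from the fragments (the cast and the increment in the if-body are assumptions):

	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;	/* cast assumed */

		if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
			num_of_bos++;		/* assumed counter name */
	}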
2067 pr_err("No pdd for given process\n");
2213 struct kfd_process_device *pdd;
2232 pdd = kfd_get_process_device_data(dev, p);
2233 if (!pdd) {
2234 pr_err("Failed to get pdd for gpu_id = %x\n",
2239 pdd->user_gpu_id = device_buckets[i].user_gpu_id;
2249 if (pdd->drm_file) {
2254 /* create the vm using render nodes for kfd pdd */
2255 if (kfd_process_device_init_vm(pdd, drm_file)) {
2256 pr_err("could not init vm for given pdd\n");
2263 * pdd now already has the vm bound to render node so below api won't create a new
2267 pdd = kfd_bind_process_to_device(dev, p);
2268 if (IS_ERR(pdd)) {
2269 ret = PTR_ERR(pdd);
2273 if (!pdd->qpd.proc_doorbells) {
2274 ret = kfd_alloc_process_doorbells(dev->kfd, pdd);
2291 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
2303 kfd_doorbell_process_slice(pdd->dev->kfd))
2306 offset = kfd_get_process_doorbells(pdd);
2315 offset = pdd->dev->adev->rmmio_remap.bus_addr;
2324 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
2325 bo_bucket->size, pdd->drm_priv, kgd_mem,
2336 idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
2341 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
2347 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
2349 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
2355 atomic64_add(bo_bucket->size, &pdd->vram_usage);
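
criu_restore_memory_of_gpu (2291-2355) re-creates the BO on the target device and then re-inserts it into the pdd's allocation IDR at the exact handle recorded at checkpoint time, so the handles user space holds stay valid across restore. The relevant step around lines 2336-2341, with the GFP flag, the upper bound and the error code as assumptions:

	idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
			       bo_priv->idr_handle + 1, GFP_KERNEL);
	if (idr_handle < 0) {
		/* undo the allocation if the old handle can't be reclaimed */
		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem,
						       pdd->drm_priv, NULL);
		return -ENOMEM;			/* assumed error code */
	}

Doorbell and MMIO buckets additionally get their mmap offset rebuilt from the GPU id (lines 2347 and 2349), and the restored size is added back to pdd->vram_usage (2355), mirroring the allocation path at 1167.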
2364 struct kfd_process_device *pdd;
2373 pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
2374 if (!pdd) {
2375 pr_err("Failed to get pdd\n");
2379 ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
2684 pr_err("No pdd for given process\n");
2762 struct kfd_process_device *pdd = p->pdds[i];
2764 if (pdd->qpd.queue_count)
2774 if (pdd->dev->kfd->shared_resources.enable_mes)
2775 kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
2784 struct kfd_process_device *pdd = p->pdds[i];
2786 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
2787 amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
2788 pdd->dev->kfd2kgd->enable_debug_trap(
2789 pdd->dev->adev,
2791 pdd->dev->vm_info.last_vmid_kfd);
2792 } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2793 pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
2794 pdd->dev->adev,
2846 struct kfd_process_device *pdd = p->pdds[i];
2848 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
2849 amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
2857 struct kfd_process_device *pdd = p->pdds[i];
2859 if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2860 pdd->spi_dbg_override =
2861 pdd->dev->kfd2kgd->disable_debug_trap(
2862 pdd->dev->adev,
2864 pdd->dev->vm_info.last_vmid_kfd);
2866 if (!pdd->dev->kfd->shared_resources.enable_mes)
2867 debug_refresh_runlist(pdd->dev->dqm);
2869 kfd_dbg_set_mes_debug_mode(pdd,
2870 !kfd_dbg_has_cwsr_workaround(pdd->dev));
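
The debug-trap disable path (2857-2870) again loops over every pdd, restores the SPI debug override through the kfd2kgd disable_debug_trap hook, and then refreshes either the HWS runlist or the MES debug mode depending on which scheduler the device uses. A sketch of that loop, with the loop bounds, the hook's middle argument and the exact nesting treated as assumptions:

	for (i = 0; i < p->n_pdds; i++) {	/* assumed loop bounds */
		struct kfd_process_device *pdd = p->pdds[i];

		if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
			pdd->spi_dbg_override =
				pdd->dev->kfd2kgd->disable_debug_trap(
					pdd->dev->adev,
					false,	/* assumed middle argument */
					pdd->dev->vm_info.last_vmid_kfd);
		}

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			debug_refresh_runlist(pdd->dev->dqm);
		else
			kfd_dbg_set_mes_debug_mode(pdd,
					!kfd_dbg_has_cwsr_workaround(pdd->dev));
	}

The enable side (2784-2803) mirrors this: devices without RLC restore support disable GFXOFF around enable_debug_trap (and re-enable it at 2846-2849), while per-VMID capable devices stash the returned override in pdd->spi_dbg_override.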
2902 struct kfd_process_device *pdd = NULL;
2987 pdd = kfd_process_device_data_by_id(target, user_gpu_id);
2988 if (user_gpu_id == -EINVAL || !pdd) {
3045 r = kfd_dbg_trap_set_dev_address_watch(pdd,
3052 r = kfd_dbg_trap_clear_dev_address_watch(pdd,