Lines matching defs: pdd (struct kfd_process_device *, the per-process device data, in drivers/gpu/drm/amd/amdkfd/kfd_svm.c)
216 struct kfd_process_device *pdd;
219 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
220 if (!pdd) {
225 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
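Lines 216-225 show the recurring per-GPU lookup guard: resolve the process's device data for a GPU index and bail out when none is registered, before touching pdd->dev->adev. A minimal userspace C sketch of the same guard, with hypothetical names (process_ctx, device_data, lookup_device, map_on_gpu) standing in for the KFD types:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_GPUS 8

    struct device_data { int id; };
    struct process_ctx { struct device_data *pdds[MAX_GPUS]; };

    /* Hypothetical analog of kfd_process_device_from_gpuidx():
     * returns NULL when the process has no device data at gpuidx. */
    static struct device_data *lookup_device(struct process_ctx *p, int gpuidx)
    {
        if (gpuidx < 0 || gpuidx >= MAX_GPUS)
            return NULL;
        return p->pdds[gpuidx];
    }

    static int map_on_gpu(struct process_ctx *p, int gpuidx)
    {
        struct device_data *pdd = lookup_device(p, gpuidx);

        if (!pdd) {  /* mirrors the !pdd guard at line 220 */
            fprintf(stderr, "failed to find device idx %d\n", gpuidx);
            return -EINVAL;
        }
        printf("mapping range on GPU %d\n", pdd->id);
        return 0;
    }

    int main(void)
    {
        struct device_data gpu0 = { .id = 0 };
        struct process_ctx p = { .pdds = { &gpu0 } };

        map_on_gpu(&p, 0);  /* succeeds */
        map_on_gpu(&p, 3);  /* fails: no pdd registered at index 3 */
        return 0;
    }
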
254 struct kfd_process_device *pdd;
267 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
268 if (!pdd) {
272 dev = &pdd->dev->adev->pdev->dev;
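Line 272 dereferences a four-level ownership chain to reach the generic struct device that the DMA API expects: pdd, then the KFD node, then the amdgpu device, then the PCI device with its embedded device. A hedged sketch of such a chain, with hypothetical struct names (generic_dev, pci_dev_s, amdgpu_dev_s, kfd_node_s, pdd_s) standing in for the kernel types:

    #include <stdio.h>

    /* Hypothetical, simplified ownership chain mirroring
     * pdd->dev->adev->pdev->dev at line 272. */
    struct generic_dev  { const char *name; };
    struct pci_dev_s    { struct generic_dev dev; };
    struct amdgpu_dev_s { struct pci_dev_s *pdev; };
    struct kfd_node_s   { struct amdgpu_dev_s *adev; };
    struct pdd_s        { struct kfd_node_s *dev; };

    int main(void)
    {
        struct pci_dev_s pci = { .dev = { .name = "0000:03:00.0" } };
        struct amdgpu_dev_s adev = { .pdev = &pci };
        struct kfd_node_s node = { .adev = &adev };
        struct pdd_s pdd = { .dev = &node };

        /* The DMA layer wants the innermost generic device. */
        struct generic_dev *dev = &pdd.dev->adev->pdev->dev;

        printf("dma device: %s\n", dev->name);
        return 0;
    }
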
396 struct kfd_process_device *pdd;
407 pdd = kfd_get_process_device_data(svm_bo->node, p);
408 if (pdd)
409 atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
542 struct kfd_process_device *pdd;
634 pdd = svm_range_get_pdd_by_node(prange, node);
635 if (pdd)
636 atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
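Lines 636 and 409 are a matched pair: the buffer-object size is added to pdd->vram_usage when the range gets a VRAM node and subtracted again when the svm_bo is released. A minimal sketch of that pattern using C11 atomics (only the vram_usage name comes from the source; the helpers are hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Per-device usage counter, like pdd->vram_usage. */
    static atomic_int_least64_t vram_usage;

    static void bo_alloc(int64_t bo_size)
    {
        atomic_fetch_add(&vram_usage, bo_size);  /* line 636 analog */
    }

    static void bo_release(int64_t bo_size)
    {
        atomic_fetch_sub(&vram_usage, bo_size);  /* line 409 analog */
    }

    int main(void)
    {
        bo_alloc(1 << 20);
        bo_alloc(1 << 20);
        bo_release(1 << 20);
        printf("vram usage: %lld bytes\n",
               (long long)atomic_load(&vram_usage));  /* 1 MiB left */
        return 0;
    }

Both sites pass amdgpu_bo_size(bo), so the subtraction at release exactly undoes the addition at allocation.
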
667 struct kfd_process_device *pdd;
670 pdd = kfd_process_device_data_by_id(p, gpu_id);
671 if (!pdd) {
676 return pdd->dev;
1332 struct kfd_process_device *pdd;
1355 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1356 if (!pdd) {
1361 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1364 r = svm_range_unmap_from_gpu(pdd->dev->adev,
1365 drm_priv_to_vm(pdd->drm_priv),
1377 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
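Lines 1355-1377 are the per-GPU unmap sequence: look up the pdd, emit an SMI unmap event, clear the GPU page-table entries, then issue a heavyweight TLB flush so no stale translations survive the unmap. A stub-based sketch of that ordering (all function names here are hypothetical stand-ins):

    #include <stdio.h>

    /* Stubs standing in for the real trace/unmap/flush primitives. */
    static void trace_unmap(int gpu)     { printf("SMI: unmap event, GPU %d\n", gpu); }
    static void clear_gpu_ptes(int gpu)  { printf("clear PTEs on GPU %d\n", gpu); }
    static void tlb_flush_heavy(int gpu) { printf("heavyweight TLB flush, GPU %d\n", gpu); }

    static void unmap_from_gpus(const int *gpus, int n)
    {
        /* Order matters: the PTEs must be removed before the flush,
         * and the flush must complete before the pages are reused. */
        for (int i = 0; i < n; i++) {
            trace_unmap(gpus[i]);
            clear_gpu_ptes(gpus[i]);
            tlb_flush_heavy(gpus[i]);  /* line 1377 analog */
        }
    }

    int main(void)
    {
        int gpus[] = { 0, 1 };

        unmap_from_gpus(gpus, 2);
        return 0;
    }
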
1384 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1389 struct amdgpu_device *adev = pdd->dev->adev;
1390 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1416 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
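Line 1416 derives the PTE flags for a mapping from the device, the range attributes, and the domain the pages last landed in (VRAM vs. system memory). A simplified, hypothetical flag computation in the same spirit; the real svm_range_get_pte_flags() also folds in ASIC-specific cache and coherence settings:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical flag bits and domains, not the hardware encoding. */
    enum domain { DOMAIN_VRAM, DOMAIN_SYSTEM };

    #define PTE_VALID     (1u << 0)
    #define PTE_READABLE  (1u << 1)
    #define PTE_WRITEABLE (1u << 2)
    #define PTE_SNOOPED   (1u << 3)  /* CPU-coherent system pages */

    static uint32_t get_pte_flags(enum domain last_domain, int readonly)
    {
        uint32_t flags = PTE_VALID | PTE_READABLE;

        if (!readonly)
            flags |= PTE_WRITEABLE;
        if (last_domain == DOMAIN_SYSTEM)
            flags |= PTE_SNOOPED;
        return flags;
    }

    int main(void)
    {
        printf("vram rw flags: 0x%x\n", (unsigned)get_pte_flags(DOMAIN_VRAM, 0));
        printf("sys ro flags:  0x%x\n", (unsigned)get_pte_flags(DOMAIN_SYSTEM, 1));
        return 0;
    }
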
1465 struct kfd_process_device *pdd;
1478 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1479 if (!pdd) {
1484 pdd = kfd_bind_process_to_device(pdd->dev, p);
1485 if (IS_ERR(pdd))
1488 if (bo_adev && pdd->dev->adev != bo_adev &&
1489 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1494 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1511 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
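Lines 1478-1511 are the mapping loop: for each GPU index in the bitmap, bind the process to the device, skip peers that can reach the VRAM BO neither directly nor over the same XGMI hive (the check at lines 1488-1489), map the range, and finish with a legacy TLB flush. A sketch of the bitmap walk and the reachability check; the asic_id/hive_id fields are hypothetical stand-ins for the adev comparison and amdgpu_xgmi_same_hive():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    struct gpu { int asic_id; int hive_id; };

    /* Analog of lines 1488-1489: a peer GPU may only map VRAM owned
     * by bo_gpu if it is the same device or shares an XGMI hive. */
    static bool can_map_vram(const struct gpu *g, const struct gpu *bo_gpu)
    {
        return g->asic_id == bo_gpu->asic_id || g->hive_id == bo_gpu->hive_id;
    }

    int main(void)
    {
        struct gpu gpus[] = { { 0, 1 }, { 1, 1 }, { 2, 2 } };
        struct gpu *bo_gpu = &gpus[0];
        uint64_t bitmap = 0x7;  /* GPUs 0..2 requested */

        for (int i = 0; i < 3; i++) {
            if (!(bitmap & (1ull << i)))
                continue;
            if (!can_map_vram(&gpus[i], bo_gpu)) {
                printf("GPU %d: cannot access BO, skipping\n", i);
                continue;
            }
            printf("GPU %d: map + legacy TLB flush\n", i);
        }
        return 0;
    }

In the kernel the unreachable case is skipped rather than treated as a hard error, so the range simply stays unmapped on that GPU.
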
1527 struct kfd_process_device *pdd;
1535 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1536 if (!pdd) {
1541 vm = drm_priv_to_vm(pdd->drm_priv);
1553 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1554 if (!pdd) {
1560 r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
1561 drm_priv_to_vm(pdd->drm_priv),
1583 struct kfd_process_device *pdd;
1585 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1586 if (!pdd)
1589 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
2276 struct kfd_process_device *pdd;
2289 pdd = p->pdds[i];
2290 if (!pdd)
2295 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2296 pdd->dev->adev->irq.retry_cam_enabled ?
2297 &pdd->dev->adev->irq.ih :
2298 &pdd->dev->adev->irq.ih1);
2300 if (pdd->dev->adev->irq.retry_cam_enabled)
2301 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2302 &pdd->dev->adev->irq.ih_soft);
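Lines 2289-2302 drain outstanding retry faults per device: for each pdd, wait until the interrupt handler has processed everything up to a checkpoint timestamp, picking the main IH ring when the retry CAM is enabled (and then also draining the soft ring) and ih1 otherwise. A stub sketch of the ring selection; the ring names mirror the source, the wait itself is simulated:

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical stand-ins for the IH rings and the checkpoint wait. */
    enum ring { IH_MAIN, IH_RING1, IH_SOFT };

    static void wait_checkpoint(enum ring r)
    {
        const char *names[] = { "ih", "ih1", "ih_soft" };

        printf("wait for checkpoint on %s\n", names[r]);
    }

    static void drain_faults(bool retry_cam_enabled)
    {
        /* Mirrors lines 2295-2302: CAM-enabled ASICs drain the main
         * ring and then the soft ring; others drain ih1. */
        wait_checkpoint(retry_cam_enabled ? IH_MAIN : IH_RING1);
        if (retry_cam_enabled)
            wait_checkpoint(IH_SOFT);
    }

    int main(void)
    {
        drain_faults(true);
        drain_faults(false);
        return 0;
    }
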
2876 struct kfd_process_device *pdd;
2894 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2895 if (pdd)
2896 WRITE_ONCE(pdd->faults, pdd->faults + 1);
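Line 2896 bumps the per-device fault counter with WRITE_ONCE: the store cannot be torn or dropped by the compiler, but the read-modify-write is not atomic, which the code evidently tolerates for a statistics counter. The closest portable analog is a relaxed C11 load/store pair:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Portable analog of WRITE_ONCE(pdd->faults, pdd->faults + 1) at
     * line 2896: a relaxed load plus a relaxed store. The store is
     * well-defined under concurrency, but the increment as a whole is
     * not atomic, so racing updaters could lose counts. */
    static _Atomic unsigned long faults;

    static void count_fault(void)
    {
        unsigned long v = atomic_load_explicit(&faults, memory_order_relaxed);

        atomic_store_explicit(&faults, v + 1, memory_order_relaxed);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            count_fault();
        printf("faults: %lu\n",
               atomic_load_explicit(&faults, memory_order_relaxed));
        return 0;
    }
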
3231 * It looks for each pdd in the kfd_process.
3345 struct kfd_process_device *pdd;
3374 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3375 if (!pdd) {
3380 if (pdd->dev->adev == bo_node->adev)
3383 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
4073 struct kfd_process_device *pdd = p->pdds[index];
4077 query_attr[index + nattr_common].value = pdd->user_gpu_id;