xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c (revision 25c4e8bd056e974b28f4a0ffd39d76c190a56013)
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27 
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
35 
36 /* Userptr restore delay, just long enough to allow consecutive VM
37  * changes to accumulate
38  */
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40 
41 /* Impose limit on how much memory KFD can use */
42 static struct {
43 	uint64_t max_system_mem_limit;
44 	uint64_t max_ttm_mem_limit;
45 	int64_t system_mem_used;
46 	int64_t ttm_mem_used;
47 	spinlock_t mem_limit_lock;
48 } kfd_mem_limit;
49 
50 static const char * const domain_bit_to_string[] = {
51 		"CPU",
52 		"GTT",
53 		"VRAM",
54 		"GDS",
55 		"GWS",
56 		"OA"
57 };
58 
59 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
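/*
 * Illustrative note (not in the original source): the GEM domain flags
 * are single bits (CPU=0x1, GTT=0x2, VRAM=0x4, GDS=0x8, GWS=0x10,
 * OA=0x20), so e.g. domain_string(AMDGPU_GEM_DOMAIN_VRAM) resolves to
 * domain_bit_to_string[ffs(0x4) - 1] == domain_bit_to_string[2] == "VRAM".
 */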
60 
61 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
62 
63 
64 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
65 {
66 	return (struct amdgpu_device *)kgd;
67 }
68 
69 static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
70 		struct kgd_mem *mem)
71 {
72 	struct kfd_mem_attachment *entry;
73 
74 	list_for_each_entry(entry, &mem->attachments, list)
75 		if (entry->bo_va->base.vm == avm)
76 			return true;
77 
78 	return false;
79 }
80 
81 /* Set memory usage limits. Currently, limits are
82  *  System (TTM + userptr) memory - 15/16th System RAM
83  *  TTM memory - 3/8th System RAM
84  */
85 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
86 {
87 	struct sysinfo si;
88 	uint64_t mem;
89 
90 	si_meminfo(&si);
91 	mem = si.freeram - si.freehigh;
92 	mem *= si.mem_unit;
93 
94 	mtx_init(&kfd_mem_limit.mem_limit_lock, IPL_TTY);
95 	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
96 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
97 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
98 		(kfd_mem_limit.max_system_mem_limit >> 20),
99 		(kfd_mem_limit.max_ttm_mem_limit >> 20));
100 }
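
/*
 * Illustrative arithmetic (not in the original source), assuming roughly
 * 32 GiB of usable low memory: max_system_mem_limit becomes
 * 32 - 32/16 = 30 GiB (15/16th) and max_ttm_mem_limit becomes
 * 32/2 - 32/8 = 12 GiB (3/8th).
 */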
101 
102 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
103 {
104 	kfd_mem_limit.system_mem_used += size;
105 }
106 
107 /* Estimate page table size needed to represent a given memory size
108  *
109  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
110  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
111  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
112  * for 2MB pages for TLB efficiency. However, small allocations and
113  * fragmented system memory still need some 4KB pages. We choose a
114  * compromise that should work in most cases without reserving too
115  * much memory for page tables unnecessarily (factor 16K, >> 14).
116  */
117 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
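/*
 * Worked example (illustrative, not in the original source): for 16 GiB
 * of memory, ESTIMATE_PT_SIZE(16ULL << 30) == (16ULL << 30) >> 14 ==
 * 1 MiB of page-table memory, i.e. an effective average page size of
 * about 128 KiB, between the 4 KiB and 2 MiB cases described above.
 */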
118 
119 static size_t amdgpu_amdkfd_acc_size(uint64_t size)
120 {
121 	size >>= PAGE_SHIFT;
122 	size *= sizeof(dma_addr_t) + sizeof(void *);
123 
124 	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
125 		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
126 		PAGE_ALIGN(size);
127 }
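
/*
 * Illustrative example (not in the original source): on a typical 64-bit
 * kernel, sizeof(dma_addr_t) + sizeof(void *) is 16 bytes, so a 2 MiB
 * allocation (512 pages) accounts for about 8 KiB of per-page bookkeeping
 * plus the rounded-up sizes of struct amdgpu_bo and struct ttm_tt.
 */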
128 
129 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
130 		uint64_t size, u32 domain, bool sg)
131 {
132 	uint64_t reserved_for_pt =
133 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
134 	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
135 	int ret = 0;
136 
137 	acc_size = amdgpu_amdkfd_acc_size(size);
138 
139 	vram_needed = 0;
140 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
141 		/* TTM GTT memory */
142 		system_mem_needed = acc_size + size;
143 		ttm_mem_needed = acc_size + size;
144 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
145 		/* Userptr */
146 		system_mem_needed = acc_size + size;
147 		ttm_mem_needed = acc_size;
148 	} else {
149 		/* VRAM and SG */
150 		system_mem_needed = acc_size;
151 		ttm_mem_needed = acc_size;
152 		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
153 			vram_needed = size;
154 	}
155 
156 	spin_lock(&kfd_mem_limit.mem_limit_lock);
157 
158 	if (kfd_mem_limit.system_mem_used + system_mem_needed >
159 	    kfd_mem_limit.max_system_mem_limit)
160 		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
161 
162 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
163 	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
164 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
165 	     kfd_mem_limit.max_ttm_mem_limit) ||
166 	    (adev->kfd.vram_used + vram_needed >
167 	     adev->gmc.real_vram_size - reserved_for_pt)) {
168 		ret = -ENOMEM;
169 	} else {
170 		kfd_mem_limit.system_mem_used += system_mem_needed;
171 		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
172 		adev->kfd.vram_used += vram_needed;
173 	}
174 
175 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
176 	return ret;
177 }
178 
179 static void unreserve_mem_limit(struct amdgpu_device *adev,
180 		uint64_t size, u32 domain, bool sg)
181 {
182 	size_t acc_size;
183 
184 	acc_size = amdgpu_amdkfd_acc_size(size);
185 
186 	spin_lock(&kfd_mem_limit.mem_limit_lock);
187 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
188 		kfd_mem_limit.system_mem_used -= (acc_size + size);
189 		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
190 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
191 		kfd_mem_limit.system_mem_used -= (acc_size + size);
192 		kfd_mem_limit.ttm_mem_used -= acc_size;
193 	} else {
194 		kfd_mem_limit.system_mem_used -= acc_size;
195 		kfd_mem_limit.ttm_mem_used -= acc_size;
196 		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
197 			adev->kfd.vram_used -= size;
198 			WARN_ONCE(adev->kfd.vram_used < 0,
199 				  "kfd VRAM memory accounting unbalanced");
200 		}
201 	}
202 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
203 		  "kfd system memory accounting unbalanced");
204 	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
205 		  "kfd TTM memory accounting unbalanced");
206 
207 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
208 }
209 
210 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
211 {
212 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
213 	u32 domain = bo->preferred_domains;
214 	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
215 
216 	if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
217 		domain = AMDGPU_GEM_DOMAIN_CPU;
218 		sg = false;
219 	}
220 
221 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
222 }
223 
224 
225 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
226  *  reservation object.
227  *
228  * @bo: [IN] Remove eviction fence(s) from this BO
229  * @ef: [IN] This eviction fence is removed if it
230  *  is present in the shared list.
231  *
232  * NOTE: Must be called with BO reserved i.e. bo->tbo.base.resv->lock held.
233  */
234 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
235 					struct amdgpu_amdkfd_fence *ef)
236 {
237 	struct dma_resv *resv = bo->tbo.base.resv;
238 	struct dma_resv_list *old, *new;
239 	unsigned int i, j, k;
240 
241 	if (!ef)
242 		return -EINVAL;
243 
244 	old = dma_resv_shared_list(resv);
245 	if (!old)
246 		return 0;
247 
248 	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
249 	if (!new)
250 		return -ENOMEM;
251 
252 	/* Go through all the shared fences in the reservation object and sort
253 	 * the interesting ones to the end of the list.
254 	 */
255 	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
256 		struct dma_fence *f;
257 
258 		f = rcu_dereference_protected(old->shared[i],
259 					      dma_resv_held(resv));
260 
261 		if (f->context == ef->base.context)
262 			RCU_INIT_POINTER(new->shared[--j], f);
263 		else
264 			RCU_INIT_POINTER(new->shared[k++], f);
265 	}
266 	new->shared_max = old->shared_max;
267 	new->shared_count = k;
268 
269 	/* Install the new fence list, seqcount provides the barriers */
270 	write_seqcount_begin(&resv->seq);
271 	RCU_INIT_POINTER(resv->fence, new);
272 	write_seqcount_end(&resv->seq);
273 
274 	/* Drop the references to the removed fences */
275 	for (i = j; i < old->shared_count; ++i) {
276 		struct dma_fence *f;
277 
278 		f = rcu_dereference_protected(new->shared[i],
279 					      dma_resv_held(resv));
280 		dma_fence_put(f);
281 	}
282 	kfree_rcu(old, rcu);
283 
284 	return 0;
285 }
286 
287 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
288 {
289 	struct amdgpu_bo *root = bo;
290 	struct amdgpu_vm_bo_base *vm_bo;
291 	struct amdgpu_vm *vm;
292 	struct amdkfd_process_info *info;
293 	struct amdgpu_amdkfd_fence *ef;
294 	int ret;
295 
296 	/* We can always get vm_bo from the root PD BO. */
297 	while (root->parent)
298 		root = root->parent;
299 
300 	vm_bo = root->vm_bo;
301 	if (!vm_bo)
302 		return 0;
303 
304 	vm = vm_bo->vm;
305 	if (!vm)
306 		return 0;
307 
308 	info = vm->process_info;
309 	if (!info || !info->eviction_fence)
310 		return 0;
311 
312 	ef = container_of(dma_fence_get(&info->eviction_fence->base),
313 			struct amdgpu_amdkfd_fence, base);
314 
315 	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
316 	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
317 	dma_resv_unlock(bo->tbo.base.resv);
318 
319 	dma_fence_put(&ef->base);
320 	return ret;
321 }
322 
323 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
324 				     bool wait)
325 {
326 	struct ttm_operation_ctx ctx = { false, false };
327 	int ret;
328 
329 	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
330 		 "Called with userptr BO"))
331 		return -EINVAL;
332 
333 	amdgpu_bo_placement_from_domain(bo, domain);
334 
335 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
336 	if (ret)
337 		goto validate_fail;
338 	if (wait)
339 		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
340 
341 validate_fail:
342 	return ret;
343 }
344 
345 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
346 {
347 	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
348 }
349 
350 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
351  *
352  * Page directories are not updated here because huge page handling
353  * during page table updates can invalidate page directory entries
354  * again. Page directories are only updated after updating page
355  * tables.
356  */
357 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
358 {
359 	struct amdgpu_bo *pd = vm->root.bo;
360 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
361 	int ret;
362 
363 	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
364 	if (ret) {
365 		pr_err("failed to validate PT BOs\n");
366 		return ret;
367 	}
368 
369 	ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
370 	if (ret) {
371 		pr_err("failed to validate PD\n");
372 		return ret;
373 	}
374 
375 	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
376 
377 	if (vm->use_cpu_for_update) {
378 		ret = amdgpu_bo_kmap(pd, NULL);
379 		if (ret) {
380 			pr_err("failed to kmap PD, ret=%d\n", ret);
381 			return ret;
382 		}
383 	}
384 
385 	return 0;
386 }
387 
388 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
389 {
390 	struct amdgpu_bo *pd = vm->root.bo;
391 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
392 	int ret;
393 
394 	ret = amdgpu_vm_update_pdes(adev, vm, false);
395 	if (ret)
396 		return ret;
397 
398 	return amdgpu_sync_fence(sync, vm->last_update);
399 }
400 
401 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
402 {
403 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
404 	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
405 	bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
406 	uint32_t mapping_flags;
407 	uint64_t pte_flags;
408 	bool snoop = false;
409 
410 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
411 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
412 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
413 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
414 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
415 
416 	switch (adev->asic_type) {
417 	case CHIP_ARCTURUS:
418 		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
419 			if (bo_adev == adev)
420 				mapping_flags |= coherent ?
421 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
422 			else
423 				mapping_flags |= coherent ?
424 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
425 		} else {
426 			mapping_flags |= coherent ?
427 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
428 		}
429 		break;
430 	case CHIP_ALDEBARAN:
431 		if (coherent && uncached) {
432 			if (adev->gmc.xgmi.connected_to_cpu ||
433 				!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
434 				snoop = true;
435 			mapping_flags |= AMDGPU_VM_MTYPE_UC;
436 		} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
437 			if (bo_adev == adev) {
438 				mapping_flags |= coherent ?
439 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
440 				if (adev->gmc.xgmi.connected_to_cpu)
441 					snoop = true;
442 			} else {
443 				mapping_flags |= coherent ?
444 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
445 				if (amdgpu_xgmi_same_hive(adev, bo_adev))
446 					snoop = true;
447 			}
448 		} else {
449 			snoop = true;
450 			mapping_flags |= coherent ?
451 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
452 		}
453 		break;
454 	default:
455 		mapping_flags |= coherent ?
456 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
457 	}
458 
459 	pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
460 	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
461 
462 	return pte_flags;
463 }
464 
465 static int
466 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
467 		       struct kfd_mem_attachment *attachment)
468 {
469 	enum dma_data_direction direction =
470 		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
471 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
472 	struct ttm_operation_ctx ctx = {.interruptible = true};
473 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
474 	struct amdgpu_device *adev = attachment->adev;
475 	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
476 	struct ttm_tt *ttm = bo->tbo.ttm;
477 	int ret;
478 
479 	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
480 	if (unlikely(!ttm->sg))
481 		return -ENOMEM;
482 
483 	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
484 		return -EINVAL;
485 
486 	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
487 	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
488 					ttm->num_pages, 0,
489 					(u64)ttm->num_pages << PAGE_SHIFT,
490 					GFP_KERNEL);
491 	if (unlikely(ret))
492 		goto free_sg;
493 
494 	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
495 	if (unlikely(ret))
496 		goto release_sg;
497 
498 	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
499 				       ttm->num_pages);
500 
501 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
502 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
503 	if (ret)
504 		goto unmap_sg;
505 
506 	return 0;
507 
508 unmap_sg:
509 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
510 release_sg:
511 	pr_err("DMA map userptr failed: %d\n", ret);
512 	sg_free_table(ttm->sg);
513 free_sg:
514 	kfree(ttm->sg);
515 	ttm->sg = NULL;
516 	return ret;
517 }
518 
519 static int
520 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
521 {
522 	struct ttm_operation_ctx ctx = {.interruptible = true};
523 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
524 
525 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
526 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
527 }
528 
529 static int
530 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
531 			  struct kfd_mem_attachment *attachment)
532 {
533 	switch (attachment->type) {
534 	case KFD_MEM_ATT_SHARED:
535 		return 0;
536 	case KFD_MEM_ATT_USERPTR:
537 		return kfd_mem_dmamap_userptr(mem, attachment);
538 	case KFD_MEM_ATT_DMABUF:
539 		return kfd_mem_dmamap_dmabuf(attachment);
540 	default:
541 		WARN_ON_ONCE(1);
542 	}
543 	return -EINVAL;
544 }
545 
546 static void
547 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
548 			 struct kfd_mem_attachment *attachment)
549 {
550 	enum dma_data_direction direction =
551 		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
552 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
553 	struct ttm_operation_ctx ctx = {.interruptible = false};
554 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
555 	struct amdgpu_device *adev = attachment->adev;
556 	struct ttm_tt *ttm = bo->tbo.ttm;
557 
558 	if (unlikely(!ttm->sg))
559 		return;
560 
561 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
562 	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
563 
564 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
565 	sg_free_table(ttm->sg);
566 	kfree(ttm->sg);
567 	ttm->sg = NULL;
568 }
569 
570 static void
571 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
572 {
573 	struct ttm_operation_ctx ctx = {.interruptible = true};
574 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
575 
576 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
577 	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
578 }
579 
580 static void
581 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
582 			    struct kfd_mem_attachment *attachment)
583 {
584 	switch (attachment->type) {
585 	case KFD_MEM_ATT_SHARED:
586 		break;
587 	case KFD_MEM_ATT_USERPTR:
588 		kfd_mem_dmaunmap_userptr(mem, attachment);
589 		break;
590 	case KFD_MEM_ATT_DMABUF:
591 		kfd_mem_dmaunmap_dmabuf(attachment);
592 		break;
593 	default:
594 		WARN_ON_ONCE(1);
595 	}
596 }
597 
598 static int
599 kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
600 		       struct amdgpu_bo **bo)
601 {
602 	unsigned long bo_size = mem->bo->tbo.base.size;
603 	struct drm_gem_object *gobj;
604 	int ret;
605 
606 	ret = amdgpu_bo_reserve(mem->bo, false);
607 	if (ret)
608 		return ret;
609 
610 	ret = amdgpu_gem_object_create(adev, bo_size, 1,
611 				       AMDGPU_GEM_DOMAIN_CPU,
612 				       AMDGPU_GEM_CREATE_PREEMPTIBLE,
613 				       ttm_bo_type_sg, mem->bo->tbo.base.resv,
614 				       &gobj);
615 	amdgpu_bo_unreserve(mem->bo);
616 	if (ret)
617 		return ret;
618 
619 	*bo = gem_to_amdgpu_bo(gobj);
620 	(*bo)->parent = amdgpu_bo_ref(mem->bo);
621 
622 	return 0;
623 }
624 
625 static int
626 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
627 		      struct amdgpu_bo **bo)
628 {
629 	struct drm_gem_object *gobj;
630 	int ret;
631 
632 	if (!mem->dmabuf) {
633 		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
634 			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
635 				DRM_RDWR : 0);
636 		if (IS_ERR(mem->dmabuf)) {
637 			ret = PTR_ERR(mem->dmabuf);
638 			mem->dmabuf = NULL;
639 			return ret;
640 		}
641 	}
642 
643 	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
644 	if (IS_ERR(gobj))
645 		return PTR_ERR(gobj);
646 
647 	/* Import takes an extra reference on the dmabuf. Drop it now to
648 	 * avoid leaking it. We only need the one reference in
649 	 * kgd_mem->dmabuf.
650 	 */
651 	dma_buf_put(mem->dmabuf);
652 
653 	*bo = gem_to_amdgpu_bo(gobj);
654 	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
655 	(*bo)->parent = amdgpu_bo_ref(mem->bo);
656 
657 	return 0;
658 }
659 
660 /* kfd_mem_attach - Add a BO to a VM
661  *
662  * Everything that needs to be done only once when a BO is first added
663  * to a VM. It can later be mapped and unmapped many times without
664  * repeating these steps.
665  *
666  * 0. Create BO for DMA mapping, if needed
667  * 1. Allocate and initialize BO VA entry data structure
668  * 2. Add BO to the VM
669  * 3. Determine ASIC-specific PTE flags
670  * 4. Alloc page tables and directories if needed
671  * 4a.  Validate new page tables and directories
672  */
673 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
674 		struct amdgpu_vm *vm, bool is_aql)
675 {
676 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
677 	unsigned long bo_size = mem->bo->tbo.base.size;
678 	uint64_t va = mem->va;
679 	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
680 	struct amdgpu_bo *bo[2] = {NULL, NULL};
681 	int i, ret;
682 
683 	if (!va) {
684 		pr_err("Invalid VA when adding BO to VM\n");
685 		return -EINVAL;
686 	}
687 
688 	for (i = 0; i <= is_aql; i++) {
689 		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
690 		if (unlikely(!attachment[i])) {
691 			ret = -ENOMEM;
692 			goto unwind;
693 		}
694 
695 		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
696 			 va + bo_size, vm);
697 
698 		if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
699 					amdgpu_xgmi_same_hive(adev, bo_adev))) {
700 			/* Mappings on the local GPU and VRAM mappings in the
701 			 * local hive share the original BO
702 			 */
703 			attachment[i]->type = KFD_MEM_ATT_SHARED;
704 			bo[i] = mem->bo;
705 			drm_gem_object_get(&bo[i]->tbo.base);
706 		} else if (i > 0) {
707 			/* Multiple mappings on the same GPU share the BO */
708 			attachment[i]->type = KFD_MEM_ATT_SHARED;
709 			bo[i] = bo[0];
710 			drm_gem_object_get(&bo[i]->tbo.base);
711 		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
712 			/* Create an SG BO to DMA-map userptrs on other GPUs */
713 			attachment[i]->type = KFD_MEM_ATT_USERPTR;
714 			ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
715 			if (ret)
716 				goto unwind;
717 		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
718 			   mem->bo->tbo.type != ttm_bo_type_sg) {
719 			/* GTT BOs use DMA-mapping ability of dynamic-attach
720 			 * DMA bufs. TODO: The same should work for VRAM on
721 			 * large-BAR GPUs.
722 			 */
723 			attachment[i]->type = KFD_MEM_ATT_DMABUF;
724 			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
725 			if (ret)
726 				goto unwind;
727 		} else {
728 			/* FIXME: Need to DMA-map other BO types:
729 			 * large-BAR VRAM, doorbells, MMIO remap
730 			 */
731 			attachment[i]->type = KFD_MEM_ATT_SHARED;
732 			bo[i] = mem->bo;
733 			drm_gem_object_get(&bo[i]->tbo.base);
734 		}
735 
736 		/* Add BO to VM internal data structures */
737 		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
738 		if (unlikely(!attachment[i]->bo_va)) {
739 			ret = -ENOMEM;
740 			pr_err("Failed to add BO object to VM. ret == %d\n",
741 			       ret);
742 			goto unwind;
743 		}
744 
745 		attachment[i]->va = va;
746 		attachment[i]->pte_flags = get_pte_flags(adev, mem);
747 		attachment[i]->adev = adev;
748 		list_add(&attachment[i]->list, &mem->attachments);
749 
750 		va += bo_size;
751 	}
752 
753 	return 0;
754 
755 unwind:
756 	for (; i >= 0; i--) {
757 		if (!attachment[i])
758 			continue;
759 		if (attachment[i]->bo_va) {
760 			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
761 			list_del(&attachment[i]->list);
762 		}
763 		if (bo[i])
764 			drm_gem_object_put(&bo[i]->tbo.base);
765 		kfree(attachment[i]);
766 	}
767 	return ret;
768 }
769 
770 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
771 {
772 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
773 
774 	pr_debug("\t remove VA 0x%llx in entry %p\n",
775 			attachment->va, attachment);
776 	amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
777 	drm_gem_object_put(&bo->tbo.base);
778 	list_del(&attachment->list);
779 	kfree(attachment);
780 }
781 
782 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
783 				struct amdkfd_process_info *process_info,
784 				bool userptr)
785 {
786 	struct ttm_validate_buffer *entry = &mem->validate_list;
787 	struct amdgpu_bo *bo = mem->bo;
788 
789 	INIT_LIST_HEAD(&entry->head);
790 	entry->num_shared = 1;
791 	entry->bo = &bo->tbo;
792 	mutex_lock(&process_info->lock);
793 	if (userptr)
794 		list_add_tail(&entry->head, &process_info->userptr_valid_list);
795 	else
796 		list_add_tail(&entry->head, &process_info->kfd_bo_list);
797 	mutex_unlock(&process_info->lock);
798 }
799 
800 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
801 		struct amdkfd_process_info *process_info)
802 {
803 	struct ttm_validate_buffer *bo_list_entry;
804 
805 	bo_list_entry = &mem->validate_list;
806 	mutex_lock(&process_info->lock);
807 	list_del(&bo_list_entry->head);
808 	mutex_unlock(&process_info->lock);
809 }
810 
811 /* Initializes user pages. It registers the MMU notifier and validates
812  * the userptr BO in the GTT domain.
813  *
814  * The BO must already be on the userptr_valid_list. Otherwise an
815  * eviction and restore may happen that leaves the new BO unmapped
816  * with the user mode queues running.
817  *
818  * Takes the process_info->lock to protect against concurrent restore
819  * workers.
820  *
821  * Returns 0 for success, negative errno for errors.
822  */
823 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
824 {
825 	struct amdkfd_process_info *process_info = mem->process_info;
826 	struct amdgpu_bo *bo = mem->bo;
827 	struct ttm_operation_ctx ctx = { true, false };
828 	int ret = 0;
829 
830 	mutex_lock(&process_info->lock);
831 
832 	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
833 	if (ret) {
834 		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
835 		goto out;
836 	}
837 
838 	ret = amdgpu_mn_register(bo, user_addr);
839 	if (ret) {
840 		pr_err("%s: Failed to register MMU notifier: %d\n",
841 		       __func__, ret);
842 		goto out;
843 	}
844 
845 	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
846 	if (ret) {
847 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
848 		goto unregister_out;
849 	}
850 
851 	ret = amdgpu_bo_reserve(bo, true);
852 	if (ret) {
853 		pr_err("%s: Failed to reserve BO\n", __func__);
854 		goto release_out;
855 	}
856 	amdgpu_bo_placement_from_domain(bo, mem->domain);
857 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
858 	if (ret)
859 		pr_err("%s: failed to validate BO\n", __func__);
860 	amdgpu_bo_unreserve(bo);
861 
862 release_out:
863 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
864 unregister_out:
865 	if (ret)
866 		amdgpu_mn_unregister(bo);
867 out:
868 	mutex_unlock(&process_info->lock);
869 	return ret;
870 }
871 
872 /* Reserving a BO and its page table BOs must happen atomically to
873  * avoid deadlocks. Some operations update multiple VMs at once. Track
874  * all the reservation info in a context structure. Optionally a sync
875  * object can track VM updates.
876  */
877 struct bo_vm_reservation_context {
878 	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
879 	unsigned int n_vms;		    /* Number of VMs reserved	    */
880 	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
881 	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
882 	struct list_head list, duplicates;  /* BO lists			    */
883 	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
884 	bool reserved;			    /* Whether BOs are reserved	    */
885 };
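
/*
 * Illustrative usage (not in the original source): callers populate a
 * bo_vm_reservation_context with reserve_bo_and_vm() or
 * reserve_bo_and_cond_vms(), perform their updates, and then release
 * everything with unreserve_bo_and_vms(), optionally waiting on ctx->sync.
 */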
886 
887 enum bo_vm_match {
888 	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
889 	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
890 	BO_VM_ALL,		/* Match all VMs a BO was added to    */
891 };
892 
893 /**
894  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
895  * @mem: KFD BO structure.
896  * @vm: the VM to reserve.
897  * @ctx: the struct that will be used in unreserve_bo_and_vms().
898  */
899 static int reserve_bo_and_vm(struct kgd_mem *mem,
900 			      struct amdgpu_vm *vm,
901 			      struct bo_vm_reservation_context *ctx)
902 {
903 	struct amdgpu_bo *bo = mem->bo;
904 	int ret;
905 
906 	WARN_ON(!vm);
907 
908 	ctx->reserved = false;
909 	ctx->n_vms = 1;
910 	ctx->sync = &mem->sync;
911 
912 	INIT_LIST_HEAD(&ctx->list);
913 	INIT_LIST_HEAD(&ctx->duplicates);
914 
915 	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
916 	if (!ctx->vm_pd)
917 		return -ENOMEM;
918 
919 	ctx->kfd_bo.priority = 0;
920 	ctx->kfd_bo.tv.bo = &bo->tbo;
921 	ctx->kfd_bo.tv.num_shared = 1;
922 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
923 
924 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
925 
926 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
927 				     false, &ctx->duplicates);
928 	if (ret) {
929 		pr_err("Failed to reserve buffers in ttm.\n");
930 		kfree(ctx->vm_pd);
931 		ctx->vm_pd = NULL;
932 		return ret;
933 	}
934 
935 	ctx->reserved = true;
936 	return 0;
937 }
938 
939 /**
940  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
941  * @mem: KFD BO structure.
942  * @vm: the VM to reserve. If NULL, all VMs associated with the BO
943  * are used. Otherwise, only the given VM is reserved.
944  * @map_type: the mapping status that will be used to filter the VMs.
945  * @ctx: the struct that will be used in unreserve_bo_and_vms().
946  *
947  * Returns 0 for success, negative for failure.
948  */
949 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
950 				struct amdgpu_vm *vm, enum bo_vm_match map_type,
951 				struct bo_vm_reservation_context *ctx)
952 {
953 	struct amdgpu_bo *bo = mem->bo;
954 	struct kfd_mem_attachment *entry;
955 	unsigned int i;
956 	int ret;
957 
958 	ctx->reserved = false;
959 	ctx->n_vms = 0;
960 	ctx->vm_pd = NULL;
961 	ctx->sync = &mem->sync;
962 
963 	INIT_LIST_HEAD(&ctx->list);
964 	INIT_LIST_HEAD(&ctx->duplicates);
965 
966 	list_for_each_entry(entry, &mem->attachments, list) {
967 		if ((vm && vm != entry->bo_va->base.vm) ||
968 			(entry->is_mapped != map_type
969 			&& map_type != BO_VM_ALL))
970 			continue;
971 
972 		ctx->n_vms++;
973 	}
974 
975 	if (ctx->n_vms != 0) {
976 		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
977 				     GFP_KERNEL);
978 		if (!ctx->vm_pd)
979 			return -ENOMEM;
980 	}
981 
982 	ctx->kfd_bo.priority = 0;
983 	ctx->kfd_bo.tv.bo = &bo->tbo;
984 	ctx->kfd_bo.tv.num_shared = 1;
985 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
986 
987 	i = 0;
988 	list_for_each_entry(entry, &mem->attachments, list) {
989 		if ((vm && vm != entry->bo_va->base.vm) ||
990 			(entry->is_mapped != map_type
991 			&& map_type != BO_VM_ALL))
992 			continue;
993 
994 		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
995 				&ctx->vm_pd[i]);
996 		i++;
997 	}
998 
999 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1000 				     false, &ctx->duplicates);
1001 	if (ret) {
1002 		pr_err("Failed to reserve buffers in ttm.\n");
1003 		kfree(ctx->vm_pd);
1004 		ctx->vm_pd = NULL;
1005 		return ret;
1006 	}
1007 
1008 	ctx->reserved = true;
1009 	return 0;
1010 }
1011 
1012 /**
1013  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1014  * @ctx: Reservation context to unreserve
1015  * @wait: Optionally wait for a sync object representing pending VM updates
1016  * @intr: Whether the wait is interruptible
1017  *
1018  * Also frees any resources allocated in
1019  * reserve_bo_and_(cond_)vm(s). Returns the status from
1020  * amdgpu_sync_wait.
1021  */
1022 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1023 				 bool wait, bool intr)
1024 {
1025 	int ret = 0;
1026 
1027 	if (wait)
1028 		ret = amdgpu_sync_wait(ctx->sync, intr);
1029 
1030 	if (ctx->reserved)
1031 		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1032 	kfree(ctx->vm_pd);
1033 
1034 	ctx->sync = NULL;
1035 
1036 	ctx->reserved = false;
1037 	ctx->vm_pd = NULL;
1038 
1039 	return ret;
1040 }
1041 
1042 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1043 				struct kfd_mem_attachment *entry,
1044 				struct amdgpu_sync *sync)
1045 {
1046 	struct amdgpu_bo_va *bo_va = entry->bo_va;
1047 	struct amdgpu_device *adev = entry->adev;
1048 	struct amdgpu_vm *vm = bo_va->base.vm;
1049 
1050 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1051 
1052 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1053 
1054 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
1055 
1056 	kfd_mem_dmaunmap_attachment(mem, entry);
1057 }
1058 
1059 static int update_gpuvm_pte(struct kgd_mem *mem,
1060 			    struct kfd_mem_attachment *entry,
1061 			    struct amdgpu_sync *sync,
1062 			    bool *table_freed)
1063 {
1064 	struct amdgpu_bo_va *bo_va = entry->bo_va;
1065 	struct amdgpu_device *adev = entry->adev;
1066 	int ret;
1067 
1068 	ret = kfd_mem_dmamap_attachment(mem, entry);
1069 	if (ret)
1070 		return ret;
1071 
1072 	/* Update the page tables  */
1073 	ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
1074 	if (ret) {
1075 		pr_err("amdgpu_vm_bo_update failed\n");
1076 		return ret;
1077 	}
1078 
1079 	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1080 }
1081 
1082 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1083 			   struct kfd_mem_attachment *entry,
1084 			   struct amdgpu_sync *sync,
1085 			   bool no_update_pte,
1086 			   bool *table_freed)
1087 {
1088 	int ret;
1089 
1090 	/* Set virtual address for the allocation */
1091 	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1092 			       amdgpu_bo_size(entry->bo_va->base.bo),
1093 			       entry->pte_flags);
1094 	if (ret) {
1095 		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1096 				entry->va, ret);
1097 		return ret;
1098 	}
1099 
1100 	if (no_update_pte)
1101 		return 0;
1102 
1103 	ret = update_gpuvm_pte(mem, entry, sync, table_freed);
1104 	if (ret) {
1105 		pr_err("update_gpuvm_pte() failed\n");
1106 		goto update_gpuvm_pte_failed;
1107 	}
1108 
1109 	return 0;
1110 
1111 update_gpuvm_pte_failed:
1112 	unmap_bo_from_gpuvm(mem, entry, sync);
1113 	return ret;
1114 }
1115 
1116 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1117 {
1118 	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1119 
1120 	if (!sg)
1121 		return NULL;
1122 	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1123 		kfree(sg);
1124 		return NULL;
1125 	}
1126 	sg->sgl->dma_address = addr;
1127 	sg->sgl->length = size;
1128 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1129 	sg->sgl->dma_length = size;
1130 #endif
1131 	return sg;
1132 }
1133 
1134 static int process_validate_vms(struct amdkfd_process_info *process_info)
1135 {
1136 	struct amdgpu_vm *peer_vm;
1137 	int ret;
1138 
1139 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1140 			    vm_list_node) {
1141 		ret = vm_validate_pt_pd_bos(peer_vm);
1142 		if (ret)
1143 			return ret;
1144 	}
1145 
1146 	return 0;
1147 }
1148 
1149 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1150 				 struct amdgpu_sync *sync)
1151 {
1152 	struct amdgpu_vm *peer_vm;
1153 	int ret;
1154 
1155 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1156 			    vm_list_node) {
1157 		struct amdgpu_bo *pd = peer_vm->root.bo;
1158 
1159 		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1160 				       AMDGPU_SYNC_NE_OWNER,
1161 				       AMDGPU_FENCE_OWNER_KFD);
1162 		if (ret)
1163 			return ret;
1164 	}
1165 
1166 	return 0;
1167 }
1168 
1169 static int process_update_pds(struct amdkfd_process_info *process_info,
1170 			      struct amdgpu_sync *sync)
1171 {
1172 	struct amdgpu_vm *peer_vm;
1173 	int ret;
1174 
1175 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1176 			    vm_list_node) {
1177 		ret = vm_update_pds(peer_vm, sync);
1178 		if (ret)
1179 			return ret;
1180 	}
1181 
1182 	return 0;
1183 }
1184 
1185 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1186 		       struct dma_fence **ef)
1187 {
1188 	struct amdkfd_process_info *info = NULL;
1189 	int ret;
1190 
1191 	if (!*process_info) {
1192 		info = kzalloc(sizeof(*info), GFP_KERNEL);
1193 		if (!info)
1194 			return -ENOMEM;
1195 
1196 		rw_init(&info->lock, "aginfo");
1197 		INIT_LIST_HEAD(&info->vm_list_head);
1198 		INIT_LIST_HEAD(&info->kfd_bo_list);
1199 		INIT_LIST_HEAD(&info->userptr_valid_list);
1200 		INIT_LIST_HEAD(&info->userptr_inval_list);
1201 
1202 		info->eviction_fence =
1203 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1204 						   current->mm,
1205 						   NULL);
1206 		if (!info->eviction_fence) {
1207 			pr_err("Failed to create eviction fence\n");
1208 			ret = -ENOMEM;
1209 			goto create_evict_fence_fail;
1210 		}
1211 
1212 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1213 		atomic_set(&info->evicted_bos, 0);
1214 		INIT_DELAYED_WORK(&info->restore_userptr_work,
1215 				  amdgpu_amdkfd_restore_userptr_worker);
1216 
1217 		*process_info = info;
1218 		*ef = dma_fence_get(&info->eviction_fence->base);
1219 	}
1220 
1221 	vm->process_info = *process_info;
1222 
1223 	/* Validate page directory and attach eviction fence */
1224 	ret = amdgpu_bo_reserve(vm->root.bo, true);
1225 	if (ret)
1226 		goto reserve_pd_fail;
1227 	ret = vm_validate_pt_pd_bos(vm);
1228 	if (ret) {
1229 		pr_err("validate_pt_pd_bos() failed\n");
1230 		goto validate_pd_fail;
1231 	}
1232 	ret = amdgpu_bo_sync_wait(vm->root.bo,
1233 				  AMDGPU_FENCE_OWNER_KFD, false);
1234 	if (ret)
1235 		goto wait_pd_fail;
1236 	ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
1237 	if (ret)
1238 		goto reserve_shared_fail;
1239 	amdgpu_bo_fence(vm->root.bo,
1240 			&vm->process_info->eviction_fence->base, true);
1241 	amdgpu_bo_unreserve(vm->root.bo);
1242 
1243 	/* Update process info */
1244 	mutex_lock(&vm->process_info->lock);
1245 	list_add_tail(&vm->vm_list_node,
1246 			&(vm->process_info->vm_list_head));
1247 	vm->process_info->n_vms++;
1248 	mutex_unlock(&vm->process_info->lock);
1249 
1250 	return 0;
1251 
1252 reserve_shared_fail:
1253 wait_pd_fail:
1254 validate_pd_fail:
1255 	amdgpu_bo_unreserve(vm->root.bo);
1256 reserve_pd_fail:
1257 	vm->process_info = NULL;
1258 	if (info) {
1259 		/* Two fence references: one in info and one in *ef */
1260 		dma_fence_put(&info->eviction_fence->base);
1261 		dma_fence_put(*ef);
1262 		*ef = NULL;
1263 		*process_info = NULL;
1264 		put_pid(info->pid);
1265 create_evict_fence_fail:
1266 		mutex_destroy(&info->lock);
1267 		kfree(info);
1268 	}
1269 	return ret;
1270 }
1271 
1272 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1273 					   struct file *filp, u32 pasid,
1274 					   void **process_info,
1275 					   struct dma_fence **ef)
1276 {
1277 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1278 	struct amdgpu_fpriv *drv_priv;
1279 	struct amdgpu_vm *avm;
1280 	int ret;
1281 
1282 	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1283 	if (ret)
1284 		return ret;
1285 	avm = &drv_priv->vm;
1286 
1287 	/* Already a compute VM? */
1288 	if (avm->process_info)
1289 		return -EINVAL;
1290 
1291 	/* Free the pasid originally allocated by amdgpu; it will be
1292 	 * replaced with a pasid allocated by kfd.
1293 	 */
1294 	if (avm->pasid) {
1295 		amdgpu_pasid_free(avm->pasid);
1296 		amdgpu_vm_set_pasid(adev, avm, 0);
1297 	}
1298 
1299 	/* Convert VM into a compute VM */
1300 	ret = amdgpu_vm_make_compute(adev, avm);
1301 	if (ret)
1302 		return ret;
1303 
1304 	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1305 	if (ret)
1306 		return ret;
1307 	/* Initialize KFD part of the VM and process info */
1308 	ret = init_kfd_vm(avm, process_info, ef);
1309 	if (ret)
1310 		return ret;
1311 
1312 	amdgpu_vm_set_task_info(avm);
1313 
1314 	return 0;
1315 }
1316 
1317 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1318 				    struct amdgpu_vm *vm)
1319 {
1320 	struct amdkfd_process_info *process_info = vm->process_info;
1321 	struct amdgpu_bo *pd = vm->root.bo;
1322 
1323 	if (!process_info)
1324 		return;
1325 
1326 	/* Release eviction fence from PD */
1327 	amdgpu_bo_reserve(pd, false);
1328 	amdgpu_bo_fence(pd, NULL, false);
1329 	amdgpu_bo_unreserve(pd);
1330 
1331 	/* Update process info */
1332 	mutex_lock(&process_info->lock);
1333 	process_info->n_vms--;
1334 	list_del(&vm->vm_list_node);
1335 	mutex_unlock(&process_info->lock);
1336 
1337 	vm->process_info = NULL;
1338 
1339 	/* Release per-process resources when last compute VM is destroyed */
1340 	if (!process_info->n_vms) {
1341 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1342 		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1343 		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1344 
1345 		dma_fence_put(&process_info->eviction_fence->base);
1346 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1347 		put_pid(process_info->pid);
1348 		mutex_destroy(&process_info->lock);
1349 		kfree(process_info);
1350 	}
1351 }
1352 
1353 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
1354 {
1355 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1356 	struct amdgpu_vm *avm;
1357 
1358 	if (WARN_ON(!kgd || !drm_priv))
1359 		return;
1360 
1361 	avm = drm_priv_to_vm(drm_priv);
1362 
1363 	pr_debug("Releasing process vm %p\n", avm);
1364 
1365 	/* The original pasid of the amdgpu vm was already released
1366 	 * when the amdgpu vm was converted to a compute vm.
1367 	 * The current pasid is managed by kfd and will be
1368 	 * released on kfd process destroy. Set the amdgpu pasid
1369 	 * to 0 to avoid a duplicate release.
1370 	 */
1371 	amdgpu_vm_release_compute(adev, avm);
1372 }
1373 
1374 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1375 {
1376 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1377 	struct amdgpu_bo *pd = avm->root.bo;
1378 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1379 
1380 	if (adev->asic_type < CHIP_VEGA10)
1381 		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1382 	return avm->pd_phys_addr;
1383 }
1384 
1385 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1386 		struct kgd_dev *kgd, uint64_t va, uint64_t size,
1387 		void *drm_priv, struct kgd_mem **mem,
1388 		uint64_t *offset, uint32_t flags)
1389 {
1390 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1391 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1392 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1393 	struct sg_table *sg = NULL;
1394 	uint64_t user_addr = 0;
1395 	struct amdgpu_bo *bo;
1396 	struct drm_gem_object *gobj = NULL;
1397 	u32 domain, alloc_domain;
1398 	u64 alloc_flags;
1399 	int ret;
1400 
1401 	/*
1402 	 * Check on which domain to allocate BO
1403 	 */
1404 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1405 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1406 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1407 		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1408 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1409 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1410 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1411 		alloc_flags = 0;
1412 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1413 		domain = AMDGPU_GEM_DOMAIN_GTT;
1414 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1415 		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1416 		if (!offset || !*offset)
1417 			return -EINVAL;
1418 		user_addr = untagged_addr(*offset);
1419 	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1420 			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1421 		domain = AMDGPU_GEM_DOMAIN_GTT;
1422 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1423 		bo_type = ttm_bo_type_sg;
1424 		alloc_flags = 0;
1425 		if (size > UINT_MAX)
1426 			return -EINVAL;
1427 		sg = create_doorbell_sg(*offset, size);
1428 		if (!sg)
1429 			return -ENOMEM;
1430 	} else {
1431 		return -EINVAL;
1432 	}
1433 
1434 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1435 	if (!*mem) {
1436 		ret = -ENOMEM;
1437 		goto err;
1438 	}
1439 	INIT_LIST_HEAD(&(*mem)->attachments);
1440 	rw_init(&(*mem)->lock, "gpuvma");
1441 	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1442 
1443 	/* Workaround for AQL queue wraparound bug. Map the same
1444 	 * memory twice. That means we only actually allocate half
1445 	 * the memory.
1446 	 */
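	/*
	 * Illustrative note (not in the original source): for an AQL queue
	 * request of size S, a BO of size S/2 is allocated here and
	 * kfd_mem_attach() later maps that BO at both va and va + S/2.
	 */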
1447 	if ((*mem)->aql_queue)
1448 		size = size >> 1;
1449 
1450 	(*mem)->alloc_flags = flags;
1451 
1452 	amdgpu_sync_create(&(*mem)->sync);
1453 
1454 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1455 	if (ret) {
1456 		pr_debug("Insufficient memory\n");
1457 		goto err_reserve_limit;
1458 	}
1459 
1460 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1461 			va, size, domain_string(alloc_domain));
1462 
1463 	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1464 				       bo_type, NULL, &gobj);
1465 	if (ret) {
1466 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1467 			 domain_string(alloc_domain), ret);
1468 		goto err_bo_create;
1469 	}
1470 	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1471 	if (ret) {
1472 		pr_debug("Failed to allow vma node access. ret %d\n", ret);
1473 		goto err_node_allow;
1474 	}
1475 	bo = gem_to_amdgpu_bo(gobj);
1476 	if (bo_type == ttm_bo_type_sg) {
1477 		bo->tbo.sg = sg;
1478 		bo->tbo.ttm->sg = sg;
1479 	}
1480 	bo->kfd_bo = *mem;
1481 	(*mem)->bo = bo;
1482 	if (user_addr)
1483 		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1484 
1485 	(*mem)->va = va;
1486 	(*mem)->domain = domain;
1487 	(*mem)->mapped_to_gpu_memory = 0;
1488 	(*mem)->process_info = avm->process_info;
1489 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1490 
1491 	if (user_addr) {
1492 		ret = init_user_pages(*mem, user_addr);
1493 		if (ret)
1494 			goto allocate_init_user_pages_failed;
1495 	}
1496 
1497 	if (offset)
1498 		*offset = amdgpu_bo_mmap_offset(bo);
1499 
1500 	return 0;
1501 
1502 allocate_init_user_pages_failed:
1503 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1504 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1505 err_node_allow:
1506 	/* Don't unreserve system mem limit twice */
1507 	goto err_reserve_limit;
1508 err_bo_create:
1509 	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1510 err_reserve_limit:
1511 	mutex_destroy(&(*mem)->lock);
1512 	if (gobj)
1513 		drm_gem_object_put(gobj);
1514 	else
1515 		kfree(*mem);
1516 err:
1517 	if (sg) {
1518 		sg_free_table(sg);
1519 		kfree(sg);
1520 	}
1521 	return ret;
1522 }
1523 
1524 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1525 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
1526 		uint64_t *size)
1527 {
1528 	struct amdkfd_process_info *process_info = mem->process_info;
1529 	unsigned long bo_size = mem->bo->tbo.base.size;
1530 	struct kfd_mem_attachment *entry, *tmp;
1531 	struct bo_vm_reservation_context ctx;
1532 	struct ttm_validate_buffer *bo_list_entry;
1533 	unsigned int mapped_to_gpu_memory;
1534 	int ret;
1535 	bool is_imported = false;
1536 
1537 	mutex_lock(&mem->lock);
1538 	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1539 	is_imported = mem->is_imported;
1540 	mutex_unlock(&mem->lock);
1541 	/* lock is not needed after this, since mem is unused and will
1542 	 * be freed anyway
1543 	 */
1544 
1545 	if (mapped_to_gpu_memory > 0) {
1546 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1547 				mem->va, bo_size);
1548 		return -EBUSY;
1549 	}
1550 
1551 	/* Make sure restore workers don't access the BO any more */
1552 	bo_list_entry = &mem->validate_list;
1553 	mutex_lock(&process_info->lock);
1554 	list_del(&bo_list_entry->head);
1555 	mutex_unlock(&process_info->lock);
1556 
1557 	/* No more MMU notifiers */
1558 	amdgpu_mn_unregister(mem->bo);
1559 
1560 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1561 	if (unlikely(ret))
1562 		return ret;
1563 
1564 	/* The eviction fence should be removed by the last unmap.
1565 	 * TODO: Log an error condition if the bo still has the eviction fence
1566 	 * attached
1567 	 */
1568 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1569 					process_info->eviction_fence);
1570 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1571 		mem->va + bo_size * (1 + mem->aql_queue));
1572 
1573 	ret = unreserve_bo_and_vms(&ctx, false, false);
1574 
1575 	/* Remove from VM internal data structures */
1576 	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1577 		kfd_mem_detach(entry);
1578 
1579 	/* Free the sync object */
1580 	amdgpu_sync_free(&mem->sync);
1581 
1582 	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1583 	 * remap BO. We need to free it.
1584 	 */
1585 	if (mem->bo->tbo.sg) {
1586 		sg_free_table(mem->bo->tbo.sg);
1587 		kfree(mem->bo->tbo.sg);
1588 	}
1589 
1590 	/* Update the size of the BO being freed if it was allocated from
1591 	 * VRAM and is not imported.
1592 	 */
1593 	if (size) {
1594 		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1595 		    (!is_imported))
1596 			*size = bo_size;
1597 		else
1598 			*size = 0;
1599 	}
1600 
1601 	/* Free the BO */
1602 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1603 	if (mem->dmabuf)
1604 		dma_buf_put(mem->dmabuf);
1605 	drm_gem_object_put(&mem->bo->tbo.base);
1606 	mutex_destroy(&mem->lock);
1607 	kfree(mem);
1608 
1609 	return ret;
1610 }
1611 
1612 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1613 		struct kgd_dev *kgd, struct kgd_mem *mem,
1614 		void *drm_priv, bool *table_freed)
1615 {
1616 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1617 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1618 	int ret;
1619 	struct amdgpu_bo *bo;
1620 	uint32_t domain;
1621 	struct kfd_mem_attachment *entry;
1622 	struct bo_vm_reservation_context ctx;
1623 	unsigned long bo_size;
1624 	bool is_invalid_userptr = false;
1625 
1626 	bo = mem->bo;
1627 	if (!bo) {
1628 		pr_err("Invalid BO when mapping memory to GPU\n");
1629 		return -EINVAL;
1630 	}
1631 
1632 	/* Make sure restore is not running concurrently. Since we
1633 	 * don't map invalid userptr BOs, we rely on the next restore
1634 	 * worker to do the mapping
1635 	 */
1636 	mutex_lock(&mem->process_info->lock);
1637 
1638 	/* Lock the mmap lock. If we find an invalid userptr BO, we can be
1639 	 * sure that the MMU notifier is no longer running
1640 	 * concurrently and the queues are actually stopped
1641 	 */
1642 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1643 		mmap_write_lock(current->mm);
1644 		is_invalid_userptr = atomic_read(&mem->invalid);
1645 		mmap_write_unlock(current->mm);
1646 	}
1647 
1648 	mutex_lock(&mem->lock);
1649 
1650 	domain = mem->domain;
1651 	bo_size = bo->tbo.base.size;
1652 
1653 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1654 			mem->va,
1655 			mem->va + bo_size * (1 + mem->aql_queue),
1656 			avm, domain_string(domain));
1657 
1658 	if (!kfd_mem_is_attached(avm, mem)) {
1659 		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1660 		if (ret)
1661 			goto out;
1662 	}
1663 
1664 	ret = reserve_bo_and_vm(mem, avm, &ctx);
1665 	if (unlikely(ret))
1666 		goto out;
1667 
1668 	/* Userptr can be marked as "not invalid", but not actually be
1669 	 * validated yet (still in the system domain). In that case
1670 	 * the queues are still stopped and we can leave mapping for
1671 	 * the next restore worker
1672 	 */
1673 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1674 	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1675 		is_invalid_userptr = true;
1676 
1677 	ret = vm_validate_pt_pd_bos(avm);
1678 	if (unlikely(ret))
1679 		goto out_unreserve;
1680 
1681 	if (mem->mapped_to_gpu_memory == 0 &&
1682 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1683 		/* Validate BO only once. The eviction fence gets added to BO
1684 		 * the first time it is mapped. Validate will wait for all
1685 		 * background evictions to complete.
1686 		 */
1687 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1688 		if (ret) {
1689 			pr_debug("Validate failed\n");
1690 			goto out_unreserve;
1691 		}
1692 	}
1693 
1694 	list_for_each_entry(entry, &mem->attachments, list) {
1695 		if (entry->bo_va->base.vm != avm || entry->is_mapped)
1696 			continue;
1697 
1698 		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1699 			 entry->va, entry->va + bo_size, entry);
1700 
1701 		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1702 				      is_invalid_userptr, table_freed);
1703 		if (ret) {
1704 			pr_err("Failed to map bo to gpuvm\n");
1705 			goto out_unreserve;
1706 		}
1707 
1708 		ret = vm_update_pds(avm, ctx.sync);
1709 		if (ret) {
1710 			pr_err("Failed to update page directories\n");
1711 			goto out_unreserve;
1712 		}
1713 
1714 		entry->is_mapped = true;
1715 		mem->mapped_to_gpu_memory++;
1716 		pr_debug("\t INC mapping count %d\n",
1717 			 mem->mapped_to_gpu_memory);
1718 	}
1719 
1720 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1721 		amdgpu_bo_fence(bo,
1722 				&avm->process_info->eviction_fence->base,
1723 				true);
1724 	ret = unreserve_bo_and_vms(&ctx, false, false);
1725 
1726 	/* Only apply the no-TLB-flush optimization on Aldebaran, to
1727 	 * work around regressions on other ASICs.
1728 	 */
1729 	if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
1730 		*table_freed = true;
1731 
1732 	goto out;
1733 
1734 out_unreserve:
1735 	unreserve_bo_and_vms(&ctx, false, false);
1736 out:
1737 	mutex_unlock(&mem->process_info->lock);
1738 	mutex_unlock(&mem->lock);
1739 	return ret;
1740 }
1741 
1742 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1743 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1744 {
1745 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1746 	struct amdkfd_process_info *process_info = avm->process_info;
1747 	unsigned long bo_size = mem->bo->tbo.base.size;
1748 	struct kfd_mem_attachment *entry;
1749 	struct bo_vm_reservation_context ctx;
1750 	int ret;
1751 
1752 	mutex_lock(&mem->lock);
1753 
1754 	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1755 	if (unlikely(ret))
1756 		goto out;
1757 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
1758 	if (ctx.n_vms == 0) {
1759 		ret = -EINVAL;
1760 		goto unreserve_out;
1761 	}
1762 
1763 	ret = vm_validate_pt_pd_bos(avm);
1764 	if (unlikely(ret))
1765 		goto unreserve_out;
1766 
1767 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1768 		mem->va,
1769 		mem->va + bo_size * (1 + mem->aql_queue),
1770 		avm);
1771 
1772 	list_for_each_entry(entry, &mem->attachments, list) {
1773 		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1774 			continue;
1775 
1776 		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1777 			 entry->va, entry->va + bo_size, entry);
1778 
1779 		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1780 		entry->is_mapped = false;
1781 
1782 		mem->mapped_to_gpu_memory--;
1783 		pr_debug("\t DEC mapping count %d\n",
1784 			 mem->mapped_to_gpu_memory);
1785 	}
1786 
1787 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
1788 	 * required.
1789 	 */
1790 	if (mem->mapped_to_gpu_memory == 0 &&
1791 	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1792 	    !mem->bo->tbo.pin_count)
1793 		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1794 						process_info->eviction_fence);
1795 
1796 unreserve_out:
1797 	unreserve_bo_and_vms(&ctx, false, false);
1798 out:
1799 	mutex_unlock(&mem->lock);
1800 	return ret;
1801 }
1802 
1803 int amdgpu_amdkfd_gpuvm_sync_memory(
1804 		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1805 {
1806 	struct amdgpu_sync sync;
1807 	int ret;
1808 
1809 	amdgpu_sync_create(&sync);
1810 
1811 	mutex_lock(&mem->lock);
1812 	amdgpu_sync_clone(&mem->sync, &sync);
1813 	mutex_unlock(&mem->lock);
1814 
1815 	ret = amdgpu_sync_wait(&sync, intr);
1816 	amdgpu_sync_free(&sync);
1817 	return ret;
1818 }
1819 
1820 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1821 		struct kgd_mem *mem, void **kptr, uint64_t *size)
1822 {
1823 	int ret;
1824 	struct amdgpu_bo *bo = mem->bo;
1825 
1826 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1827 		pr_err("userptr can't be mapped to kernel\n");
1828 		return -EINVAL;
1829 	}
1830 
1831 	mutex_lock(&mem->process_info->lock);
1832 
1833 	ret = amdgpu_bo_reserve(bo, true);
1834 	if (ret) {
1835 		pr_err("Failed to reserve bo. ret %d\n", ret);
1836 		goto bo_reserve_failed;
1837 	}
1838 
1839 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1840 	if (ret) {
1841 		pr_err("Failed to pin bo. ret %d\n", ret);
1842 		goto pin_failed;
1843 	}
1844 
1845 	ret = amdgpu_bo_kmap(bo, kptr);
1846 	if (ret) {
1847 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
1848 		goto kmap_failed;
1849 	}
1850 
1851 	amdgpu_amdkfd_remove_eviction_fence(
1852 		bo, mem->process_info->eviction_fence);
1853 
1854 	if (size)
1855 		*size = amdgpu_bo_size(bo);
1856 
1857 	amdgpu_bo_unreserve(bo);
1858 
1859 	mutex_unlock(&mem->process_info->lock);
1860 	return 0;
1861 
1862 kmap_failed:
1863 	amdgpu_bo_unpin(bo);
1864 pin_failed:
1865 	amdgpu_bo_unreserve(bo);
1866 bo_reserve_failed:
1867 	mutex_unlock(&mem->process_info->lock);
1868 
1869 	return ret;
1870 }
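
/*
 * Illustrative sketch (not part of the driver): a hypothetical user of
 * the kernel-mapping helper above.  On success the BO is pinned in GTT
 * and stays CPU-accessible through the returned pointer; the wrapper
 * name and the zero-fill are assumptions for illustration only.
 */
#if 0
static int kfd_example_cpu_access(struct kgd_dev *kgd, struct kgd_mem *mem)
{
	void *kptr = NULL;
	uint64_t size = 0;
	int ret;

	ret = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kgd, mem, &kptr, &size);
	if (ret)
		return ret;

	/* The CPU can now access the (pinned) BO through kptr */
	memset(kptr, 0, size);
	return 0;
}
#endif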
1871 
1872 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1873 					      struct kfd_vm_fault_info *mem)
1874 {
1875 	struct amdgpu_device *adev;
1876 
1877 	adev = (struct amdgpu_device *)kgd;
1878 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1879 		*mem = *adev->gmc.vm_fault_info;
1880 		mb();
1881 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1882 	}
1883 	return 0;
1884 }
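
/*
 * Illustrative sketch (not part of the driver): polling the VM fault
 * record above.  The helper copies and then clears the pending record,
 * if any; the wrapper name is an assumption for illustration only.
 */
#if 0
static void kfd_example_check_fault(struct kgd_dev *kgd)
{
	struct kfd_vm_fault_info info = {0};

	amdgpu_amdkfd_gpuvm_get_vm_fault_info(kgd, &info);
}
#endif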
1885 
1886 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1887 				      struct dma_buf *dma_buf,
1888 				      uint64_t va, void *drm_priv,
1889 				      struct kgd_mem **mem, uint64_t *size,
1890 				      uint64_t *mmap_offset)
1891 {
1892 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1893 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1894 	struct drm_gem_object *obj;
1895 	struct amdgpu_bo *bo;
1896 	int ret;
1897 
1898 	if (dma_buf->ops != &amdgpu_dmabuf_ops)
1899 		/* Can't handle non-graphics buffers */
1900 		return -EINVAL;
1901 
1902 	obj = dma_buf->priv;
1903 	if (drm_to_adev(obj->dev) != adev)
1904 		/* Can't handle buffers from other devices */
1905 		return -EINVAL;
1906 
1907 	bo = gem_to_amdgpu_bo(obj);
1908 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1909 				    AMDGPU_GEM_DOMAIN_GTT)))
1910 		/* Only VRAM and GTT BOs are supported */
1911 		return -EINVAL;
1912 
1913 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1914 	if (!*mem)
1915 		return -ENOMEM;
1916 
1917 	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
1918 	if (ret) {
1919 		kfree(*mem);
1920 		return ret;
1921 	}
1922 
1923 	if (size)
1924 		*size = amdgpu_bo_size(bo);
1925 
1926 	if (mmap_offset)
1927 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
1928 
1929 	INIT_LIST_HEAD(&(*mem)->attachments);
1930 	rw_init(&(*mem)->lock, "gpuvmi");
1931 
1932 	(*mem)->alloc_flags =
1933 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1934 		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1935 		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1936 		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1937 
1938 	drm_gem_object_get(&bo->tbo.base);
1939 	(*mem)->bo = bo;
1940 	(*mem)->va = va;
1941 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1942 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1943 	(*mem)->mapped_to_gpu_memory = 0;
1944 	(*mem)->process_info = avm->process_info;
1945 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1946 	amdgpu_sync_create(&(*mem)->sync);
1947 	(*mem)->is_imported = true;
1948 
1949 	return 0;
1950 }
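
/*
 * Illustrative sketch (not part of the driver): importing a dma-buf by
 * file descriptor, roughly the shape of the KFD ioctl path.  The import
 * takes its own GEM reference via drm_gem_object_get(), so the caller
 * may drop its dma-buf reference immediately.  The wrapper name and
 * parameters are assumptions for illustration only.
 */
#if 0
static int kfd_example_import_dmabuf(struct kgd_dev *kgd, int fd,
				     uint64_t va, void *drm_priv)
{
	struct dma_buf *dmabuf;
	struct kgd_mem *mem;
	uint64_t size, mmap_offset;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	ret = amdgpu_amdkfd_gpuvm_import_dmabuf(kgd, dmabuf, va, drm_priv,
						&mem, &size, &mmap_offset);
	dma_buf_put(dmabuf);
	return ret;
}
#endif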
1951 
1952 /* Evict a userptr BO by stopping the queues if necessary
1953  *
1954  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1955  * cannot do any memory allocations, and cannot take any locks that
1956  * are held elsewhere while allocating memory. Therefore this is as
1957  * simple as possible, using atomic counters.
1958  *
1959  * It doesn't do anything to the BO itself. The real work happens in
1960  * restore, where we get updated page addresses. This function only
1961  * ensures that GPU access to the BO is stopped.
1962  */
1963 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1964 				struct mm_struct *mm)
1965 {
1966 	struct amdkfd_process_info *process_info = mem->process_info;
1967 	int evicted_bos;
1968 	int r = 0;
1969 
1970 	atomic_inc(&mem->invalid);
1971 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1972 	if (evicted_bos == 1) {
1973 		/* First eviction, stop the queues */
1974 		r = kgd2kfd_quiesce_mm(mm);
1975 		if (r)
1976 			pr_err("Failed to quiesce KFD\n");
1977 		schedule_delayed_work(&process_info->restore_userptr_work,
1978 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1979 	}
1980 
1981 	return r;
1982 }
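
/*
 * Illustrative walk-through (not code): the eviction/restore handshake
 * above, assuming two userptr BOs A and B are invalidated back to back:
 *
 *   evict_userptr(A): evicted_bos 0 -> 1, quiesce queues, schedule restore
 *   evict_userptr(B): evicted_bos 1 -> 2, queues are already stopped
 *   restore worker:   reads evicted_bos == 2, revalidates A and B, then
 *                     cmpxchg(evicted_bos, 2, 0) succeeds and the queues
 *                     are resumed; if another eviction raced in between,
 *                     the cmpxchg fails and the worker retries later.
 */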
1983 
1984 /* Update invalid userptr BOs
1985  *
1986  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1987  * userptr_inval_list and updates user pages for all BOs that have
1988  * been invalidated since their last update.
1989  */
1990 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1991 				     struct mm_struct *mm)
1992 {
1993 	struct kgd_mem *mem, *tmp_mem;
1994 	struct amdgpu_bo *bo;
1995 	struct ttm_operation_ctx ctx = { false, false };
1996 	int invalid, ret;
1997 
1998 	/* Move all invalidated BOs to the userptr_inval_list and
1999 	 * release their user pages by migration to the CPU domain
2000 	 */
2001 	list_for_each_entry_safe(mem, tmp_mem,
2002 				 &process_info->userptr_valid_list,
2003 				 validate_list.head) {
2004 		if (!atomic_read(&mem->invalid))
2005 			continue; /* BO is still valid */
2006 
2007 		bo = mem->bo;
2008 
2009 		if (amdgpu_bo_reserve(bo, true))
2010 			return -EAGAIN;
2011 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2012 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2013 		amdgpu_bo_unreserve(bo);
2014 		if (ret) {
2015 			pr_err("%s: Failed to invalidate userptr BO\n",
2016 			       __func__);
2017 			return -EAGAIN;
2018 		}
2019 
2020 		list_move_tail(&mem->validate_list.head,
2021 			       &process_info->userptr_inval_list);
2022 	}
2023 
2024 	if (list_empty(&process_info->userptr_inval_list))
2025 		return 0; /* All evicted userptr BOs were freed */
2026 
2027 	/* Go through userptr_inval_list and update any invalid user_pages */
2028 	list_for_each_entry(mem, &process_info->userptr_inval_list,
2029 			    validate_list.head) {
2030 		invalid = atomic_read(&mem->invalid);
2031 		if (!invalid)
2032 			/* BO hasn't been invalidated since the last
2033 			 * revalidation attempt; keep its list entry as-is.
2034 			 */
2035 			continue;
2036 
2037 		bo = mem->bo;
2038 
2039 		/* Get updated user pages */
2040 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2041 		if (ret) {
2042 			pr_debug("%s: Failed to get user pages: %d\n",
2043 				__func__, ret);
2044 
2045 			/* Return -EBUSY or -ENOMEM; the restore will retry */
2046 			return ret;
2047 		}
2048 
2049 		/*
2050 		 * FIXME: Cannot ignore the return code, must hold
2051 		 * notifier_lock
2052 		 */
2053 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2054 
2055 		/* Mark the BO as valid unless it was invalidated
2056 		 * again concurrently.
2057 		 */
2058 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2059 			return -EAGAIN;
2060 	}
2061 
2062 	return 0;
2063 }
2064 
2065 /* Validate invalid userptr BOs
2066  *
2067  * Validates BOs on the userptr_inval_list, and moves them back to the
2068  * userptr_valid_list. Also updates GPUVM page tables with new page
2069  * addresses and waits for the page table updates to complete.
2070  */
2071 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2072 {
2073 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
2074 	struct list_head resv_list, duplicates;
2075 	struct ww_acquire_ctx ticket;
2076 	struct amdgpu_sync sync;
2077 
2078 	struct amdgpu_vm *peer_vm;
2079 	struct kgd_mem *mem, *tmp_mem;
2080 	struct amdgpu_bo *bo;
2081 	struct ttm_operation_ctx ctx = { false, false };
2082 	int i, ret;
2083 
2084 	pd_bo_list_entries = kcalloc(process_info->n_vms,
2085 				     sizeof(struct amdgpu_bo_list_entry),
2086 				     GFP_KERNEL);
2087 	if (!pd_bo_list_entries) {
2088 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2089 		ret = -ENOMEM;
2090 		goto out_no_mem;
2091 	}
2092 
2093 	INIT_LIST_HEAD(&resv_list);
2094 	INIT_LIST_HEAD(&duplicates);
2095 
2096 	/* Get all the page directory BOs that need to be reserved */
2097 	i = 0;
2098 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2099 			    vm_list_node)
2100 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2101 				    &pd_bo_list_entries[i++]);
2102 	/* Add the userptr_inval_list entries to resv_list */
2103 	list_for_each_entry(mem, &process_info->userptr_inval_list,
2104 			    validate_list.head) {
2105 		list_add_tail(&mem->resv_list.head, &resv_list);
2106 		mem->resv_list.bo = mem->validate_list.bo;
2107 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2108 	}
2109 
2110 	/* Reserve all BOs and page tables for validation */
2111 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2112 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
2113 	if (ret)
2114 		goto out_free;
2115 
2116 	amdgpu_sync_create(&sync);
2117 
2118 	ret = process_validate_vms(process_info);
2119 	if (ret)
2120 		goto unreserve_out;
2121 
2122 	/* Validate BOs and update GPUVM page tables */
2123 	list_for_each_entry_safe(mem, tmp_mem,
2124 				 &process_info->userptr_inval_list,
2125 				 validate_list.head) {
2126 		struct kfd_mem_attachment *attachment;
2127 
2128 		bo = mem->bo;
2129 
2130 		/* Validate the BO if we got user pages */
2131 		if (bo->tbo.ttm->pages[0]) {
2132 			amdgpu_bo_placement_from_domain(bo, mem->domain);
2133 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2134 			if (ret) {
2135 				pr_err("%s: failed to validate BO\n", __func__);
2136 				goto unreserve_out;
2137 			}
2138 		}
2139 
2140 		list_move_tail(&mem->validate_list.head,
2141 			       &process_info->userptr_valid_list);
2142 
2143 		/* Update mapping. If the BO was not validated
2144 		 * (because we couldn't get user pages), this will
2145 		 * clear the page table entries, which will result in
2146 		 * VM faults if the GPU tries to access the invalid
2147 		 * memory.
2148 		 */
2149 		list_for_each_entry(attachment, &mem->attachments, list) {
2150 			if (!attachment->is_mapped)
2151 				continue;
2152 
2153 			kfd_mem_dmaunmap_attachment(mem, attachment);
2154 			ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
2155 			if (ret) {
2156 				pr_err("%s: update PTE failed\n", __func__);
2157 				/* make sure this gets validated again */
2158 				atomic_inc(&mem->invalid);
2159 				goto unreserve_out;
2160 			}
2161 		}
2162 	}
2163 
2164 	/* Update page directories */
2165 	ret = process_update_pds(process_info, &sync);
2166 
2167 unreserve_out:
2168 	ttm_eu_backoff_reservation(&ticket, &resv_list);
2169 	amdgpu_sync_wait(&sync, false);
2170 	amdgpu_sync_free(&sync);
2171 out_free:
2172 	kfree(pd_bo_list_entries);
2173 out_no_mem:
2174 
2175 	return ret;
2176 }
2177 
2178 /* Worker callback to restore evicted userptr BOs
2179  *
2180  * Tries to update and validate all userptr BOs. If successful and no
2181  * concurrent evictions happened, the queues are restarted. Otherwise,
2182  * reschedule for another attempt later.
2183  */
2184 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2185 {
2186 	struct delayed_work *dwork = to_delayed_work(work);
2187 	struct amdkfd_process_info *process_info =
2188 		container_of(dwork, struct amdkfd_process_info,
2189 			     restore_userptr_work);
2190 	struct task_struct *usertask;
2191 	struct mm_struct *mm;
2192 	int evicted_bos;
2193 
2194 	evicted_bos = atomic_read(&process_info->evicted_bos);
2195 	if (!evicted_bos)
2196 		return;
2197 
2198 	/* Reference task and mm in case of concurrent process termination */
2199 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2200 	if (!usertask)
2201 		return;
2202 	mm = get_task_mm(usertask);
2203 	if (!mm) {
2204 		put_task_struct(usertask);
2205 		return;
2206 	}
2207 
2208 	mutex_lock(&process_info->lock);
2209 
2210 	if (update_invalid_user_pages(process_info, mm))
2211 		goto unlock_out;
2212 	/* userptr_inval_list can be empty if all evicted userptr BOs
2213 	 * have been freed. In that case there is nothing to validate
2214 	 * and we can just restart the queues.
2215 	 */
2216 	if (!list_empty(&process_info->userptr_inval_list)) {
2217 		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2218 			goto unlock_out; /* Concurrent eviction, try again */
2219 
2220 		if (validate_invalid_user_pages(process_info))
2221 			goto unlock_out;
2222 	}
2223 	/* Final check for concurrent eviction and atomic update. If
2224 	 * another eviction happens after successful update, it will
2225 	 * be a first eviction that calls quiesce_mm. The eviction
2226 	 * reference counting inside KFD will handle this case.
2227 	 */
2228 	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2229 	    evicted_bos)
2230 		goto unlock_out;
2231 	evicted_bos = 0;
2232 	if (kgd2kfd_resume_mm(mm)) {
2233 		pr_err("%s: Failed to resume KFD\n", __func__);
2234 		/* No recovery from this failure. Probably the CP is
2235 		 * hanging. No point trying again.
2236 		 */
2237 	}
2238 
2239 unlock_out:
2240 	mutex_unlock(&process_info->lock);
2241 	mmput(mm);
2242 	put_task_struct(usertask);
2243 
2244 	/* If validation failed, reschedule another attempt */
2245 	if (evicted_bos)
2246 		schedule_delayed_work(&process_info->restore_userptr_work,
2247 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2248 }
2249 
2250 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2251  *   KFD process identified by process_info
2252  *
2253  * @process_info: amdkfd_process_info of the KFD process
2254  *
2255  * After a memory eviction, the restore thread calls this function. It must
2256  * be called while the process is still valid. BO restore involves:
2257  *
2258  * 1.  Release the old eviction fence and create a new one
2259  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2260  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2261  *     BOs that need to be reserved.
2262  * 4.  Reserve all the BOs
2263  * 5.  Validate PD and PT BOs.
2264  * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add a new fence
2265  * 7.  Add the fence to all PD and PT BOs.
2266  * 8.  Unreserve all BOs
2267  */
2268 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2269 {
2270 	struct amdgpu_bo_list_entry *pd_bo_list;
2271 	struct amdkfd_process_info *process_info = info;
2272 	struct amdgpu_vm *peer_vm;
2273 	struct kgd_mem *mem;
2274 	struct bo_vm_reservation_context ctx;
2275 	struct amdgpu_amdkfd_fence *new_fence;
2276 	int ret = 0, i;
2277 	struct list_head duplicate_save;
2278 	struct amdgpu_sync sync_obj;
2279 	unsigned long failed_size = 0;
2280 	unsigned long total_size = 0;
2281 
2282 	INIT_LIST_HEAD(&duplicate_save);
2283 	INIT_LIST_HEAD(&ctx.list);
2284 	INIT_LIST_HEAD(&ctx.duplicates);
2285 
2286 	pd_bo_list = kcalloc(process_info->n_vms,
2287 			     sizeof(struct amdgpu_bo_list_entry),
2288 			     GFP_KERNEL);
2289 	if (!pd_bo_list)
2290 		return -ENOMEM;
2291 
2292 	i = 0;
2293 	mutex_lock(&process_info->lock);
2294 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2295 			vm_list_node)
2296 		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2297 
2298 	/* Reserve all BOs and page tables/directory. Add all BOs from
2299 	 * kfd_bo_list to ctx.list
2300 	 */
2301 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2302 			    validate_list.head) {
2303 
2304 		list_add_tail(&mem->resv_list.head, &ctx.list);
2305 		mem->resv_list.bo = mem->validate_list.bo;
2306 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2307 	}
2308 
2309 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2310 				     false, &duplicate_save);
2311 	if (ret) {
2312 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2313 		goto ttm_reserve_fail;
2314 	}
2315 
2316 	amdgpu_sync_create(&sync_obj);
2317 
2318 	/* Validate PDs and PTs */
2319 	ret = process_validate_vms(process_info);
2320 	if (ret)
2321 		goto validate_map_fail;
2322 
2323 	ret = process_sync_pds_resv(process_info, &sync_obj);
2324 	if (ret) {
2325 		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2326 		goto validate_map_fail;
2327 	}
2328 
2329 	/* Validate BOs and map them to GPUVM (update VM page tables). */
2330 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2331 			    validate_list.head) {
2332 
2333 		struct amdgpu_bo *bo = mem->bo;
2334 		uint32_t domain = mem->domain;
2335 		struct kfd_mem_attachment *attachment;
2336 
2337 		total_size += amdgpu_bo_size(bo);
2338 
2339 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2340 		if (ret) {
2341 			pr_debug("Memory eviction: Validate BOs failed\n");
2342 			failed_size += amdgpu_bo_size(bo);
2343 			ret = amdgpu_amdkfd_bo_validate(bo,
2344 						AMDGPU_GEM_DOMAIN_GTT, false);
2345 			if (ret) {
2346 				pr_debug("Memory eviction: Try again\n");
2347 				goto validate_map_fail;
2348 			}
2349 		}
2350 		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2351 		if (ret) {
2352 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2353 			goto validate_map_fail;
2354 		}
2355 		list_for_each_entry(attachment, &mem->attachments, list) {
2356 			if (!attachment->is_mapped)
2357 				continue;
2358 
2359 			kfd_mem_dmaunmap_attachment(mem, attachment);
2360 			ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
2361 			if (ret) {
2362 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2363 				goto validate_map_fail;
2364 			}
2365 		}
2366 	}
2367 
2368 	if (failed_size)
2369 		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2370 
2371 	/* Update page directories */
2372 	ret = process_update_pds(process_info, &sync_obj);
2373 	if (ret) {
2374 		pr_debug("Memory eviction: update PDs failed. Try again\n");
2375 		goto validate_map_fail;
2376 	}
2377 
2378 	/* Wait for validate and PT updates to finish */
2379 	amdgpu_sync_wait(&sync_obj, false);
2380 
2381 	/* Release the old eviction fence and create a new one: a fence only
2382 	 * goes from unsignaled to signaled, so it cannot be reused.
2383 	 * Use the context and mm from the old fence.
2384 	 */
2385 	new_fence = amdgpu_amdkfd_fence_create(
2386 				process_info->eviction_fence->base.context,
2387 				process_info->eviction_fence->mm,
2388 				NULL);
2389 	if (!new_fence) {
2390 		pr_err("Failed to create eviction fence\n");
2391 		ret = -ENOMEM;
2392 		goto validate_map_fail;
2393 	}
2394 	dma_fence_put(&process_info->eviction_fence->base);
2395 	process_info->eviction_fence = new_fence;
2396 	*ef = dma_fence_get(&new_fence->base);
2397 
2398 	/* Attach new eviction fence to all BOs except pinned ones */
2399 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2400 		validate_list.head) {
2401 		if (mem->bo->tbo.pin_count)
2402 			continue;
2403 
2404 		amdgpu_bo_fence(mem->bo,
2405 			&process_info->eviction_fence->base, true);
2406 	}
2407 	/* Attach eviction fence to PD / PT BOs */
2408 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2409 			    vm_list_node) {
2410 		struct amdgpu_bo *bo = peer_vm->root.bo;
2411 
2412 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2413 	}
2414 
2415 validate_map_fail:
2416 	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2417 	amdgpu_sync_free(&sync_obj);
2418 ttm_reserve_fail:
2419 	mutex_unlock(&process_info->lock);
2420 	kfree(pd_bo_list);
2421 	return ret;
2422 }
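
/*
 * Illustrative sketch (not part of the driver): the restore entry point
 * above is designed to be retried, since TTM reservation or BO
 * validation can fail transiently under memory pressure.  The wrapper
 * name and the retry policy are assumptions for illustration only.
 */
#if 0
static void kfd_example_restore(struct amdkfd_process_info *process_info,
				struct dma_fence **ef)
{
	/* On success, *ef holds a reference to the new eviction fence */
	if (amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, ef))
		pr_debug("Restore failed, caller should retry later\n");
}
#endif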
2423 
2424 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2425 {
2426 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2427 	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2428 	int ret;
2429 
2430 	if (!info || !gws)
2431 		return -EINVAL;
2432 
2433 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2434 	if (!*mem)
2435 		return -ENOMEM;
2436 
2437 	rw_init(&(*mem)->lock, "aggws");
2438 	INIT_LIST_HEAD(&(*mem)->attachments);
2439 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
2440 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2441 	(*mem)->process_info = process_info;
2442 	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2443 	amdgpu_sync_create(&(*mem)->sync);
2444 
2445 
2446 	/* Validate gws bo the first time it is added to process */
2447 	mutex_lock(&(*mem)->process_info->lock);
2448 	ret = amdgpu_bo_reserve(gws_bo, false);
2449 	if (unlikely(ret)) {
2450 		pr_err("Reserve gws bo failed %d\n", ret);
2451 		goto bo_reservation_failure;
2452 	}
2453 
2454 	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2455 	if (ret) {
2456 		pr_err("GWS BO validate failed %d\n", ret);
2457 		goto bo_validation_failure;
2458 	}
2459 	/* The GWS resource is shared between amdgpu and amdkfd.
2460 	 * Add the process eviction fence to the BO so that either
2461 	 * side can evict the other.
2462 	 */
2463 	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2464 	if (ret)
2465 		goto reserve_shared_fail;
2466 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2467 	amdgpu_bo_unreserve(gws_bo);
2468 	mutex_unlock(&(*mem)->process_info->lock);
2469 
2470 	return ret;
2471 
2472 reserve_shared_fail:
2473 bo_validation_failure:
2474 	amdgpu_bo_unreserve(gws_bo);
2475 bo_reservation_failure:
2476 	mutex_unlock(&(*mem)->process_info->lock);
2477 	amdgpu_sync_free(&(*mem)->sync);
2478 	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2479 	amdgpu_bo_unref(&gws_bo);
2480 	mutex_destroy(&(*mem)->lock);
2481 	kfree(*mem);
2482 	*mem = NULL;
2483 	return ret;
2484 }
2485 
2486 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2487 {
2488 	int ret;
2489 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2490 	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2491 	struct amdgpu_bo *gws_bo = kgd_mem->bo;
2492 
2493 	/* Remove the BO from the process's validate list so the restore
2494 	 * worker won't touch it anymore.
2495 	 */
2496 	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2497 
2498 	ret = amdgpu_bo_reserve(gws_bo, false);
2499 	if (unlikely(ret)) {
2500 		pr_err("Reserve gws bo failed %d\n", ret);
2501 		/* TODO: add BO back to validate_list? */
2502 		return ret;
2503 	}
2504 	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2505 			process_info->eviction_fence);
2506 	amdgpu_bo_unreserve(gws_bo);
2507 	amdgpu_sync_free(&kgd_mem->sync);
2508 	amdgpu_bo_unref(&gws_bo);
2509 	mutex_destroy(&kgd_mem->lock);
2510 	kfree(mem);
2511 	return 0;
2512 }
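
/*
 * Illustrative sketch (not part of the driver): pairing the GWS add and
 * remove helpers above over the lifetime of a process's GWS allocation.
 * The wrapper name is an assumption for illustration only.
 */
#if 0
static int kfd_example_gws_lifetime(void *process_info, void *gws_bo)
{
	struct kgd_mem *gws_mem;
	int ret;

	ret = amdgpu_amdkfd_add_gws_to_process(process_info, gws_bo, &gws_mem);
	if (ret)
		return ret;

	/* ... hand the GWS allocation to user queues ... */

	return amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
}
#endif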
2513 
2514 /* Returns GPU-specific tiling mode information */
2515 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2516 				struct tile_config *config)
2517 {
2518 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2519 
2520 	config->gb_addr_config = adev->gfx.config.gb_addr_config;
2521 	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2522 	config->num_tile_configs =
2523 			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2524 	config->macro_tile_config_ptr =
2525 			adev->gfx.config.macrotile_mode_array;
2526 	config->num_macro_tile_configs =
2527 			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2528 
2529 	/* Those values are not set from GFX9 onwards */
2530 	config->num_banks = adev->gfx.config.num_banks;
2531 	config->num_ranks = adev->gfx.config.num_ranks;
2532 
2533 	return 0;
2534 }
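
/*
 * Illustrative sketch (not part of the driver): reading the tile
 * configuration.  The helper fills the caller-provided struct from the
 * adev->gfx.config snapshot; the wrapper name is an assumption for
 * illustration only.
 */
#if 0
static void kfd_example_tile_config(struct kgd_dev *kgd)
{
	struct tile_config cfg;

	if (amdgpu_amdkfd_get_tile_config(kgd, &cfg) == 0)
		pr_debug("gb_addr_config 0x%x, %u tile modes, %u macro tile modes\n",
			 cfg.gb_addr_config, cfg.num_tile_configs,
			 cfg.num_macro_tile_configs);
}
#endif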
2535