xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c (revision 7350f337b9e3eb4461d99580e625c7ef148d107c)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37 
38 /**
39  * DOC: GPUVM
40  *
41  * GPUVM is similar to the legacy gart on older asics, however
42  * rather than there being a single global gart table
43  * for the entire GPU, there are multiple VM page tables active
44  * at any given time.  The VM page tables can contain a mix
45  * of vram pages and system memory pages, and system memory pages
46  * can be mapped as snooped (cached system pages) or unsnooped
47  * (uncached system pages).
48  * Each VM has an ID associated with it and there is a page table
49  * associated with each VMID.  When executing a command buffer,
50  * the kernel tells the ring what VMID to use for that command
51  * buffer.  VMIDs are allocated dynamically as commands are submitted.
52  * The userspace drivers maintain their own address space and the kernel
53  * sets up their page tables accordingly when they submit their
54  * command buffers and a VMID is assigned.
55  * Cayman/Trinity support up to 8 active VMs at any given time;
56  * SI supports 16.
57  */
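
/*
 * Illustrative sketch, not part of the driver: userspace typically requests a
 * GPUVM mapping with the GEM_VA ioctl, which eventually reaches
 * amdgpu_vm_bo_map() and amdgpu_vm_bo_update() below.  Structure, flag and
 * ioctl names follow amdgpu_drm.h as understood here and should be double
 * checked there; drm_fd, bo_handle and bo_size are placeholders.  va_address
 * and map_size must be GPU page aligned.
 *
 *	struct drm_amdgpu_gem_va va = {
 *		.handle = bo_handle,
 *		.operation = AMDGPU_VA_OP_MAP,
 *		.flags = AMDGPU_VM_PAGE_READABLE |
 *			 AMDGPU_VM_PAGE_WRITEABLE,
 *		.va_address = 0x100000000ULL,
 *		.offset_in_bo = 0,
 *		.map_size = bo_size,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_VA, &va);
 */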
58 
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
61 
62 #ifdef __linux__
63 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
64 		     START, LAST, static, amdgpu_vm_it)
65 #else
66 static struct amdgpu_bo_va_mapping *
67 amdgpu_vm_it_iter_first(struct rb_root_cached *root, uint64_t start,
68     uint64_t last)
69 {
70 	struct amdgpu_bo_va_mapping *node;
71 	struct rb_node *rb;
72 
73 	for (rb = rb_first(root); rb; rb = rb_next(rb)) {
74 		node = rb_entry(rb, typeof(*node), rb);
75 		if (LAST(node) >= start && START(node) <= last)
76 			return node;
77 	}
78 	return NULL;
79 }
80 
81 static struct amdgpu_bo_va_mapping *
82 amdgpu_vm_it_iter_next(struct amdgpu_bo_va_mapping *node, uint64_t start,
83     uint64_t last)
84 {
85 	STUB();
86 	struct rb_node *rb = &node->rb;
87 
88 	for (rb = rb_next(rb); rb; rb = rb_next(rb)) {
89 		node = rb_entry(rb, typeof(*node), rb);
90 		if (LAST(node) >= start && START(node) <= last)
91 			return node;
92 	}
93 	return NULL;
94 }
95 
96 static void
97 amdgpu_vm_it_remove(struct amdgpu_bo_va_mapping *node,
98     struct rb_root_cached *root)
99 {
100 	rb_erase(&node->rb, root);
101 }
102 
103 static void
104 amdgpu_vm_it_insert(struct amdgpu_bo_va_mapping *node,
105     struct rb_root_cached *root)
106 {
107 	struct rb_node **iter = &root->rb_node;
108 	struct rb_node *parent = NULL;
109 	struct amdgpu_bo_va_mapping *iter_node;
110 
111 	while (*iter) {
112 		parent = *iter;
113 		iter_node = rb_entry(*iter, struct amdgpu_bo_va_mapping, rb);
114 
115 		if (node->start < iter_node->start)
116 			iter = &(*iter)->rb_left;
117 		else
118 			iter = &(*iter)->rb_right;
119 	}
120 
121 	rb_link_node(&node->rb, parent, iter);
122 	rb_insert_color(&node->rb, root);
123 }
124 #endif
125 
126 #undef START
127 #undef LAST
128 
129 /**
130  * struct amdgpu_pte_update_params - Local structure
131  *
132  * Encapsulate some VM table update parameters to reduce
133  * the number of function parameters
134  *
135  */
136 struct amdgpu_pte_update_params {
137 
138 	/**
139 	 * @adev: amdgpu device we do this update for
140 	 */
141 	struct amdgpu_device *adev;
142 
143 	/**
144 	 * @vm: optional amdgpu_vm we do this update for
145 	 */
146 	struct amdgpu_vm *vm;
147 
148 	/**
149 	 * @src: address where to copy page table entries from
150 	 */
151 	uint64_t src;
152 
153 	/**
154 	 * @ib: indirect buffer to fill with commands
155 	 */
156 	struct amdgpu_ib *ib;
157 
158 	/**
159 	 * @func: Function which actually does the update
160 	 */
161 	void (*func)(struct amdgpu_pte_update_params *params,
162 		     struct amdgpu_bo *bo, uint64_t pe,
163 		     uint64_t addr, unsigned count, uint32_t incr,
164 		     uint64_t flags);
165 	/**
166 	 * @pages_addr:
167 	 *
168 	 * DMA addresses to use for mapping, used during VM update by CPU
169 	 */
170 	dma_addr_t *pages_addr;
171 
172 	/**
173 	 * @kptr:
174 	 *
175 	 * Kernel pointer of PD/PT BO that needs to be updated,
176 	 * used during VM update by CPU
177 	 */
178 	void *kptr;
179 };
180 
181 /**
182  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
183  */
184 struct amdgpu_prt_cb {
185 
186 	/**
187 	 * @adev: amdgpu device
188 	 */
189 	struct amdgpu_device *adev;
190 
191 	/**
192 	 * @cb: callback
193 	 */
194 	struct dma_fence_cb cb;
195 };
196 
197 /**
198  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
199  *
200  * @base: base structure for tracking BO usage in a VM
201  * @vm: vm to which bo is to be added
202  * @bo: amdgpu buffer object
203  *
204  * Initialize a bo_va_base structure and add it to the appropriate lists
205  *
206  */
207 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
208 				   struct amdgpu_vm *vm,
209 				   struct amdgpu_bo *bo)
210 {
211 	base->vm = vm;
212 	base->bo = bo;
213 	INIT_LIST_HEAD(&base->bo_list);
214 	INIT_LIST_HEAD(&base->vm_status);
215 
216 	if (!bo)
217 		return;
218 	list_add_tail(&base->bo_list, &bo->va);
219 
220 	if (bo->tbo.type == ttm_bo_type_kernel)
221 		list_move(&base->vm_status, &vm->relocated);
222 
223 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
224 		return;
225 
226 	if (bo->preferred_domains &
227 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
228 		return;
229 
230 	/*
231 	 * We checked all the prerequisites, but it looks like this per VM BO
232 	 * is currently evicted. Add the BO to the evicted list to make sure it
233 	 * is validated on next VM use to avoid faults.
234 	 */
235 	list_move_tail(&base->vm_status, &vm->evicted);
236 	base->moved = true;
237 }
238 
239 /**
240  * amdgpu_vm_level_shift - return the addr shift for each level
241  *
242  * @adev: amdgpu_device pointer
243  * @level: VMPT level
244  *
245  * Returns:
246  * The number of bits the pfn needs to be right shifted for a level.
247  */
248 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
249 				      unsigned level)
250 {
251 	unsigned shift = 0xff;
252 
253 	switch (level) {
254 	case AMDGPU_VM_PDB2:
255 	case AMDGPU_VM_PDB1:
256 	case AMDGPU_VM_PDB0:
257 		shift = 9 * (AMDGPU_VM_PDB0 - level) +
258 			adev->vm_manager.block_size;
259 		break;
260 	case AMDGPU_VM_PTB:
261 		shift = 0;
262 		break;
263 	default:
264 		dev_err(adev->dev, "the level%d isn't supported.\n", level);
265 	}
266 
267 	return shift;
268 }
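
/*
 * Worked example, assuming the common 9-bit block_size and the four level
 * PDB2/PDB1/PDB0/PTB layout: the shift is 0 for the PTB, 9 for PDB0, 18 for
 * PDB1 and 27 for PDB2.  Since pfns count 4KB GPU pages, a single entry at a
 * given level spans 1ULL << (shift + 12) bytes: 4KB per PTE, 2MB per PDB0
 * entry, 1GB per PDB1 entry and 512GB per PDB2 entry.
 */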
269 
270 /**
271  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
272  *
273  * @adev: amdgpu_device pointer
274  * @level: VMPT level
275  *
276  * Returns:
277  * The number of entries in a page directory or page table.
278  */
279 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
280 				      unsigned level)
281 {
282 	unsigned shift = amdgpu_vm_level_shift(adev,
283 					       adev->vm_manager.root_level);
284 
285 	if (level == adev->vm_manager.root_level)
286 		/* For the root directory */
287 		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
288 	else if (level != AMDGPU_VM_PTB)
289 		/* Everything in between */
290 		return 512;
291 	else
292 		/* For the page tables on the leaves */
293 		return AMDGPU_VM_PTE_COUNT(adev);
294 }
295 
296 /**
297  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
298  *
299  * @adev: amdgpu_device pointer
300  * @level: VMPT level
301  *
302  * Returns:
303  * The size of the BO for a page directory or page table in bytes.
304  */
305 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
306 {
307 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
308 }
309 
310 /**
311  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
312  *
313  * @vm: vm providing the BOs
314  * @validated: head of validation list
315  * @entry: entry to add
316  *
317  * Add the page directory to the list of BOs to
318  * validate for command submission.
319  */
320 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
321 			 struct list_head *validated,
322 			 struct amdgpu_bo_list_entry *entry)
323 {
324 	entry->robj = vm->root.base.bo;
325 	entry->priority = 0;
326 	entry->tv.bo = &entry->robj->tbo;
327 	entry->tv.shared = true;
328 	entry->user_pages = NULL;
329 	list_add(&entry->tv.head, validated);
330 }
331 
332 /**
333  * amdgpu_vm_validate_pt_bos - validate the page table BOs
334  *
335  * @adev: amdgpu device pointer
336  * @vm: vm providing the BOs
337  * @validate: callback to do the validation
338  * @param: parameter for the validation callback
339  *
340  * Validate the page table BOs on command submission if necessary.
341  *
342  * Returns:
343  * Validation result.
344  */
345 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
346 			      int (*validate)(void *p, struct amdgpu_bo *bo),
347 			      void *param)
348 {
349 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
350 	struct amdgpu_vm_bo_base *bo_base, *tmp;
351 	int r = 0;
352 
353 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
354 		struct amdgpu_bo *bo = bo_base->bo;
355 
356 		if (bo->parent) {
357 			r = validate(param, bo);
358 			if (r)
359 				break;
360 
361 			spin_lock(&glob->lru_lock);
362 			ttm_bo_move_to_lru_tail(&bo->tbo);
363 			if (bo->shadow)
364 				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
365 			spin_unlock(&glob->lru_lock);
366 		}
367 
368 		if (bo->tbo.type != ttm_bo_type_kernel) {
369 			spin_lock(&vm->moved_lock);
370 			list_move(&bo_base->vm_status, &vm->moved);
371 			spin_unlock(&vm->moved_lock);
372 		} else {
373 			list_move(&bo_base->vm_status, &vm->relocated);
374 		}
375 	}
376 
377 	spin_lock(&glob->lru_lock);
378 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
379 		struct amdgpu_bo *bo = bo_base->bo;
380 
381 		if (!bo->parent)
382 			continue;
383 
384 		ttm_bo_move_to_lru_tail(&bo->tbo);
385 		if (bo->shadow)
386 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
387 	}
388 	spin_unlock(&glob->lru_lock);
389 
390 	return r;
391 }
392 
393 /**
394  * amdgpu_vm_ready - check VM is ready for updates
395  *
396  * @vm: VM to check
397  *
398  * Check if all VM PDs/PTs are ready for updates
399  *
400  * Returns:
401  * True if eviction list is empty.
402  */
403 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
404 {
405 	return list_empty(&vm->evicted);
406 }
407 
408 /**
409  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
410  *
411  * @adev: amdgpu_device pointer
412  * @vm: VM to clear BO from
413  * @bo: BO to clear
414  * @level: level this BO is at
415  * @pte_support_ats: indicate ATS support from PTE
416  *
417  * Root PD needs to be reserved when calling this.
418  *
419  * Returns:
420  * 0 on success, errno otherwise.
421  */
422 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
423 			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
424 			      unsigned level, bool pte_support_ats)
425 {
426 	struct ttm_operation_ctx ctx = { true, false };
427 	struct dma_fence *fence = NULL;
428 	unsigned entries, ats_entries;
429 	struct amdgpu_ring *ring;
430 	struct amdgpu_job *job;
431 	uint64_t addr;
432 	int r;
433 
434 	entries = amdgpu_bo_size(bo) / 8;
435 
436 	if (pte_support_ats) {
437 		if (level == adev->vm_manager.root_level) {
438 			ats_entries = amdgpu_vm_level_shift(adev, level);
439 			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
440 			ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
441 			ats_entries = min(ats_entries, entries);
442 			entries -= ats_entries;
443 		} else {
444 			ats_entries = entries;
445 			entries = 0;
446 		}
447 	} else {
448 		ats_entries = 0;
449 	}
450 
451 	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
452 
453 	r = reservation_object_reserve_shared(bo->tbo.resv);
454 	if (r)
455 		return r;
456 
457 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
458 	if (r)
459 		goto error;
460 
461 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
462 	if (r)
463 		goto error;
464 
465 	addr = amdgpu_bo_gpu_offset(bo);
466 	if (ats_entries) {
467 		uint64_t ats_value;
468 
469 		ats_value = AMDGPU_PTE_DEFAULT_ATC;
470 		if (level != AMDGPU_VM_PTB)
471 			ats_value |= AMDGPU_PDE_PTE;
472 
473 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
474 				      ats_entries, 0, ats_value);
475 		addr += ats_entries * 8;
476 	}
477 
478 	if (entries)
479 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
480 				      entries, 0, 0);
481 
482 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
483 
484 	WARN_ON(job->ibs[0].length_dw > 64);
485 	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
486 			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
487 	if (r)
488 		goto error_free;
489 
490 	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
491 			      &fence);
492 	if (r)
493 		goto error_free;
494 
495 	amdgpu_bo_fence(bo, fence, true);
496 	dma_fence_put(fence);
497 
498 	if (bo->shadow)
499 		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
500 					  level, pte_support_ats);
501 
502 	return 0;
503 
504 error_free:
505 	amdgpu_job_free(job);
506 
507 error:
508 	return r;
509 }
510 
511 /**
512  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
513  *
514  * @adev: amdgpu_device pointer
515  * @vm: requested vm
516  * @parent: parent PT
517  * @saddr: start of the address range
518  * @eaddr: end of the address range
519  * @level: VMPT level
520  * @ats: indicate ATS support from PTE
521  *
522  * Make sure the page directories and page tables are allocated
523  *
524  * Returns:
525  * 0 on success, errno otherwise.
526  */
527 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
528 				  struct amdgpu_vm *vm,
529 				  struct amdgpu_vm_pt *parent,
530 				  uint64_t saddr, uint64_t eaddr,
531 				  unsigned level, bool ats)
532 {
533 	unsigned shift = amdgpu_vm_level_shift(adev, level);
534 	unsigned pt_idx, from, to;
535 	u64 flags;
536 	int r;
537 
538 	if (!parent->entries) {
539 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
540 
541 		parent->entries = kvmalloc_array(num_entries,
542 						   sizeof(struct amdgpu_vm_pt),
543 						   GFP_KERNEL | __GFP_ZERO);
544 		if (!parent->entries)
545 			return -ENOMEM;
546 		memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
547 	}
548 
549 	from = saddr >> shift;
550 	to = eaddr >> shift;
551 	if (from >= amdgpu_vm_num_entries(adev, level) ||
552 	    to >= amdgpu_vm_num_entries(adev, level))
553 		return -EINVAL;
554 
555 	++level;
556 	saddr = saddr & ((1 << shift) - 1);
557 	eaddr = eaddr & ((1 << shift) - 1);
558 
559 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
560 	if (vm->root.base.bo->shadow)
561 		flags |= AMDGPU_GEM_CREATE_SHADOW;
562 	if (vm->use_cpu_for_update)
563 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
564 	else
565 		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
566 
567 	/* walk over the address space and allocate the page tables */
568 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
569 		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
570 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
571 		struct amdgpu_bo *pt;
572 
573 		if (!entry->base.bo) {
574 			struct amdgpu_bo_param bp;
575 
576 			memset(&bp, 0, sizeof(bp));
577 			bp.size = amdgpu_vm_bo_size(adev, level);
578 			bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
579 			bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
580 			bp.flags = flags;
581 			bp.type = ttm_bo_type_kernel;
582 			bp.resv = resv;
583 			r = amdgpu_bo_create(adev, &bp, &pt);
584 			if (r)
585 				return r;
586 
587 			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
588 			if (r) {
589 				amdgpu_bo_unref(&pt->shadow);
590 				amdgpu_bo_unref(&pt);
591 				return r;
592 			}
593 
594 			if (vm->use_cpu_for_update) {
595 				r = amdgpu_bo_kmap(pt, NULL);
596 				if (r) {
597 					amdgpu_bo_unref(&pt->shadow);
598 					amdgpu_bo_unref(&pt);
599 					return r;
600 				}
601 			}
602 
603 			/* Keep a reference to the root directory to avoid
604 			 * freeing them up in the wrong order.
605 			 */
606 			pt->parent = amdgpu_bo_ref(parent->base.bo);
607 
608 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
609 		}
610 
611 		if (level < AMDGPU_VM_PTB) {
612 			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
613 			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
614 				((1 << shift) - 1);
615 			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
616 						   sub_eaddr, level, ats);
617 			if (r)
618 				return r;
619 		}
620 	}
621 
622 	return 0;
623 }
624 
625 /**
626  * amdgpu_vm_alloc_pts - Allocate page tables.
627  *
628  * @adev: amdgpu_device pointer
629  * @vm: VM to allocate page tables for
630  * @saddr: Start address which needs to be allocated
631  * @size: Size from start address we need.
632  *
633  * Make sure the page tables are allocated.
634  *
635  * Returns:
636  * 0 on success, errno otherwise.
637  */
638 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
639 			struct amdgpu_vm *vm,
640 			uint64_t saddr, uint64_t size)
641 {
642 	uint64_t eaddr;
643 	bool ats = false;
644 
645 	/* validate the parameters */
646 	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
647 		return -EINVAL;
648 
649 	eaddr = saddr + size - 1;
650 
651 	if (vm->pte_support_ats)
652 		ats = saddr < AMDGPU_VA_HOLE_START;
653 
654 	saddr /= AMDGPU_GPU_PAGE_SIZE;
655 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
656 
657 	if (eaddr >= adev->vm_manager.max_pfn) {
658 		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
659 			eaddr, adev->vm_manager.max_pfn);
660 		return -EINVAL;
661 	}
662 
663 	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
664 				      adev->vm_manager.root_level, ats);
665 }
666 
667 /**
668  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
669  *
670  * @adev: amdgpu_device pointer
671  */
672 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
673 {
674 	const struct amdgpu_ip_block *ip_block;
675 	bool has_compute_vm_bug;
676 	struct amdgpu_ring *ring;
677 	int i;
678 
679 	has_compute_vm_bug = false;
680 
681 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
682 	if (ip_block) {
683 		/* Compute has a VM bug for GFX version < 7.
684 		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
685 		if (ip_block->version->major <= 7)
686 			has_compute_vm_bug = true;
687 		else if (ip_block->version->major == 8)
688 			if (adev->gfx.mec_fw_version < 673)
689 				has_compute_vm_bug = true;
690 	}
691 
692 	for (i = 0; i < adev->num_rings; i++) {
693 		ring = adev->rings[i];
694 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
695 			/* only compute rings */
696 			ring->has_compute_vm_bug = has_compute_vm_bug;
697 		else
698 			ring->has_compute_vm_bug = false;
699 	}
700 }
701 
702 /**
703  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
704  *
705  * @ring: ring on which the job will be submitted
706  * @job: job to submit
707  *
708  * Returns:
709  * True if sync is needed.
710  */
711 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
712 				  struct amdgpu_job *job)
713 {
714 	struct amdgpu_device *adev = ring->adev;
715 	unsigned vmhub = ring->funcs->vmhub;
716 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
717 	struct amdgpu_vmid *id;
718 	bool gds_switch_needed;
719 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
720 
721 	if (job->vmid == 0)
722 		return false;
723 	id = &id_mgr->ids[job->vmid];
724 	gds_switch_needed = ring->funcs->emit_gds_switch && (
725 		id->gds_base != job->gds_base ||
726 		id->gds_size != job->gds_size ||
727 		id->gws_base != job->gws_base ||
728 		id->gws_size != job->gws_size ||
729 		id->oa_base != job->oa_base ||
730 		id->oa_size != job->oa_size);
731 
732 	if (amdgpu_vmid_had_gpu_reset(adev, id))
733 		return true;
734 
735 	return vm_flush_needed || gds_switch_needed;
736 }
737 
738 /**
739  * amdgpu_vm_flush - hardware flush the vm
740  *
741  * @ring: ring to use for flush
742  * @job:  related job
743  * @need_pipe_sync: is pipe sync needed
744  *
745  * Emit a VM flush when it is necessary.
746  *
747  * Returns:
748  * 0 on success, errno otherwise.
749  */
750 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
751 {
752 	struct amdgpu_device *adev = ring->adev;
753 	unsigned vmhub = ring->funcs->vmhub;
754 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
755 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
756 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
757 		id->gds_base != job->gds_base ||
758 		id->gds_size != job->gds_size ||
759 		id->gws_base != job->gws_base ||
760 		id->gws_size != job->gws_size ||
761 		id->oa_base != job->oa_base ||
762 		id->oa_size != job->oa_size);
763 	bool vm_flush_needed = job->vm_needs_flush;
764 	bool pasid_mapping_needed = id->pasid != job->pasid ||
765 		!id->pasid_mapping ||
766 		!dma_fence_is_signaled(id->pasid_mapping);
767 	struct dma_fence *fence = NULL;
768 	unsigned patch_offset = 0;
769 	int r;
770 
771 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
772 		gds_switch_needed = true;
773 		vm_flush_needed = true;
774 		pasid_mapping_needed = true;
775 	}
776 
777 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
778 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
779 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
780 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
781 		ring->funcs->emit_wreg;
782 
783 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
784 		return 0;
785 
786 	if (ring->funcs->init_cond_exec)
787 		patch_offset = amdgpu_ring_init_cond_exec(ring);
788 
789 	if (need_pipe_sync)
790 		amdgpu_ring_emit_pipeline_sync(ring);
791 
792 	if (vm_flush_needed) {
793 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
794 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
795 	}
796 
797 	if (pasid_mapping_needed)
798 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
799 
800 	if (vm_flush_needed || pasid_mapping_needed) {
801 		r = amdgpu_fence_emit(ring, &fence, 0);
802 		if (r)
803 			return r;
804 	}
805 
806 	if (vm_flush_needed) {
807 		mutex_lock(&id_mgr->lock);
808 		dma_fence_put(id->last_flush);
809 		id->last_flush = dma_fence_get(fence);
810 		id->current_gpu_reset_count =
811 			atomic_read(&adev->gpu_reset_counter);
812 		mutex_unlock(&id_mgr->lock);
813 	}
814 
815 	if (pasid_mapping_needed) {
816 		id->pasid = job->pasid;
817 		dma_fence_put(id->pasid_mapping);
818 		id->pasid_mapping = dma_fence_get(fence);
819 	}
820 	dma_fence_put(fence);
821 
822 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
823 		id->gds_base = job->gds_base;
824 		id->gds_size = job->gds_size;
825 		id->gws_base = job->gws_base;
826 		id->gws_size = job->gws_size;
827 		id->oa_base = job->oa_base;
828 		id->oa_size = job->oa_size;
829 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
830 					    job->gds_size, job->gws_base,
831 					    job->gws_size, job->oa_base,
832 					    job->oa_size);
833 	}
834 
835 	if (ring->funcs->patch_cond_exec)
836 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
837 
838 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
839 	if (ring->funcs->emit_switch_buffer) {
840 		amdgpu_ring_emit_switch_buffer(ring);
841 		amdgpu_ring_emit_switch_buffer(ring);
842 	}
843 	return 0;
844 }
845 
846 /**
847  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
848  *
849  * @vm: requested vm
850  * @bo: requested buffer object
851  *
852  * Find @bo inside the requested vm.
853  * Search inside the @bos vm list for the requested vm
854  * Returns the found bo_va or NULL if none is found
855  *
856  * Object has to be reserved!
857  *
858  * Returns:
859  * Found bo_va or NULL.
860  */
861 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
862 				       struct amdgpu_bo *bo)
863 {
864 	struct amdgpu_bo_va *bo_va;
865 
866 	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
867 		if (bo_va->base.vm == vm) {
868 			return bo_va;
869 		}
870 	}
871 	return NULL;
872 }
873 
874 /**
875  * amdgpu_vm_do_set_ptes - helper to call the right asic function
876  *
877  * @params: see amdgpu_pte_update_params definition
878  * @bo: PD/PT to update
879  * @pe: addr of the page entry
880  * @addr: dst addr to write into pe
881  * @count: number of page entries to update
882  * @incr: increase next addr by incr bytes
883  * @flags: hw access flags
884  *
885  * Traces the parameters and calls the right asic functions
886  * to setup the page table using the DMA.
887  */
888 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
889 				  struct amdgpu_bo *bo,
890 				  uint64_t pe, uint64_t addr,
891 				  unsigned count, uint32_t incr,
892 				  uint64_t flags)
893 {
894 	pe += amdgpu_bo_gpu_offset(bo);
895 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
896 
897 	if (count < 3) {
898 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
899 				    addr | flags, count, incr);
900 
901 	} else {
902 		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
903 				      count, incr, flags);
904 	}
905 }
906 
907 /**
908  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
909  *
910  * @params: see amdgpu_pte_update_params definition
911  * @bo: PD/PT to update
912  * @pe: addr of the page entry
913  * @addr: dst addr to write into pe
914  * @count: number of page entries to update
915  * @incr: increase next addr by incr bytes
916  * @flags: hw access flags
917  *
918  * Traces the parameters and calls the DMA function to copy the PTEs.
919  */
920 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
921 				   struct amdgpu_bo *bo,
922 				   uint64_t pe, uint64_t addr,
923 				   unsigned count, uint32_t incr,
924 				   uint64_t flags)
925 {
926 	uint64_t src = (params->src + (addr >> 12) * 8);
927 
928 	pe += amdgpu_bo_gpu_offset(bo);
929 	trace_amdgpu_vm_copy_ptes(pe, src, count);
930 
931 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
932 }
933 
934 /**
935  * amdgpu_vm_map_gart - Resolve gart mapping of addr
936  *
937  * @pages_addr: optional DMA address to use for lookup
938  * @addr: the unmapped addr
939  *
940  * Look up the physical address of the page that the pte resolves
941  * to.
942  *
943  * Returns:
944  * The pointer for the page table entry.
945  */
946 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
947 {
948 	uint64_t result;
949 
950 	/* page table offset */
951 	result = pages_addr[addr >> PAGE_SHIFT];
952 
953 	/* in case cpu page size != gpu page size */
954 	result |= addr & (PAGE_MASK);
955 
956 	result &= 0xFFFFFFFFFFFFF000ULL;
957 
958 	return result;
959 }
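
/*
 * Example, assuming hypothetical 16KB CPU pages and the BSD PAGE_MASK
 * convention (PAGE_SIZE - 1): for addr 0x7000 the lookup hits
 * pages_addr[0x7000 >> 14] == pages_addr[1], and the in-CPU-page offset
 * 0x3000 survives the final 4KB mask, selecting the right 4KB GPU page
 * inside that 16KB CPU page.  With 4KB CPU pages the offset handling is
 * effectively a no-op.
 */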
960 
961 /**
962  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
963  *
964  * @params: see amdgpu_pte_update_params definition
965  * @bo: PD/PT to update
966  * @pe: kmap addr of the page entry
967  * @addr: dst addr to write into pe
968  * @count: number of page entries to update
969  * @incr: increase next addr by incr bytes
970  * @flags: hw access flags
971  *
972  * Write count number of PT/PD entries directly.
973  */
974 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
975 				   struct amdgpu_bo *bo,
976 				   uint64_t pe, uint64_t addr,
977 				   unsigned count, uint32_t incr,
978 				   uint64_t flags)
979 {
980 	unsigned int i;
981 	uint64_t value;
982 
983 	pe += (unsigned long)amdgpu_bo_kptr(bo);
984 
985 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
986 
987 	for (i = 0; i < count; i++) {
988 		value = params->pages_addr ?
989 			amdgpu_vm_map_gart(params->pages_addr, addr) :
990 			addr;
991 		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
992 				       i, value, flags);
993 		addr += incr;
994 	}
995 }
996 
997 
998 /**
999  * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
1000  *
1001  * @adev: amdgpu_device pointer
1002  * @vm: related vm
1003  * @owner: fence owner
1004  *
1005  * Returns:
1006  * 0 on success, errno otherwise.
1007  */
1008 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1009 			     void *owner)
1010 {
1011 	struct amdgpu_sync sync;
1012 	int r;
1013 
1014 	amdgpu_sync_create(&sync);
1015 	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
1016 	r = amdgpu_sync_wait(&sync, true);
1017 	amdgpu_sync_free(&sync);
1018 
1019 	return r;
1020 }
1021 
1022 /*
1023  * amdgpu_vm_update_pde - update a single level in the hierarchy
1024  *
1025  * @param: parameters for the update
1026  * @vm: requested vm
1027  * @parent: parent directory
1028  * @entry: entry to update
1029  *
1030  * Makes sure the requested entry in parent is up to date.
1031  */
1032 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
1033 				 struct amdgpu_vm *vm,
1034 				 struct amdgpu_vm_pt *parent,
1035 				 struct amdgpu_vm_pt *entry)
1036 {
1037 	struct amdgpu_bo *bo = parent->base.bo, *pbo;
1038 	uint64_t pde, pt, flags;
1039 	unsigned level;
1040 
1041 	/* Don't update huge pages here */
1042 	if (entry->huge)
1043 		return;
1044 
1045 	for (level = 0, pbo = bo->parent; pbo; ++level)
1046 		pbo = pbo->parent;
1047 
1048 	level += params->adev->vm_manager.root_level;
1049 	pt = amdgpu_bo_gpu_offset(entry->base.bo);
1050 	flags = AMDGPU_PTE_VALID;
1051 	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
1052 	pde = (entry - parent->entries) * 8;
1053 	if (bo->shadow)
1054 		params->func(params, bo->shadow, pde, pt, 1, 0, flags);
1055 	params->func(params, bo, pde, pt, 1, 0, flags);
1056 }
1057 
1058 /*
1059  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
1060  *
1061  * @adev: amdgpu_device pointer
1062  * @vm: related vm
1063  * @parent: parent PD
1064  * @level: VMPT level
1065  *
1066  * Mark all PD level as invalid after an error.
1067  */
1068 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1069 				       struct amdgpu_vm *vm,
1070 				       struct amdgpu_vm_pt *parent,
1071 				       unsigned level)
1072 {
1073 	unsigned pt_idx, num_entries;
1074 
1075 	/*
1076 	 * Recurse into the subdirectories. This recursion is harmless because
1077 	 * we only have a maximum of 5 layers.
1078 	 */
1079 	num_entries = amdgpu_vm_num_entries(adev, level);
1080 	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1081 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1082 
1083 		if (!entry->base.bo)
1084 			continue;
1085 
1086 		if (!entry->base.moved)
1087 			list_move(&entry->base.vm_status, &vm->relocated);
1088 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1089 	}
1090 }
1091 
1092 /*
1093  * amdgpu_vm_update_directories - make sure that all directories are valid
1094  *
1095  * @adev: amdgpu_device pointer
1096  * @vm: requested vm
1097  *
1098  * Makes sure all directories are up to date.
1099  *
1100  * Returns:
1101  * 0 for success, error for failure.
1102  */
1103 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1104 				 struct amdgpu_vm *vm)
1105 {
1106 	struct amdgpu_pte_update_params params;
1107 	struct amdgpu_job *job;
1108 	unsigned ndw = 0;
1109 	int r = 0;
1110 
1111 	if (list_empty(&vm->relocated))
1112 		return 0;
1113 
1114 restart:
1115 	memset(&params, 0, sizeof(params));
1116 	params.adev = adev;
1117 
1118 	if (vm->use_cpu_for_update) {
1119 		struct amdgpu_vm_bo_base *bo_base;
1120 
1121 		list_for_each_entry(bo_base, &vm->relocated, vm_status) {
1122 			r = amdgpu_bo_kmap(bo_base->bo, NULL);
1123 			if (unlikely(r))
1124 				return r;
1125 		}
1126 
1127 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1128 		if (unlikely(r))
1129 			return r;
1130 
1131 		params.func = amdgpu_vm_cpu_set_ptes;
1132 	} else {
1133 		ndw = 512 * 8;
1134 		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1135 		if (r)
1136 			return r;
1137 
1138 		params.ib = &job->ibs[0];
1139 		params.func = amdgpu_vm_do_set_ptes;
1140 	}
1141 
1142 	while (!list_empty(&vm->relocated)) {
1143 		struct amdgpu_vm_bo_base *bo_base, *parent;
1144 		struct amdgpu_vm_pt *pt, *entry;
1145 		struct amdgpu_bo *bo;
1146 
1147 		bo_base = list_first_entry(&vm->relocated,
1148 					   struct amdgpu_vm_bo_base,
1149 					   vm_status);
1150 		bo_base->moved = false;
1151 		list_del_init(&bo_base->vm_status);
1152 
1153 		bo = bo_base->bo->parent;
1154 		if (!bo)
1155 			continue;
1156 
1157 		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
1158 					  bo_list);
1159 		pt = container_of(parent, struct amdgpu_vm_pt, base);
1160 		entry = container_of(bo_base, struct amdgpu_vm_pt, base);
1161 
1162 		amdgpu_vm_update_pde(&params, vm, pt, entry);
1163 
1164 		if (!vm->use_cpu_for_update &&
1165 		    (ndw - params.ib->length_dw) < 32)
1166 			break;
1167 	}
1168 
1169 	if (vm->use_cpu_for_update) {
1170 		/* Flush HDP */
1171 		mb();
1172 		amdgpu_asic_flush_hdp(adev, NULL);
1173 	} else if (params.ib->length_dw == 0) {
1174 		amdgpu_job_free(job);
1175 	} else {
1176 		struct amdgpu_bo *root = vm->root.base.bo;
1177 		struct amdgpu_ring *ring;
1178 		struct dma_fence *fence;
1179 
1180 		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
1181 				    sched);
1182 
1183 		amdgpu_ring_pad_ib(ring, params.ib);
1184 		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1185 				 AMDGPU_FENCE_OWNER_VM, false);
1186 		WARN_ON(params.ib->length_dw > ndw);
1187 		r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1188 				      &fence);
1189 		if (r)
1190 			goto error;
1191 
1192 		amdgpu_bo_fence(root, fence, true);
1193 		dma_fence_put(vm->last_update);
1194 		vm->last_update = fence;
1195 	}
1196 
1197 	if (!list_empty(&vm->relocated))
1198 		goto restart;
1199 
1200 	return 0;
1201 
1202 error:
1203 	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1204 				   adev->vm_manager.root_level);
1205 	amdgpu_job_free(job);
1206 	return r;
1207 }
1208 
1209 /**
1210  * amdgpu_vm_find_entry - find the entry for an address
1211  *
1212  * @p: see amdgpu_pte_update_params definition
1213  * @addr: virtual address in question
1214  * @entry: resulting entry or NULL
1215  * @parent: parent entry
1216  *
1217  * Find the vm_pt entry and its parent for the given address.
1218  */
1219 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1220 			 struct amdgpu_vm_pt **entry,
1221 			 struct amdgpu_vm_pt **parent)
1222 {
1223 	unsigned level = p->adev->vm_manager.root_level;
1224 
1225 	*parent = NULL;
1226 	*entry = &p->vm->root;
1227 	while ((*entry)->entries) {
1228 		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1229 
1230 		*parent = *entry;
1231 		*entry = &(*entry)->entries[addr >> shift];
1232 		addr &= (1ULL << shift) - 1;
1233 	}
1234 
1235 	if (level != AMDGPU_VM_PTB)
1236 		*entry = NULL;
1237 }
1238 
1239 /**
1240  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1241  *
1242  * @p: see amdgpu_pte_update_params definition
1243  * @entry: vm_pt entry to check
1244  * @parent: parent entry
1245  * @nptes: number of PTEs updated with this operation
1246  * @dst: destination address where the PTEs should point to
1247  * @flags: access flags for the PTEs
1248  *
1249  * Check if we can update the PD with a huge page.
1250  */
1251 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1252 					struct amdgpu_vm_pt *entry,
1253 					struct amdgpu_vm_pt *parent,
1254 					unsigned nptes, uint64_t dst,
1255 					uint64_t flags)
1256 {
1257 	uint64_t pde;
1258 
1259 	/* In the case of a mixed PT the PDE must point to it */
1260 	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1261 	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1262 		/* Set the huge page flag to stop scanning at this PDE */
1263 		flags |= AMDGPU_PDE_PTE;
1264 	}
1265 
1266 	if (!(flags & AMDGPU_PDE_PTE)) {
1267 		if (entry->huge) {
1268 			/* Add the entry to the relocated list to update it. */
1269 			entry->huge = false;
1270 			list_move(&entry->base.vm_status, &p->vm->relocated);
1271 		}
1272 		return;
1273 	}
1274 
1275 	entry->huge = true;
1276 	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1277 
1278 	pde = (entry - parent->entries) * 8;
1279 	if (parent->base.bo->shadow)
1280 		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1281 	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1282 }
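
/*
 * Worked numbers, assuming AMDGPU_VM_PTE_COUNT(adev) == 512 and 4KB GPU
 * pages: one fully populated PTB covers 512 * 4KB = 2MB.  When a single
 * update writes all 512 PTEs of a PTB with one contiguous destination, the
 * PDB0 entry is marked AMDGPU_PDE_PTE and points at dst directly, so the
 * page table walk stops one level early for that 2MB range.
 */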
1283 
1284 /**
1285  * amdgpu_vm_update_ptes - make sure that page tables are valid
1286  *
1287  * @params: see amdgpu_pte_update_params definition
1288  * @start: start of GPU address range
1289  * @end: end of GPU address range
1290  * @dst: destination address to map to, the next dst inside the function
1291  * @flags: mapping flags
1292  *
1293  * Update the page tables in the range @start - @end.
1294  *
1295  * Returns:
1296  * 0 for success, -EINVAL for failure.
1297  */
1298 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1299 				  uint64_t start, uint64_t end,
1300 				  uint64_t dst, uint64_t flags)
1301 {
1302 	struct amdgpu_device *adev = params->adev;
1303 	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1304 
1305 	uint64_t addr, pe_start;
1306 	struct amdgpu_bo *pt;
1307 	unsigned nptes;
1308 
1309 	/* walk over the address space and update the page tables */
1310 	for (addr = start; addr < end; addr += nptes,
1311 	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1312 		struct amdgpu_vm_pt *entry, *parent;
1313 
1314 		amdgpu_vm_get_entry(params, addr, &entry, &parent);
1315 		if (!entry)
1316 			return -ENOENT;
1317 
1318 		if ((addr & ~mask) == (end & ~mask))
1319 			nptes = end - addr;
1320 		else
1321 			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1322 
1323 		amdgpu_vm_handle_huge_pages(params, entry, parent,
1324 					    nptes, dst, flags);
1325 		/* We don't need to update PTEs for huge pages */
1326 		if (entry->huge)
1327 			continue;
1328 
1329 		pt = entry->base.bo;
1330 		pe_start = (addr & mask) * 8;
1331 		if (pt->shadow)
1332 			params->func(params, pt->shadow, pe_start, dst, nptes,
1333 				     AMDGPU_GPU_PAGE_SIZE, flags);
1334 		params->func(params, pt, pe_start, dst, nptes,
1335 			     AMDGPU_GPU_PAGE_SIZE, flags);
1336 	}
1337 
1338 	return 0;
1339 }
1340 
1341 /*
1342  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1343  *
1344  * @params: see amdgpu_pte_update_params definition
1345  * @vm: requested vm
1346  * @start: first PTE to handle
1347  * @end: last PTE to handle
1348  * @dst: addr those PTEs should point to
1349  * @flags: hw mapping flags
1350  *
1351  * Returns:
1352  * 0 for success, -EINVAL for failure.
1353  */
1354 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1355 				uint64_t start, uint64_t end,
1356 				uint64_t dst, uint64_t flags)
1357 {
1358 	/**
1359 	 * The MC L1 TLB supports variable sized pages, based on a fragment
1360 	 * field in the PTE. When this field is set to a non-zero value, page
1361 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1362 	 * flags are considered valid for all PTEs within the fragment range
1363 	 * and corresponding mappings are assumed to be physically contiguous.
1364 	 *
1365 	 * The L1 TLB can store a single PTE for the whole fragment,
1366 	 * significantly increasing the space available for translation
1367 	 * caching. This leads to large improvements in throughput when the
1368 	 * TLB is under pressure.
1369 	 *
1370 	 * The L2 TLB distributes small and large fragments into two
1371 	 * asymmetric partitions. The large fragment cache is significantly
1372 	 * larger. Thus, we try to use large fragments wherever possible.
1373 	 * Userspace can support this by aligning virtual base address and
1374 	 * allocation size to the fragment size.
1375 	 */
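	/*
	 * Worked example, assuming fragment_size == 9 (2MB fragments): for
	 * start == 0x2200 and end == 0x2a00 (GPU page numbers),
	 * ffs(start) - 1 == 9 and fls64(end - start) - 1 == 11, so frag == 9.
	 * Since frag >= max_frag, every PTE in the range gets
	 * AMDGPU_PTE_FRAG(9) and the whole range is written by a single
	 * amdgpu_vm_update_ptes() call, each 512-page chunk acting as one
	 * 2MB fragment in the TLB.
	 */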
1376 	unsigned max_frag = params->adev->vm_manager.fragment_size;
1377 	int r;
1378 
1379 	/* system pages are not physically contiguous */
1380 	if (params->src || !(flags & AMDGPU_PTE_VALID))
1381 		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1382 
1383 	while (start != end) {
1384 		uint64_t frag_flags, frag_end;
1385 		unsigned frag;
1386 
1387 		/* This intentionally wraps around if no bit is set */
1388 		frag = min((unsigned)ffs(start) - 1,
1389 			   (unsigned)fls64(end - start) - 1);
1390 		if (frag >= max_frag) {
1391 			frag_flags = AMDGPU_PTE_FRAG(max_frag);
1392 			frag_end = end & ~((1ULL << max_frag) - 1);
1393 		} else {
1394 			frag_flags = AMDGPU_PTE_FRAG(frag);
1395 			frag_end = start + (1 << frag);
1396 		}
1397 
1398 		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1399 					  flags | frag_flags);
1400 		if (r)
1401 			return r;
1402 
1403 		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1404 		start = frag_end;
1405 	}
1406 
1407 	return 0;
1408 }
1409 
1410 /**
1411  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1412  *
1413  * @adev: amdgpu_device pointer
1414  * @exclusive: fence we need to sync to
1415  * @pages_addr: DMA addresses to use for mapping
1416  * @vm: requested vm
1417  * @start: start of mapped range
1418  * @last: last mapped entry
1419  * @flags: flags for the entries
1420  * @addr: addr to set the area to
1421  * @fence: optional resulting fence
1422  *
1423  * Fill in the page table entries between @start and @last.
1424  *
1425  * Returns:
1426  * 0 for success, -EINVAL for failure.
1427  */
1428 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1429 				       struct dma_fence *exclusive,
1430 				       dma_addr_t *pages_addr,
1431 				       struct amdgpu_vm *vm,
1432 				       uint64_t start, uint64_t last,
1433 				       uint64_t flags, uint64_t addr,
1434 				       struct dma_fence **fence)
1435 {
1436 	struct amdgpu_ring *ring;
1437 	void *owner = AMDGPU_FENCE_OWNER_VM;
1438 	unsigned nptes, ncmds, ndw;
1439 	struct amdgpu_job *job;
1440 	struct amdgpu_pte_update_params params;
1441 	struct dma_fence *f = NULL;
1442 	int r;
1443 
1444 	memset(&params, 0, sizeof(params));
1445 	params.adev = adev;
1446 	params.vm = vm;
1447 
1448 	/* sync to everything on unmapping */
1449 	if (!(flags & AMDGPU_PTE_VALID))
1450 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1451 
1452 	if (vm->use_cpu_for_update) {
1453 		/* params.src is used as a flag to indicate system memory */
1454 		if (pages_addr)
1455 			params.src = ~0;
1456 
1457 		/* Wait for PT BOs to be free. PTs share the same resv. object
1458 		 * as the root PD BO
1459 		 */
1460 		r = amdgpu_vm_wait_pd(adev, vm, owner);
1461 		if (unlikely(r))
1462 			return r;
1463 
1464 		params.func = amdgpu_vm_cpu_set_ptes;
1465 		params.pages_addr = pages_addr;
1466 		return amdgpu_vm_frag_ptes(&params, start, last + 1,
1467 					   addr, flags);
1468 	}
1469 
1470 	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
1471 
1472 	nptes = last - start + 1;
1473 
1474 	/*
1475 	 * reserve space for two commands every (1 << BLOCK_SIZE)
1476 	 * entries or 2k dwords (whatever is smaller)
1477 	 *
1478 	 * The second command is for the shadow pagetables.
1479 	 */
1480 	if (vm->root.base.bo->shadow)
1481 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1482 	else
1483 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1484 
1485 	/* padding, etc. */
1486 	ndw = 64;
1487 
1488 	if (pages_addr) {
1489 		/* copy commands needed */
1490 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1491 
1492 		/* and also PTEs */
1493 		ndw += nptes * 2;
1494 
1495 		params.func = amdgpu_vm_do_copy_ptes;
1496 
1497 	} else {
1498 		/* set page commands needed */
1499 		ndw += ncmds * 10;
1500 
1501 		/* extra commands for begin/end fragments */
1502 		if (vm->root.base.bo->shadow)
1503 		        ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
1504 		else
1505 		        ndw += 2 * 10 * adev->vm_manager.fragment_size;
1506 
1507 		params.func = amdgpu_vm_do_set_ptes;
1508 	}
1509 
1510 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1511 	if (r)
1512 		return r;
1513 
1514 	params.ib = &job->ibs[0];
1515 
1516 	if (pages_addr) {
1517 		uint64_t *pte;
1518 		unsigned i;
1519 
1520 		/* Put the PTEs at the end of the IB. */
1521 		i = ndw - nptes * 2;
1522 		pte = (uint64_t *)&(job->ibs->ptr[i]);
1523 		params.src = job->ibs->gpu_addr + i * 4;
1524 
1525 		for (i = 0; i < nptes; ++i) {
1526 			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1527 						    AMDGPU_GPU_PAGE_SIZE);
1528 			pte[i] |= flags;
1529 		}
1530 		addr = 0;
1531 	}
1532 
1533 	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1534 	if (r)
1535 		goto error_free;
1536 
1537 	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1538 			     owner, false);
1539 	if (r)
1540 		goto error_free;
1541 
1542 	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1543 	if (r)
1544 		goto error_free;
1545 
1546 	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1547 	if (r)
1548 		goto error_free;
1549 
1550 	amdgpu_ring_pad_ib(ring, params.ib);
1551 	WARN_ON(params.ib->length_dw > ndw);
1552 	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1553 	if (r)
1554 		goto error_free;
1555 
1556 	amdgpu_bo_fence(vm->root.base.bo, f, true);
1557 	dma_fence_put(*fence);
1558 	*fence = f;
1559 	return 0;
1560 
1561 error_free:
1562 	amdgpu_job_free(job);
1563 	return r;
1564 }
1565 
1566 /**
1567  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1568  *
1569  * @adev: amdgpu_device pointer
1570  * @exclusive: fence we need to sync to
1571  * @pages_addr: DMA addresses to use for mapping
1572  * @vm: requested vm
1573  * @mapping: mapped range and flags to use for the update
1574  * @flags: HW flags for the mapping
1575  * @nodes: array of drm_mm_nodes with the MC addresses
1576  * @fence: optional resulting fence
1577  *
1578  * Split the mapping into smaller chunks so that each update fits
1579  * into a SDMA IB.
1580  *
1581  * Returns:
1582  * 0 for success, -EINVAL for failure.
1583  */
1584 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1585 				      struct dma_fence *exclusive,
1586 				      dma_addr_t *pages_addr,
1587 				      struct amdgpu_vm *vm,
1588 				      struct amdgpu_bo_va_mapping *mapping,
1589 				      uint64_t flags,
1590 				      struct drm_mm_node *nodes,
1591 				      struct dma_fence **fence)
1592 {
1593 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1594 	uint64_t pfn, start = mapping->start;
1595 	int r;
1596 
1597 	/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits
1598 	 * here, but just in case, filter the flags first.
1599 	 */
1600 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1601 		flags &= ~AMDGPU_PTE_READABLE;
1602 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1603 		flags &= ~AMDGPU_PTE_WRITEABLE;
1604 
1605 	flags &= ~AMDGPU_PTE_EXECUTABLE;
1606 	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1607 
1608 	flags &= ~AMDGPU_PTE_MTYPE_MASK;
1609 	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1610 
1611 	if ((mapping->flags & AMDGPU_PTE_PRT) &&
1612 	    (adev->asic_type >= CHIP_VEGA10)) {
1613 		flags |= AMDGPU_PTE_PRT;
1614 		flags &= ~AMDGPU_PTE_VALID;
1615 	}
1616 
1617 	trace_amdgpu_vm_bo_update(mapping);
1618 
1619 	pfn = mapping->offset >> PAGE_SHIFT;
1620 	if (nodes) {
1621 		while (pfn >= nodes->size) {
1622 			pfn -= nodes->size;
1623 			++nodes;
1624 		}
1625 	}
1626 
1627 	do {
1628 		dma_addr_t *dma_addr = NULL;
1629 		uint64_t max_entries;
1630 		uint64_t addr, last;
1631 
1632 		if (nodes) {
1633 			addr = nodes->start << PAGE_SHIFT;
1634 			max_entries = (nodes->size - pfn) *
1635 				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1636 		} else {
1637 			addr = 0;
1638 			max_entries = S64_MAX;
1639 		}
1640 
1641 		if (pages_addr) {
1642 			uint64_t count;
1643 
1644 			max_entries = min(max_entries, 16ull * 1024ull);
1645 			for (count = 1;
1646 			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1647 			     ++count) {
1648 				uint64_t idx = pfn + count;
1649 
1650 				if (pages_addr[idx] !=
1651 				    (pages_addr[idx - 1] + PAGE_SIZE))
1652 					break;
1653 			}
1654 
1655 			if (count < min_linear_pages) {
1656 				addr = pfn << PAGE_SHIFT;
1657 				dma_addr = pages_addr;
1658 			} else {
1659 				addr = pages_addr[pfn];
1660 				max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1661 			}
1662 
1663 		} else if (flags & AMDGPU_PTE_VALID) {
1664 			addr += adev->vm_manager.vram_base_offset;
1665 			addr += pfn << PAGE_SHIFT;
1666 		}
1667 
1668 		last = min((uint64_t)mapping->last, start + max_entries - 1);
1669 		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1670 						start, last, flags, addr,
1671 						fence);
1672 		if (r)
1673 			return r;
1674 
1675 		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1676 		if (nodes && nodes->size == pfn) {
1677 			pfn = 0;
1678 			++nodes;
1679 		}
1680 		start = last + 1;
1681 
1682 	} while (unlikely(start != mapping->last + 1));
1683 
1684 	return 0;
1685 }
1686 
1687 /**
1688  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1689  *
1690  * @adev: amdgpu_device pointer
1691  * @bo_va: requested BO and VM object
1692  * @clear: if true clear the entries
1693  *
1694  * Fill in the page table entries for @bo_va.
1695  *
1696  * Returns:
1697  * 0 for success, -EINVAL for failure.
1698  */
1699 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1700 			struct amdgpu_bo_va *bo_va,
1701 			bool clear)
1702 {
1703 	struct amdgpu_bo *bo = bo_va->base.bo;
1704 	struct amdgpu_vm *vm = bo_va->base.vm;
1705 	struct amdgpu_bo_va_mapping *mapping;
1706 	dma_addr_t *pages_addr = NULL;
1707 	struct ttm_mem_reg *mem;
1708 	struct drm_mm_node *nodes;
1709 	struct dma_fence *exclusive, **last_update;
1710 	uint64_t flags;
1711 	int r;
1712 
1713 	if (clear || !bo) {
1714 		mem = NULL;
1715 		nodes = NULL;
1716 		exclusive = NULL;
1717 	} else {
1718 		struct ttm_dma_tt *ttm;
1719 
1720 		mem = &bo->tbo.mem;
1721 		nodes = mem->mm_node;
1722 		if (mem->mem_type == TTM_PL_TT) {
1723 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1724 			pages_addr = ttm->dma_address;
1725 		}
1726 		exclusive = reservation_object_get_excl(bo->tbo.resv);
1727 	}
1728 
1729 	if (bo)
1730 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1731 	else
1732 		flags = 0x0;
1733 
1734 	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1735 		last_update = &vm->last_update;
1736 	else
1737 		last_update = &bo_va->last_pt_update;
1738 
1739 	if (!clear && bo_va->base.moved) {
1740 		bo_va->base.moved = false;
1741 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1742 
1743 	} else if (bo_va->cleared != clear) {
1744 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1745 	}
1746 
1747 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1748 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1749 					       mapping, flags, nodes,
1750 					       last_update);
1751 		if (r)
1752 			return r;
1753 	}
1754 
1755 	if (vm->use_cpu_for_update) {
1756 		/* Flush HDP */
1757 		mb();
1758 		amdgpu_asic_flush_hdp(adev, NULL);
1759 	}
1760 
1761 	spin_lock(&vm->moved_lock);
1762 	list_del_init(&bo_va->base.vm_status);
1763 	spin_unlock(&vm->moved_lock);
1764 
1765 	/* If the BO is not in its preferred location add it back to
1766 	 * the evicted list so that it gets validated again on the
1767 	 * next command submission.
1768 	 */
1769 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1770 		uint32_t mem_type = bo->tbo.mem.mem_type;
1771 
1772 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1773 			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1774 		else
1775 			list_add(&bo_va->base.vm_status, &vm->idle);
1776 	}
1777 
1778 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1779 	bo_va->cleared = clear;
1780 
1781 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1782 		list_for_each_entry(mapping, &bo_va->valids, list)
1783 			trace_amdgpu_vm_bo_mapping(mapping);
1784 	}
1785 
1786 	return 0;
1787 }
1788 
1789 /**
1790  * amdgpu_vm_update_prt_state - update the global PRT state
1791  *
1792  * @adev: amdgpu_device pointer
1793  */
1794 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1795 {
1796 	unsigned long flags;
1797 	bool enable;
1798 
1799 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1800 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1801 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1802 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1803 }
1804 
1805 /**
1806  * amdgpu_vm_prt_get - add a PRT user
1807  *
1808  * @adev: amdgpu_device pointer
1809  */
1810 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1811 {
1812 	if (!adev->gmc.gmc_funcs->set_prt)
1813 		return;
1814 
1815 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1816 		amdgpu_vm_update_prt_state(adev);
1817 }
1818 
1819 /**
1820  * amdgpu_vm_prt_put - drop a PRT user
1821  *
1822  * @adev: amdgpu_device pointer
1823  */
1824 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1825 {
1826 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1827 		amdgpu_vm_update_prt_state(adev);
1828 }
1829 
1830 /**
1831  * amdgpu_vm_prt_cb - callback for updating the PRT status
1832  *
1833  * @fence: fence for the callback
1834  * @_cb: the callback function
1835  */
1836 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1837 {
1838 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1839 
1840 	amdgpu_vm_prt_put(cb->adev);
1841 	kfree(cb);
1842 }
1843 
1844 /**
1845  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1846  *
1847  * @adev: amdgpu_device pointer
1848  * @fence: fence for the callback
1849  */
1850 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1851 				 struct dma_fence *fence)
1852 {
1853 	struct amdgpu_prt_cb *cb;
1854 
1855 	if (!adev->gmc.gmc_funcs->set_prt)
1856 		return;
1857 
1858 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1859 	if (!cb) {
1860 		/* Last resort when we are OOM */
1861 		if (fence)
1862 			dma_fence_wait(fence, false);
1863 
1864 		amdgpu_vm_prt_put(adev);
1865 	} else {
1866 		cb->adev = adev;
1867 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1868 						     amdgpu_vm_prt_cb))
1869 			amdgpu_vm_prt_cb(fence, &cb->cb);
1870 	}
1871 }
1872 
1873 /**
1874  * amdgpu_vm_free_mapping - free a mapping
1875  *
1876  * @adev: amdgpu_device pointer
1877  * @vm: requested vm
1878  * @mapping: mapping to be freed
1879  * @fence: fence of the unmap operation
1880  *
1881  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1882  */
1883 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1884 				   struct amdgpu_vm *vm,
1885 				   struct amdgpu_bo_va_mapping *mapping,
1886 				   struct dma_fence *fence)
1887 {
1888 	if (mapping->flags & AMDGPU_PTE_PRT)
1889 		amdgpu_vm_add_prt_cb(adev, fence);
1890 	kfree(mapping);
1891 }
1892 
1893 /**
1894  * amdgpu_vm_prt_fini - finish all prt mappings
1895  *
1896  * @adev: amdgpu_device pointer
1897  * @vm: requested vm
1898  *
1899  * Register a cleanup callback to disable PRT support after VM dies.
1900  */
1901 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1902 {
1903 	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1904 	struct dma_fence *excl, **shared;
1905 	unsigned i, shared_count;
1906 	int r;
1907 
1908 	r = reservation_object_get_fences_rcu(resv, &excl,
1909 					      &shared_count, &shared);
1910 	if (r) {
1911 		/* Not enough memory to grab the fence list, so as a last resort
1912 		 * block for all the fences to complete.
1913 		 */
1914 		reservation_object_wait_timeout_rcu(resv, true, false,
1915 						    MAX_SCHEDULE_TIMEOUT);
1916 		return;
1917 	}
1918 
1919 	/* Add a callback for each fence in the reservation object */
1920 	amdgpu_vm_prt_get(adev);
1921 	amdgpu_vm_add_prt_cb(adev, excl);
1922 
1923 	for (i = 0; i < shared_count; ++i) {
1924 		amdgpu_vm_prt_get(adev);
1925 		amdgpu_vm_add_prt_cb(adev, shared[i]);
1926 	}
1927 
1928 	kfree(shared);
1929 }
1930 
1931 /**
1932  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1933  *
1934  * @adev: amdgpu_device pointer
1935  * @vm: requested vm
1936  * @fence: optional resulting fence (unchanged if no work needed to be done
1937  * or if an error occurred)
1938  *
1939  * Make sure all freed BOs are cleared in the PT.
1940  * PTs have to be reserved and mutex must be locked!
1941  *
1942  * Returns:
1943  * 0 for success.
1944  *
1945  */
1946 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1947 			  struct amdgpu_vm *vm,
1948 			  struct dma_fence **fence)
1949 {
1950 	struct amdgpu_bo_va_mapping *mapping;
1951 	uint64_t init_pte_value = 0;
1952 	struct dma_fence *f = NULL;
1953 	int r;
1954 
1955 	while (!list_empty(&vm->freed)) {
1956 		mapping = list_first_entry(&vm->freed,
1957 			struct amdgpu_bo_va_mapping, list);
1958 		list_del(&mapping->list);
1959 
1960 		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1961 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1962 
1963 		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1964 						mapping->start, mapping->last,
1965 						init_pte_value, 0, &f);
1966 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1967 		if (r) {
1968 			dma_fence_put(f);
1969 			return r;
1970 		}
1971 	}
1972 
1973 	if (fence && f) {
1974 		dma_fence_put(*fence);
1975 		*fence = f;
1976 	} else {
1977 		dma_fence_put(f);
1978 	}
1979 
1980 	return 0;
1981 
1982 }
1983 
1984 /**
1985  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1986  *
1987  * @adev: amdgpu_device pointer
1988  * @vm: requested vm
1989  *
1990  * Make sure all BOs which are moved are updated in the PTs.
1991  *
1992  * Returns:
1993  * 0 for success.
1994  *
1995  * PTs have to be reserved!
1996  */
1997 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1998 			   struct amdgpu_vm *vm)
1999 {
2000 	struct amdgpu_bo_va *bo_va, *tmp;
2001 	struct list_head moved;
2002 	bool clear;
2003 	int r;
2004 
2005 	INIT_LIST_HEAD(&moved);
2006 	spin_lock(&vm->moved_lock);
2007 	list_splice_init(&vm->moved, &moved);
2008 	spin_unlock(&vm->moved_lock);
2009 
2010 	list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
2011 		struct reservation_object *resv = bo_va->base.bo->tbo.resv;
2012 
2013 		/* Per VM BOs never need to be cleared in the page tables */
2014 		if (resv == vm->root.base.bo->tbo.resv)
2015 			clear = false;
2016 		/* Try to reserve the BO to avoid clearing its ptes */
2017 		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
2018 			clear = false;
2019 		/* Somebody else is using the BO right now */
2020 		else
2021 			clear = true;
2022 
2023 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
2024 		if (r) {
2025 			spin_lock(&vm->moved_lock);
2026 			list_splice(&moved, &vm->moved);
2027 			spin_unlock(&vm->moved_lock);
2028 			return r;
2029 		}
2030 
2031 		if (!clear && resv != vm->root.base.bo->tbo.resv)
2032 			reservation_object_unlock(resv);
2033 
2034 	}
2035 
2036 	return 0;
2037 }
2038 
2039 /**
2040  * amdgpu_vm_bo_add - add a bo to a specific vm
2041  *
2042  * @adev: amdgpu_device pointer
2043  * @vm: requested vm
2044  * @bo: amdgpu buffer object
2045  *
2046  * Add @bo into the requested vm and to the list of bos associated
2047  * with the vm.
2048  *
2049  * Returns:
2050  * Newly added bo_va or NULL for failure
2051  *
2052  * Object has to be reserved!
2053  */
2054 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2055 				      struct amdgpu_vm *vm,
2056 				      struct amdgpu_bo *bo)
2057 {
2058 	struct amdgpu_bo_va *bo_va;
2059 
2060 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2061 	if (bo_va == NULL) {
2062 		return NULL;
2063 	}
2064 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2065 
2066 	bo_va->ref_count = 1;
2067 	INIT_LIST_HEAD(&bo_va->valids);
2068 	INIT_LIST_HEAD(&bo_va->invalids);
2069 
2070 	return bo_va;
2071 }
2072 
2073 
2074 /**
2075  * amdgpu_vm_bo_insert_map - insert a new mapping
2076  *
2077  * @adev: amdgpu_device pointer
2078  * @bo_va: bo_va to store the address
2079  * @mapping: the mapping to insert
2080  *
2081  * Insert a new mapping into all structures.
2082  */
2083 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2084 				    struct amdgpu_bo_va *bo_va,
2085 				    struct amdgpu_bo_va_mapping *mapping)
2086 {
2087 	struct amdgpu_vm *vm = bo_va->base.vm;
2088 	struct amdgpu_bo *bo = bo_va->base.bo;
2089 
2090 	mapping->bo_va = bo_va;
2091 	list_add(&mapping->list, &bo_va->invalids);
2092 	amdgpu_vm_it_insert(mapping, &vm->va);
2093 
2094 	if (mapping->flags & AMDGPU_PTE_PRT)
2095 		amdgpu_vm_prt_get(adev);
2096 
2097 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2098 	    !bo_va->base.moved) {
2099 		spin_lock(&vm->moved_lock);
2100 		list_move(&bo_va->base.vm_status, &vm->moved);
2101 		spin_unlock(&vm->moved_lock);
2102 	}
2103 	trace_amdgpu_vm_bo_map(bo_va, mapping);
2104 }
2105 
2106 /**
2107  * amdgpu_vm_bo_map - map bo inside a vm
2108  *
2109  * @adev: amdgpu_device pointer
2110  * @bo_va: bo_va to store the address
2111  * @saddr: where to map the BO
2112  * @offset: requested offset in the BO
2113  * @size: BO size in bytes
2114  * @flags: attributes of pages (read/write/valid/etc.)
2115  *
2116  * Add a mapping of the BO at the specified addr into the VM.
2117  *
2118  * Returns:
2119  * 0 for success, error for failure.
2120  *
2121  * Object has to be reserved and unreserved outside!
2122  */
2123 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2124 		     struct amdgpu_bo_va *bo_va,
2125 		     uint64_t saddr, uint64_t offset,
2126 		     uint64_t size, uint64_t flags)
2127 {
2128 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2129 	struct amdgpu_bo *bo = bo_va->base.bo;
2130 	struct amdgpu_vm *vm = bo_va->base.vm;
2131 	uint64_t eaddr;
2132 
2133 	/* validate the parameters */
2134 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2135 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2136 		return -EINVAL;
2137 
2138 	/* make sure object fit at this offset */
2139 	eaddr = saddr + size - 1;
2140 	if (saddr >= eaddr ||
2141 	    (bo && offset + size > amdgpu_bo_size(bo)))
2142 		return -EINVAL;
2143 
2144 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2145 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2146 
2147 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2148 	if (tmp) {
2149 		/* bo and tmp overlap, invalid addr */
2150 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2151 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2152 			tmp->start, tmp->last + 1);
2153 		return -EINVAL;
2154 	}
2155 
2156 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2157 	if (!mapping)
2158 		return -ENOMEM;
2159 
2160 	mapping->start = saddr;
2161 	mapping->last = eaddr;
2162 	mapping->offset = offset;
2163 	mapping->flags = flags;
2164 
2165 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2166 
2167 	return 0;
2168 }
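
/*
 * Illustrative parameter example (assuming the usual 4 KiB
 * AMDGPU_GPU_PAGE_SIZE): mapping the first 1 MiB of a BO at GPU VA
 * 0x100000 would pass saddr = 0x100000, offset = 0 and size = 0x100000,
 * all multiples of the GPU page size; unaligned or zero-sized requests
 * are rejected with -EINVAL above.
 */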
2169 
2170 /**
2171  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2172  *
2173  * @adev: amdgpu_device pointer
2174  * @bo_va: bo_va to store the address
2175  * @saddr: where to map the BO
2176  * @offset: requested offset in the BO
2177  * @size: BO size in bytes
2178  * @flags: attributes of pages (read/write/valid/etc.)
2179  *
2180  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2181  * mappings as we do so.
2182  *
2183  * Returns:
2184  * 0 for success, error for failure.
2185  *
2186  * Object has to be reserved and unreserved outside!
2187  */
2188 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2189 			     struct amdgpu_bo_va *bo_va,
2190 			     uint64_t saddr, uint64_t offset,
2191 			     uint64_t size, uint64_t flags)
2192 {
2193 	struct amdgpu_bo_va_mapping *mapping;
2194 	struct amdgpu_bo *bo = bo_va->base.bo;
2195 	uint64_t eaddr;
2196 	int r;
2197 
2198 	/* validate the parameters */
2199 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2200 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2201 		return -EINVAL;
2202 
2203 	/* make sure object fit at this offset */
2204 	eaddr = saddr + size - 1;
2205 	if (saddr >= eaddr ||
2206 	    (bo && offset + size > amdgpu_bo_size(bo)))
2207 		return -EINVAL;
2208 
2209 	/* Allocate all the needed memory */
2210 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2211 	if (!mapping)
2212 		return -ENOMEM;
2213 
2214 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2215 	if (r) {
2216 		kfree(mapping);
2217 		return r;
2218 	}
2219 
2220 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2221 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2222 
2223 	mapping->start = saddr;
2224 	mapping->last = eaddr;
2225 	mapping->offset = offset;
2226 	mapping->flags = flags;
2227 
2228 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2229 
2230 	return 0;
2231 }
2232 
2233 /**
2234  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2235  *
2236  * @adev: amdgpu_device pointer
2237  * @bo_va: bo_va to remove the address from
2238  * @saddr: where the BO is mapped
2239  *
2240  * Remove a mapping of the BO at the specified addr from the VM.
2241  *
2242  * Returns:
2243  * 0 for success, error for failure.
2244  *
2245  * Object has to be reserved and unreserved outside!
2246  */
2247 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2248 		       struct amdgpu_bo_va *bo_va,
2249 		       uint64_t saddr)
2250 {
2251 	struct amdgpu_bo_va_mapping *mapping;
2252 	struct amdgpu_vm *vm = bo_va->base.vm;
2253 	bool valid = true;
2254 
2255 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2256 
2257 	list_for_each_entry(mapping, &bo_va->valids, list) {
2258 		if (mapping->start == saddr)
2259 			break;
2260 	}
2261 
2262 	if (&mapping->list == &bo_va->valids) {
2263 		valid = false;
2264 
2265 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2266 			if (mapping->start == saddr)
2267 				break;
2268 		}
2269 
2270 		if (&mapping->list == &bo_va->invalids)
2271 			return -ENOENT;
2272 	}
2273 
2274 	list_del(&mapping->list);
2275 	amdgpu_vm_it_remove(mapping, &vm->va);
2276 	mapping->bo_va = NULL;
2277 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2278 
2279 	if (valid)
2280 		list_add(&mapping->list, &vm->freed);
2281 	else
2282 		amdgpu_vm_free_mapping(adev, vm, mapping,
2283 				       bo_va->last_pt_update);
2284 
2285 	return 0;
2286 }
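
/*
 * Note on the valids/invalids handling above: a mapping found on the
 * valids list already has PTEs set up, so it is queued on vm->freed and
 * cleared later by amdgpu_vm_clear_freed(); a mapping still on the
 * invalids list never reached the page tables and can be freed right away.
 */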
2287 
2288 /**
2289  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2290  *
2291  * @adev: amdgpu_device pointer
2292  * @vm: VM structure to use
2293  * @saddr: start of the range
2294  * @size: size of the range
2295  *
2296  * Remove all mappings in a range, splitting them as appropriate.
2297  *
2298  * Returns:
2299  * 0 for success, error for failure.
2300  */
2301 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2302 				struct amdgpu_vm *vm,
2303 				uint64_t saddr, uint64_t size)
2304 {
2305 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2306 	DRM_LIST_HEAD(removed);
2307 	uint64_t eaddr;
2308 
2309 	eaddr = saddr + size - 1;
2310 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2311 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2312 
2313 	/* Allocate all the needed memory */
2314 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2315 	if (!before)
2316 		return -ENOMEM;
2317 	INIT_LIST_HEAD(&before->list);
2318 
2319 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2320 	if (!after) {
2321 		kfree(before);
2322 		return -ENOMEM;
2323 	}
2324 	INIT_LIST_HEAD(&after->list);
2325 
2326 	/* Now gather all removed mappings */
2327 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2328 	while (tmp) {
2329 		/* Remember mapping split at the start */
2330 		if (tmp->start < saddr) {
2331 			before->start = tmp->start;
2332 			before->last = saddr - 1;
2333 			before->offset = tmp->offset;
2334 			before->flags = tmp->flags;
2335 			before->bo_va = tmp->bo_va;
2336 			list_add(&before->list, &tmp->bo_va->invalids);
2337 		}
2338 
2339 		/* Remember mapping split at the end */
2340 		if (tmp->last > eaddr) {
2341 			after->start = eaddr + 1;
2342 			after->last = tmp->last;
2343 			after->offset = tmp->offset;
2344 			after->offset += after->start - tmp->start;
2345 			after->flags = tmp->flags;
2346 			after->bo_va = tmp->bo_va;
2347 			list_add(&after->list, &tmp->bo_va->invalids);
2348 		}
2349 
2350 		list_del(&tmp->list);
2351 		list_add(&tmp->list, &removed);
2352 
2353 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2354 	}
2355 
2356 	/* And free them up */
2357 	list_for_each_entry_safe(tmp, next, &removed, list) {
2358 		amdgpu_vm_it_remove(tmp, &vm->va);
2359 		list_del(&tmp->list);
2360 
2361 		if (tmp->start < saddr)
2362 		    tmp->start = saddr;
2363 		if (tmp->last > eaddr)
2364 		    tmp->last = eaddr;
2365 
2366 		tmp->bo_va = NULL;
2367 		list_add(&tmp->list, &vm->freed);
2368 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2369 	}
2370 
2371 	/* Insert partial mapping before the range */
2372 	if (!list_empty(&before->list)) {
2373 		amdgpu_vm_it_insert(before, &vm->va);
2374 		if (before->flags & AMDGPU_PTE_PRT)
2375 			amdgpu_vm_prt_get(adev);
2376 	} else {
2377 		kfree(before);
2378 	}
2379 
2380 	/* Insert partial mapping after the range */
2381 	if (!list_empty(&after->list)) {
2382 		amdgpu_vm_it_insert(after, &vm->va);
2383 		if (after->flags & AMDGPU_PTE_PRT)
2384 			amdgpu_vm_prt_get(adev);
2385 	} else {
2386 		kfree(after);
2387 	}
2388 
2389 	return 0;
2390 }
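
/*
 * Rough example of the split logic above (addresses in GPU pages):
 * clearing [0x200, 0x2ff] out of an existing mapping [0x100, 0x3ff]
 * queues the original mapping, clipped to [0x200, 0x2ff], on vm->freed
 * and re-inserts two remainders, "before" = [0x100, 0x1ff] and
 * "after" = [0x300, 0x3ff], with the offset of the tail piece advanced
 * to match its new start.
 */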
2391 
2392 /**
2393  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2394  *
2395  * @vm: the requested VM
2396  * @addr: the address
2397  *
2398  * Find a mapping by its address.
2399  *
2400  * Returns:
2401  * The amdgpu_bo_va_mapping matching @addr, or NULL
2402  *
2403  */
2404 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2405 							 uint64_t addr)
2406 {
2407 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2408 }
2409 
2410 /**
2411  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2412  *
2413  * @vm: the requested vm
2414  * @ticket: CS ticket
2415  *
2416  * Trace all mappings of BOs reserved during a command submission.
2417  */
2418 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2419 {
2420 	struct amdgpu_bo_va_mapping *mapping;
2421 
2422 	if (!trace_amdgpu_vm_bo_cs_enabled())
2423 		return;
2424 
2425 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2426 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2427 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2428 			struct amdgpu_bo *bo;
2429 
2430 			bo = mapping->bo_va->base.bo;
2431 			if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2432 				continue;
2433 		}
2434 
2435 		trace_amdgpu_vm_bo_cs(mapping);
2436 	}
2437 }
2438 
2439 /**
2440  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2441  *
2442  * @adev: amdgpu_device pointer
2443  * @bo_va: requested bo_va
2444  *
2445  * Remove @bo_va->bo from the requested vm.
2446  *
2447  * Object has to be reserved!
2448  */
2449 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2450 		      struct amdgpu_bo_va *bo_va)
2451 {
2452 	struct amdgpu_bo_va_mapping *mapping, *next;
2453 	struct amdgpu_vm *vm = bo_va->base.vm;
2454 
2455 	list_del(&bo_va->base.bo_list);
2456 
2457 	spin_lock(&vm->moved_lock);
2458 	list_del(&bo_va->base.vm_status);
2459 	spin_unlock(&vm->moved_lock);
2460 
2461 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2462 		list_del(&mapping->list);
2463 		amdgpu_vm_it_remove(mapping, &vm->va);
2464 		mapping->bo_va = NULL;
2465 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2466 		list_add(&mapping->list, &vm->freed);
2467 	}
2468 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2469 		list_del(&mapping->list);
2470 		amdgpu_vm_it_remove(mapping, &vm->va);
2471 		amdgpu_vm_free_mapping(adev, vm, mapping,
2472 				       bo_va->last_pt_update);
2473 	}
2474 
2475 	dma_fence_put(bo_va->last_pt_update);
2476 	kfree(bo_va);
2477 }
2478 
2479 /**
2480  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2481  *
2482  * @adev: amdgpu_device pointer
2483  * @bo: amdgpu buffer object
2484  * @evicted: is the BO evicted
2485  *
2486  * Mark @bo as invalid.
2487  */
2488 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2489 			     struct amdgpu_bo *bo, bool evicted)
2490 {
2491 	struct amdgpu_vm_bo_base *bo_base;
2492 
2493 	/* shadow bo doesn't have bo base, its validation needs its parent */
2494 	if (bo->parent && bo->parent->shadow == bo)
2495 		bo = bo->parent;
2496 
2497 	list_for_each_entry(bo_base, &bo->va, bo_list) {
2498 		struct amdgpu_vm *vm = bo_base->vm;
2499 		bool was_moved = bo_base->moved;
2500 
2501 		bo_base->moved = true;
2502 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2503 			if (bo->tbo.type == ttm_bo_type_kernel)
2504 				list_move(&bo_base->vm_status, &vm->evicted);
2505 			else
2506 				list_move_tail(&bo_base->vm_status,
2507 					       &vm->evicted);
2508 			continue;
2509 		}
2510 
2511 		if (was_moved)
2512 			continue;
2513 
2514 		if (bo->tbo.type == ttm_bo_type_kernel) {
2515 			list_move(&bo_base->vm_status, &vm->relocated);
2516 		} else {
2517 			spin_lock(&bo_base->vm->moved_lock);
2518 			list_move(&bo_base->vm_status, &vm->moved);
2519 			spin_unlock(&bo_base->vm->moved_lock);
2520 		}
2521 	}
2522 }
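
/*
 * Summary of the state transitions above: on eviction, BOs sharing the
 * root reservation go to vm->evicted (page-table BOs at the head, user
 * BOs at the tail); otherwise page-table BOs move to vm->relocated and
 * user BOs to vm->moved, skipping BOs that were already marked as moved.
 */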
2523 
2524 /**
2525  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2526  *
2527  * @vm_size: VM size
2528  *
2529  * Returns:
2530  * VM page table size as a power of two
2531  */
2532 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2533 {
2534 	/* Total bits covered by PD + PTs */
2535 	unsigned bits = ilog2(vm_size) + 18;
2536 
2537 	/* Make sure the PD is 4K in size up to 8GB address space.
2538 	   Above that, split the bits equally between PD and PTs */
2539 	if (vm_size <= 8)
2540 		return (bits - 9);
2541 	else
2542 		return ((bits + 3) / 2);
2543 }
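
/*
 * Worked example for the calculation above (illustrative): an 8 GB VM
 * gives bits = ilog2(8) + 18 = 21, so with a 4K (9 bit) PD the PTs get
 * 21 - 9 = 12 bits each; a 256 GB VM gives bits = 26 and the split
 * yields (26 + 3) / 2 = 14 bits per PT.
 */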
2544 
2545 /**
2546  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2547  *
2548  * @adev: amdgpu_device pointer
2549  * @min_vm_size: the minimum vm size in GB if it's set to auto
2550  * @fragment_size_default: Default PTE fragment size
2551  * @max_level: max VMPT level
2552  * @max_bits: max address space size in bits
2553  *
2554  */
2555 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2556 			   uint32_t fragment_size_default, unsigned max_level,
2557 			   unsigned max_bits)
2558 {
2559 	unsigned int max_size = 1 << (max_bits - 30);
2560 	unsigned int vm_size;
2561 	uint64_t tmp;
2562 
2563 	/* adjust vm size first */
2564 	if (amdgpu_vm_size != -1) {
2565 		vm_size = amdgpu_vm_size;
2566 		if (vm_size > max_size) {
2567 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2568 				 amdgpu_vm_size, max_size);
2569 			vm_size = max_size;
2570 		}
2571 	} else {
2572 #ifdef __linux__
2573 		struct sysinfo si;
2574 #endif
2575 		unsigned int phys_ram_gb;
2576 
2577 		/* Optimal VM size depends on the amount of physical
2578 		 * RAM available. Underlying requirements and
2579 		 * assumptions:
2580 		 *
2581 		 *  - Need to map system memory and VRAM from all GPUs
2582 		 *     - VRAM from other GPUs not known here
2583 		 *     - Assume VRAM <= system memory
2584 		 *  - On GFX8 and older, VM space can be segmented for
2585 		 *    different MTYPEs
2586 		 *  - Need to allow room for fragmentation, guard pages etc.
2587 		 *
2588 		 * This adds up to a rough guess of system memory x3.
2589 		 * Round up to power of two to maximize the available
2590 		 * VM size with the given page table size.
2591 		 */
2592 #ifdef __linux__
2593 		si_meminfo(&si);
2594 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2595 			       (1 << 30) - 1) >> 30;
2596 #else
2597 		phys_ram_gb = ((uint64_t)ptoa(physmem) +
2598 			       (1 << 30) - 1) >> 30;
2599 #endif
2600 		vm_size = roundup_pow_of_two(
2601 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2602 	}
2603 
2604 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2605 
2606 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2607 	if (amdgpu_vm_block_size != -1)
2608 		tmp >>= amdgpu_vm_block_size - 9;
2609 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2610 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2611 	switch (adev->vm_manager.num_level) {
2612 	case 3:
2613 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2614 		break;
2615 	case 2:
2616 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2617 		break;
2618 	case 1:
2619 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2620 		break;
2621 	default:
2622 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2623 	}
2624 	/* block size depends on vm size and hw setup */
2625 	if (amdgpu_vm_block_size != -1)
2626 		adev->vm_manager.block_size =
2627 			min((unsigned)amdgpu_vm_block_size, max_bits
2628 			    - AMDGPU_GPU_PAGE_SHIFT
2629 			    - 9 * adev->vm_manager.num_level);
2630 	else if (adev->vm_manager.num_level > 1)
2631 		adev->vm_manager.block_size = 9;
2632 	else
2633 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2634 
2635 	if (amdgpu_vm_fragment_size == -1)
2636 		adev->vm_manager.fragment_size = fragment_size_default;
2637 	else
2638 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2639 
2640 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2641 		 vm_size, adev->vm_manager.num_level + 1,
2642 		 adev->vm_manager.block_size,
2643 		 adev->vm_manager.fragment_size);
2644 }
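
/*
 * Rough example of the arithmetic above (illustrative; suppose the code
 * settles on vm_size = 256 GB and amdgpu_vm_block_size is left at -1):
 * max_pfn = 256 << 18 = 2^26 pages, fls64(2^26) - 1 = 26 and
 * DIV_ROUND_UP(26, 9) - 1 = 2, so num_level = min(max_level, 2) and,
 * with more than one level, block_size defaults to 9 bits.
 */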
2645 
2646 /**
2647  * amdgpu_vm_init - initialize a vm instance
2648  *
2649  * @adev: amdgpu_device pointer
2650  * @vm: requested vm
2651  * @vm_context: Indicates whether it is a GFX or Compute context
2652  * @pasid: Process address space identifier
2653  *
2654  * Init @vm fields.
2655  *
2656  * Returns:
2657  * 0 for success, error for failure.
2658  */
2659 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2660 		   int vm_context, unsigned int pasid)
2661 {
2662 	struct amdgpu_bo_param bp;
2663 	struct amdgpu_bo *root;
2664 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2665 		AMDGPU_VM_PTE_COUNT(adev) * 8);
2666 	unsigned ring_instance;
2667 	struct amdgpu_ring *ring;
2668 	struct drm_sched_rq *rq;
2669 	unsigned long size;
2670 	uint64_t flags;
2671 	int r, i;
2672 
2673 	vm->va = RB_ROOT_CACHED;
2674 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2675 		vm->reserved_vmid[i] = NULL;
2676 	INIT_LIST_HEAD(&vm->evicted);
2677 	INIT_LIST_HEAD(&vm->relocated);
2678 	mtx_init(&vm->moved_lock, IPL_TTY);
2679 	INIT_LIST_HEAD(&vm->moved);
2680 	INIT_LIST_HEAD(&vm->idle);
2681 	INIT_LIST_HEAD(&vm->freed);
2682 
2683 	/* create scheduler entity for page table updates */
2684 
2685 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2686 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
2687 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
2688 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2689 	r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
2690 	if (r)
2691 		return r;
2692 
2693 	vm->pte_support_ats = false;
2694 
2695 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2696 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2697 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2698 
2699 		if (adev->asic_type == CHIP_RAVEN)
2700 			vm->pte_support_ats = true;
2701 	} else {
2702 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2703 						AMDGPU_VM_USE_CPU_FOR_GFX);
2704 	}
2705 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2706 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2707 	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2708 		  "CPU update of VM recommended only for large BAR system\n");
2709 	vm->last_update = NULL;
2710 
2711 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2712 	if (vm->use_cpu_for_update)
2713 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2714 	else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
2715 		flags |= AMDGPU_GEM_CREATE_SHADOW;
2716 
2717 	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2718 	memset(&bp, 0, sizeof(bp));
2719 	bp.size = size;
2720 	bp.byte_align = align;
2721 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2722 	bp.flags = flags;
2723 	bp.type = ttm_bo_type_kernel;
2724 	bp.resv = NULL;
2725 	r = amdgpu_bo_create(adev, &bp, &root);
2726 	if (r)
2727 		goto error_free_sched_entity;
2728 
2729 	r = amdgpu_bo_reserve(root, true);
2730 	if (r)
2731 		goto error_free_root;
2732 
2733 	r = amdgpu_vm_clear_bo(adev, vm, root,
2734 			       adev->vm_manager.root_level,
2735 			       vm->pte_support_ats);
2736 	if (r)
2737 		goto error_unreserve;
2738 
2739 	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2740 	amdgpu_bo_unreserve(vm->root.base.bo);
2741 
2742 	if (pasid) {
2743 		unsigned long flags;
2744 
2745 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2746 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2747 			      GFP_ATOMIC);
2748 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2749 		if (r < 0)
2750 			goto error_free_root;
2751 
2752 		vm->pasid = pasid;
2753 	}
2754 
2755 #ifdef __linux__
2756 	INIT_KFIFO(vm->faults);
2757 #else
2758 	SIMPLEQ_INIT(&vm->faults);
2759 #endif
2760 	vm->fault_credit = 16;
2761 
2762 	return 0;
2763 
2764 error_unreserve:
2765 	amdgpu_bo_unreserve(vm->root.base.bo);
2766 
2767 error_free_root:
2768 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2769 	amdgpu_bo_unref(&vm->root.base.bo);
2770 	vm->root.base.bo = NULL;
2771 
2772 error_free_sched_entity:
2773 	drm_sched_entity_destroy(&vm->entity);
2774 
2775 	return r;
2776 }
2777 
2778 /**
2779  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2780  *
2781  * @adev: amdgpu_device pointer
2782  * @vm: requested vm
2783  *
2784  * This only works on GFX VMs that don't have any BOs added and no
2785  * page tables allocated yet.
2786  *
2787  * Changes the following VM parameters:
2788  * - use_cpu_for_update
2789  * - pte_support_ats
2790  * - pasid (old PASID is released, because compute manages its own PASIDs)
2791  *
2792  * Reinitializes the page directory to reflect the changed ATS
2793  * setting.
2794  *
2795  * Returns:
2796  * 0 for success, -errno for errors.
2797  */
2798 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2799 {
2800 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2801 	int r;
2802 
2803 	r = amdgpu_bo_reserve(vm->root.base.bo, true);
2804 	if (r)
2805 		return r;
2806 
2807 	/* Sanity checks */
2808 #ifdef __linux__
2809 	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2810 #else
2811 	if (!RB_EMPTY_ROOT(&vm->va) || vm->root.entries) {
2812 #endif
2813 		r = -EINVAL;
2814 		goto error;
2815 	}
2816 
2817 	/* Check if PD needs to be reinitialized and do it before
2818 	 * changing any other state, in case it fails.
2819 	 */
2820 	if (pte_support_ats != vm->pte_support_ats) {
2821 		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2822 			       adev->vm_manager.root_level,
2823 			       pte_support_ats);
2824 		if (r)
2825 			goto error;
2826 	}
2827 
2828 	/* Update VM state */
2829 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2830 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2831 	vm->pte_support_ats = pte_support_ats;
2832 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2833 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2834 	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2835 		  "CPU update of VM recommended only for large BAR system\n");
2836 
2837 	if (vm->pasid) {
2838 		unsigned long flags;
2839 
2840 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2841 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2842 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2843 
2844 		vm->pasid = 0;
2845 	}
2846 
2847 	/* Free the shadow bo for compute VM */
2848 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2849 
2850 error:
2851 	amdgpu_bo_unreserve(vm->root.base.bo);
2852 	return r;
2853 }
2854 
2855 /**
2856  * amdgpu_vm_free_levels - free PD/PT levels
2857  *
2858  * @adev: amdgpu device structure
2859  * @parent: PD/PT starting level to free
2860  * @level: level of parent structure
2861  *
2862  * Free the page directory or page table level and all sub levels.
2863  */
2864 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2865 				  struct amdgpu_vm_pt *parent,
2866 				  unsigned level)
2867 {
2868 	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2869 
2870 	if (parent->base.bo) {
2871 		list_del(&parent->base.bo_list);
2872 		list_del(&parent->base.vm_status);
2873 		amdgpu_bo_unref(&parent->base.bo->shadow);
2874 		amdgpu_bo_unref(&parent->base.bo);
2875 	}
2876 
2877 	if (parent->entries)
2878 		for (i = 0; i < num_entries; i++)
2879 			amdgpu_vm_free_levels(adev, &parent->entries[i],
2880 					      level + 1);
2881 
2882 	kvfree(parent->entries);
2883 }
2884 
2885 /**
2886  * amdgpu_vm_fini - tear down a vm instance
2887  *
2888  * @adev: amdgpu_device pointer
2889  * @vm: requested vm
2890  *
2891  * Tear down @vm.
2892  * Unbind the VM and remove all bos from the vm bo list
2893  */
2894 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2895 {
2896 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2897 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2898 	struct amdgpu_bo *root;
2899 	u64 fault;
2900 	int i, r;
2901 	struct amdgpu_vm_fault *vmf;
2902 
2903 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2904 
2905 	/* Clear pending page faults from IH when the VM is destroyed */
2906 #ifdef __linux__
2907 	while (kfifo_get(&vm->faults, &fault))
2908 		amdgpu_ih_clear_fault(adev, fault);
2909 #else
2910 	while (!SIMPLEQ_EMPTY(&vm->faults)) {
2911 		vmf = SIMPLEQ_FIRST(&vm->faults);
2912 		fault = vmf->val;
2913 		SIMPLEQ_REMOVE_HEAD(&vm->faults, vm_fault_entry);
2914 		free(vmf, M_DRM, sizeof(*vmf));
2915 		amdgpu_ih_clear_fault(adev, fault);
2916 	}
2917 #endif
2918 
2919 	if (vm->pasid) {
2920 		unsigned long flags;
2921 
2922 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2923 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2924 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2925 	}
2926 
2927 	drm_sched_entity_destroy(&vm->entity);
2928 
2929 #ifdef __linux__
2930 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2931 		dev_err(adev->dev, "still active bo inside vm\n");
2932 	}
2933 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2934 					     &vm->va.rb_root, rb) {
2935 		list_del(&mapping->list);
2936 		amdgpu_vm_it_remove(mapping, &vm->va);
2937 		kfree(mapping);
2938 	}
2939 #else
2940 	if (!RB_EMPTY_ROOT(&vm->va)) {
2941 		dev_err(adev->dev, "still active bo inside vm\n");
2942 	}
2943 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2944 					     &vm->va, rb) {
2945 		list_del(&mapping->list);
2946 		amdgpu_vm_it_remove(mapping, &vm->va);
2947 		kfree(mapping);
2948 	}
2949 #endif
2950 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2951 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2952 			amdgpu_vm_prt_fini(adev, vm);
2953 			prt_fini_needed = false;
2954 		}
2955 
2956 		list_del(&mapping->list);
2957 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2958 	}
2959 
2960 	root = amdgpu_bo_ref(vm->root.base.bo);
2961 	r = amdgpu_bo_reserve(root, true);
2962 	if (r) {
2963 		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2964 	} else {
2965 		amdgpu_vm_free_levels(adev, &vm->root,
2966 				      adev->vm_manager.root_level);
2967 		amdgpu_bo_unreserve(root);
2968 	}
2969 	amdgpu_bo_unref(&root);
2970 	dma_fence_put(vm->last_update);
2971 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2972 		amdgpu_vmid_free_reserved(adev, vm, i);
2973 }
2974 
2975 /**
2976  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2977  *
2978  * @adev: amdgpu_device pointer
2979  * @pasid: PASID to identify the VM
2980  *
2981  * This function is expected to be called in interrupt context.
2982  *
2983  * Returns:
2984  * True if there was fault credit, false otherwise
2985  */
2986 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2987 				  unsigned int pasid)
2988 {
2989 	struct amdgpu_vm *vm;
2990 
2991 	spin_lock(&adev->vm_manager.pasid_lock);
2992 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2993 	if (!vm) {
2994 		/* VM not found, can't track fault credit */
2995 		spin_unlock(&adev->vm_manager.pasid_lock);
2996 		return true;
2997 	}
2998 
2999 	/* No lock needed. Only accessed by IRQ handler */
3000 	if (!vm->fault_credit) {
3001 		/* Too many faults in this VM */
3002 		spin_unlock(&adev->vm_manager.pasid_lock);
3003 		return false;
3004 	}
3005 
3006 	vm->fault_credit--;
3007 	spin_unlock(&adev->vm_manager.pasid_lock);
3008 	return true;
3009 }
3010 
3011 /**
3012  * amdgpu_vm_manager_init - init the VM manager
3013  *
3014  * @adev: amdgpu_device pointer
3015  *
3016  * Initialize the VM manager structures
3017  */
3018 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3019 {
3020 	unsigned i;
3021 
3022 	amdgpu_vmid_mgr_init(adev);
3023 
3024 	adev->vm_manager.fence_context =
3025 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3026 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3027 		adev->vm_manager.seqno[i] = 0;
3028 
3029 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
3030 	mtx_init(&adev->vm_manager.prt_lock, IPL_TTY);
3031 	atomic_set(&adev->vm_manager.num_prt_users, 0);
3032 
3033 	/* If not overridden by the user, compute VM tables are by default
3034 	 * updated by the CPU only on large BAR systems
3035 	 */
3036 #ifdef __amd64__
3037 	if (amdgpu_vm_update_mode == -1) {
3038 		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
3039 			adev->vm_manager.vm_update_mode =
3040 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3041 		else
3042 			adev->vm_manager.vm_update_mode = 0;
3043 	} else
3044 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3045 #else
3046 	adev->vm_manager.vm_update_mode = 0;
3047 #endif
3048 
3049 	idr_init(&adev->vm_manager.pasid_idr);
3050 	mtx_init(&adev->vm_manager.pasid_lock, IPL_TTY);
3051 }
3052 
3053 /**
3054  * amdgpu_vm_manager_fini - cleanup VM manager
3055  *
3056  * @adev: amdgpu_device pointer
3057  *
3058  * Cleanup the VM manager and free resources.
3059  */
3060 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3061 {
3062 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3063 	idr_destroy(&adev->vm_manager.pasid_idr);
3064 
3065 	amdgpu_vmid_mgr_fini(adev);
3066 }
3067 
3068 /**
3069  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3070  *
3071  * @dev: drm device pointer
3072  * @data: drm_amdgpu_vm
3073  * @filp: drm file pointer
3074  *
3075  * Returns:
3076  * 0 for success, -errno for errors.
3077  */
3078 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3079 {
3080 	union drm_amdgpu_vm *args = data;
3081 	struct amdgpu_device *adev = dev->dev_private;
3082 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
3083 	int r;
3084 
3085 	switch (args->in.op) {
3086 	case AMDGPU_VM_OP_RESERVE_VMID:
3087 		/* currently, we only need to reserve a vmid from the gfxhub */
3088 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
3089 		if (r)
3090 			return r;
3091 		break;
3092 	case AMDGPU_VM_OP_UNRESERVE_VMID:
3093 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
3094 		break;
3095 	default:
3096 		return -EINVAL;
3097 	}
3098 
3099 	return 0;
3100 }
3101 
3102 /**
3103  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3104  *
3105  * @adev: amdgpu_device pointer
3106  * @pasid: PASID identifier for VM
3107  * @task_info: task_info to fill.
3108  */
3109 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3110 			 struct amdgpu_task_info *task_info)
3111 {
3112 	struct amdgpu_vm *vm;
3113 	unsigned long flags;
3114 
3115 	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3116 
3117 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3118 	if (vm)
3119 		*task_info = vm->task_info;
3120 
3121 	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3122 }
3123 
3124 /**
3125  * amdgpu_vm_set_task_info - Sets VMs task info.
3126  *
3127  * @vm: vm for which to set the info
3128  */
3129 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3130 {
3131 	if (!vm->task_info.pid) {
3132 #ifdef __linux__
3133 		vm->task_info.pid = current->pid;
3134 		get_task_comm(vm->task_info.task_name, current);
3135 
3136 		if (current->group_leader->mm == current->mm) {
3137 			vm->task_info.tgid = current->group_leader->pid;
3138 			get_task_comm(vm->task_info.process_name, current->group_leader);
3139 		}
3140 #else
3141 		vm->task_info.pid = curproc->p_p->ps_pid;
3142 		strlcpy(vm->task_info.task_name, curproc->p_p->ps_comm,
3143 		    sizeof(vm->task_info.task_name));
3144 #endif
3145 	}
3146 }
3147