xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37 
38 /**
39  * DOC: GPUVM
40  *
41  * GPUVM is similar to the legacy GART on older ASICs; however,
42  * rather than there being a single global GART table
43  * for the entire GPU, there are multiple VM page tables active
44  * at any given time.  The VM page tables can contain a mix of
45  * VRAM pages and system memory pages, and system memory pages
46  * can be mapped as snooped (cached system pages) or unsnooped
47  * (uncached system pages).
48  * Each VM has an ID associated with it and there is a page table
49  * associated with each VMID.  When executing a command buffer,
50  * the kernel tells the ring what VMID to use for that command
51  * buffer.  VMIDs are allocated dynamically as commands are submitted.
52  * The userspace drivers maintain their own address space and the kernel
53  * sets up their page tables accordingly when they submit their
54  * command buffers and a VMID is assigned.
55  * Cayman/Trinity support up to 8 active VMs at any given time;
56  * SI supports 16.
57  */
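
/*
 * A rough sketch of the translation path described above: the VMID
 * programmed for a command buffer selects a root page directory, and a
 * virtual address is then resolved by walking root PD -> intermediate
 * PDs -> PT -> physical page.  See amdgpu_vm_level_shift() below for how
 * the address bits are split between the levels.
 */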
58 
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
61 
62 #ifdef __linux__
63 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
64 		     START, LAST, static, amdgpu_vm_it)
65 #else
66 static struct amdgpu_bo_va_mapping *
67 amdgpu_vm_it_iter_first(struct rb_root_cached *root, uint64_t start,
68     uint64_t last)
69 {
70 	struct amdgpu_bo_va_mapping *node;
71 	struct rb_node *rb;
72 
73 	for (rb = rb_first(root); rb; rb = rb_next(rb)) {
74 		node = rb_entry(rb, typeof(*node), rb);
75 		if (LAST(node) >= start && START(node) <= last)
76 			return node;
77 	}
78 	return NULL;
79 }
80 
81 static struct amdgpu_bo_va_mapping *
82 amdgpu_vm_it_iter_next(struct amdgpu_bo_va_mapping *node, uint64_t start,
83     uint64_t last)
84 {
85 	STUB();
86 	struct rb_node *rb = &node->rb;
87 
88 	for (rb = rb_next(rb); rb; rb = rb_next(rb)) {
89 		node = rb_entry(rb, typeof(*node), rb);
90 		if (LAST(node) >= start && START(node) <= last)
91 			return node;
92 	}
93 	return NULL;
94 }
95 
96 static void
97 amdgpu_vm_it_remove(struct amdgpu_bo_va_mapping *node,
98     struct rb_root_cached *root)
99 {
100 	rb_erase(&node->rb, root);
101 }
102 
103 static void
104 amdgpu_vm_it_insert(struct amdgpu_bo_va_mapping *node,
105     struct rb_root_cached *root)
106 {
107 	struct rb_node **iter = &root->rb_node;
108 	struct rb_node *parent = NULL;
109 	struct amdgpu_bo_va_mapping *iter_node;
110 
111 	while (*iter) {
112 		parent = *iter;
113 		iter_node = rb_entry(*iter, struct amdgpu_bo_va_mapping, rb);
114 
115 		if (node->start < iter_node->start)
116 			iter = &(*iter)->rb_left;
117 		else
118 			iter = &(*iter)->rb_right;
119 	}
120 
121 	rb_link_node(&node->rb, parent, iter);
122 	rb_insert_color(&node->rb, root);
123 }
124 #endif
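
/*
 * Note that unlike the Linux INTERVAL_TREE_DEFINE() variant above, the
 * OpenBSD fallback walks the whole rb-tree linearly for every lookup
 * instead of using augmented interval-tree metadata, which is correct but
 * considerably slower for VMs with many mappings.
 */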
125 
126 #undef START
127 #undef LAST
128 
129 /**
130  * struct amdgpu_pte_update_params - Local structure
131  *
132  * Encapsulate some VM table update parameters to reduce
133  * the number of function parameters
134  *
135  */
136 struct amdgpu_pte_update_params {
137 
138 	/**
139 	 * @adev: amdgpu device we do this update for
140 	 */
141 	struct amdgpu_device *adev;
142 
143 	/**
144 	 * @vm: optional amdgpu_vm we do this update for
145 	 */
146 	struct amdgpu_vm *vm;
147 
148 	/**
149 	 * @src: address where to copy page table entries from
150 	 */
151 	uint64_t src;
152 
153 	/**
154 	 * @ib: indirect buffer to fill with commands
155 	 */
156 	struct amdgpu_ib *ib;
157 
158 	/**
159 	 * @func: Function which actually does the update
160 	 */
161 	void (*func)(struct amdgpu_pte_update_params *params,
162 		     struct amdgpu_bo *bo, uint64_t pe,
163 		     uint64_t addr, unsigned count, uint32_t incr,
164 		     uint64_t flags);
165 	/**
166 	 * @pages_addr:
167 	 *
168 	 * DMA addresses to use for mapping, used during VM update by CPU
169 	 */
170 	dma_addr_t *pages_addr;
171 
172 	/**
173 	 * @kptr:
174 	 *
175 	 * Kernel pointer of PD/PT BO that needs to be updated,
176 	 * used during VM update by CPU
177 	 */
178 	void *kptr;
179 };
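
/*
 * The @func member above is what selects the update path: it is set to
 * amdgpu_vm_cpu_set_ptes for CPU based updates and to amdgpu_vm_do_set_ptes
 * or amdgpu_vm_do_copy_ptes for SDMA based updates, see
 * amdgpu_vm_update_directories() and amdgpu_vm_bo_update_mapping() below.
 */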
180 
181 /**
182  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
183  */
184 struct amdgpu_prt_cb {
185 
186 	/**
187 	 * @adev: amdgpu device
188 	 */
189 	struct amdgpu_device *adev;
190 
191 	/**
192 	 * @cb: callback
193 	 */
194 	struct dma_fence_cb cb;
195 };
196 
197 /**
198  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
199  *
200  * @base: base structure for tracking BO usage in a VM
201  * @vm: vm to which bo is to be added
202  * @bo: amdgpu buffer object
203  *
204  * Initialize a vm_bo_base structure and add it to the appropriate lists
205  *
206  */
207 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
208 				   struct amdgpu_vm *vm,
209 				   struct amdgpu_bo *bo)
210 {
211 	base->vm = vm;
212 	base->bo = bo;
213 	INIT_LIST_HEAD(&base->bo_list);
214 	INIT_LIST_HEAD(&base->vm_status);
215 
216 	if (!bo)
217 		return;
218 	list_add_tail(&base->bo_list, &bo->va);
219 
220 	if (bo->tbo.type == ttm_bo_type_kernel)
221 		list_move(&base->vm_status, &vm->relocated);
222 
223 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
224 		return;
225 
226 	if (bo->preferred_domains &
227 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
228 		return;
229 
230 	/*
231 	 * We checked all the prerequisites, but it looks like this per-VM BO
232 	 * is currently evicted. Add the BO to the evicted list to make sure it
233 	 * is validated on next VM use to avoid a fault.
234 	 */
235 	list_move_tail(&base->vm_status, &vm->evicted);
236 	base->moved = true;
237 }
238 
239 /**
240  * amdgpu_vm_level_shift - return the addr shift for each level
241  *
242  * @adev: amdgpu_device pointer
243  * @level: VMPT level
244  *
245  * Returns:
246  * The number of bits the pfn needs to be right shifted for a level.
247  */
248 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
249 				      unsigned level)
250 {
251 	unsigned shift = 0xff;
252 
253 	switch (level) {
254 	case AMDGPU_VM_PDB2:
255 	case AMDGPU_VM_PDB1:
256 	case AMDGPU_VM_PDB0:
257 		shift = 9 * (AMDGPU_VM_PDB0 - level) +
258 			adev->vm_manager.block_size;
259 		break;
260 	case AMDGPU_VM_PTB:
261 		shift = 0;
262 		break;
263 	default:
264 		dev_err(adev->dev, "the level %d isn't supported.\n", level);
265 	}
266 
267 	return shift;
268 }
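
/*
 * Worked example, assuming a block_size of 9 (the real value is ASIC and
 * configuration dependent): the PTB gets shift 0, PDB0 shifts by 9,
 * PDB1 by 18 and PDB2 by 27, i.e. every directory level above the page
 * table covers 9 additional address bits.
 */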
269 
270 /**
271  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
272  *
273  * @adev: amdgpu_device pointer
274  * @level: VMPT level
275  *
276  * Returns:
277  * The number of entries in a page directory or page table.
278  */
279 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
280 				      unsigned level)
281 {
282 	unsigned shift = amdgpu_vm_level_shift(adev,
283 					       adev->vm_manager.root_level);
284 
285 	if (level == adev->vm_manager.root_level)
286 		/* For the root directory */
287 		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
288 	else if (level != AMDGPU_VM_PTB)
289 		/* Everything in between */
290 		return 512;
291 	else
292 		/* For the page tables on the leaves */
293 		return AMDGPU_VM_PTE_COUNT(adev);
294 }
295 
296 /**
297  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
298  *
299  * @adev: amdgpu_device pointer
300  * @level: VMPT level
301  *
302  * Returns:
303  * The size of the BO for a page directory or page table in bytes.
304  */
305 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
306 {
307 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
308 }
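
/*
 * For example, an intermediate page directory with 512 entries of 8 bytes
 * each needs a 512 * 8 = 4096 byte BO, which is already GPU page aligned.
 */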
309 
310 /**
311  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
312  *
313  * @vm: vm providing the BOs
314  * @validated: head of validation list
315  * @entry: entry to add
316  *
317  * Add the page directory to the list of BOs to
318  * validate for command submission.
319  */
320 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
321 			 struct list_head *validated,
322 			 struct amdgpu_bo_list_entry *entry)
323 {
324 	entry->robj = vm->root.base.bo;
325 	entry->priority = 0;
326 	entry->tv.bo = &entry->robj->tbo;
327 	entry->tv.shared = true;
328 	entry->user_pages = NULL;
329 	list_add(&entry->tv.head, validated);
330 }
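
/*
 * A minimal usage sketch (names purely illustrative): a caller building a
 * validation list for command submission would typically do
 *
 *	struct amdgpu_bo_list_entry pd;
 *	struct list_head validated;
 *
 *	INIT_LIST_HEAD(&validated);
 *	amdgpu_vm_get_pd_bo(vm, &validated, &pd);
 *
 * and then reserve and validate everything on &validated.
 */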
331 
332 /**
333  * amdgpu_vm_validate_pt_bos - validate the page table BOs
334  *
335  * @adev: amdgpu device pointer
336  * @vm: vm providing the BOs
337  * @validate: callback to do the validation
338  * @param: parameter for the validation callback
339  *
340  * Validate the page table BOs on command submission if necessary.
341  *
342  * Returns:
343  * Validation result.
344  */
345 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
346 			      int (*validate)(void *p, struct amdgpu_bo *bo),
347 			      void *param)
348 {
349 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
350 	struct amdgpu_vm_bo_base *bo_base, *tmp;
351 	int r = 0;
352 
353 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
354 		struct amdgpu_bo *bo = bo_base->bo;
355 
356 		if (bo->parent) {
357 			r = validate(param, bo);
358 			if (r)
359 				break;
360 
361 			spin_lock(&glob->lru_lock);
362 			ttm_bo_move_to_lru_tail(&bo->tbo);
363 			if (bo->shadow)
364 				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
365 			spin_unlock(&glob->lru_lock);
366 		}
367 
368 		if (bo->tbo.type != ttm_bo_type_kernel) {
369 			spin_lock(&vm->moved_lock);
370 			list_move(&bo_base->vm_status, &vm->moved);
371 			spin_unlock(&vm->moved_lock);
372 		} else {
373 			list_move(&bo_base->vm_status, &vm->relocated);
374 		}
375 	}
376 
377 	spin_lock(&glob->lru_lock);
378 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
379 		struct amdgpu_bo *bo = bo_base->bo;
380 
381 		if (!bo->parent)
382 			continue;
383 
384 		ttm_bo_move_to_lru_tail(&bo->tbo);
385 		if (bo->shadow)
386 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
387 	}
388 	spin_unlock(&glob->lru_lock);
389 
390 	return r;
391 }
392 
393 /**
394  * amdgpu_vm_ready - check VM is ready for updates
395  *
396  * @vm: VM to check
397  *
398  * Check if all VM PDs/PTs are ready for updates
399  *
400  * Returns:
401  * True if eviction list is empty.
402  */
403 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
404 {
405 	return list_empty(&vm->evicted);
406 }
407 
408 /**
409  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
410  *
411  * @adev: amdgpu_device pointer
412  * @vm: VM to clear BO from
413  * @bo: BO to clear
414  * @level: level this BO is at
415  * @pte_support_ats: indicate ATS support from PTE
416  *
417  * Root PD needs to be reserved when calling this.
418  *
419  * Returns:
420  * 0 on success, errno otherwise.
421  */
422 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
423 			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
424 			      unsigned level, bool pte_support_ats)
425 {
426 	struct ttm_operation_ctx ctx = { true, false };
427 	struct dma_fence *fence = NULL;
428 	unsigned entries, ats_entries;
429 	struct amdgpu_ring *ring;
430 	struct amdgpu_job *job;
431 	uint64_t addr;
432 	int r;
433 
434 	entries = amdgpu_bo_size(bo) / 8;
435 
436 	if (pte_support_ats) {
437 		if (level == adev->vm_manager.root_level) {
438 			ats_entries = amdgpu_vm_level_shift(adev, level);
439 			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
440 			ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
441 			ats_entries = min(ats_entries, entries);
442 			entries -= ats_entries;
443 		} else {
444 			ats_entries = entries;
445 			entries = 0;
446 		}
447 	} else {
448 		ats_entries = 0;
449 	}
450 
451 	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
452 
453 	r = reservation_object_reserve_shared(bo->tbo.resv);
454 	if (r)
455 		return r;
456 
457 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
458 	if (r)
459 		goto error;
460 
461 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
462 	if (r)
463 		goto error;
464 
465 	addr = amdgpu_bo_gpu_offset(bo);
466 	if (ats_entries) {
467 		uint64_t ats_value;
468 
469 		ats_value = AMDGPU_PTE_DEFAULT_ATC;
470 		if (level != AMDGPU_VM_PTB)
471 			ats_value |= AMDGPU_PDE_PTE;
472 
473 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
474 				      ats_entries, 0, ats_value);
475 		addr += ats_entries * 8;
476 	}
477 
478 	if (entries)
479 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
480 				      entries, 0, 0);
481 
482 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
483 
484 	WARN_ON(job->ibs[0].length_dw > 64);
485 	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
486 			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
487 	if (r)
488 		goto error_free;
489 
490 	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
491 			      &fence);
492 	if (r)
493 		goto error_free;
494 
495 	amdgpu_bo_fence(bo, fence, true);
496 	dma_fence_put(fence);
497 
498 	if (bo->shadow)
499 		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
500 					  level, pte_support_ats);
501 
502 	return 0;
503 
504 error_free:
505 	amdgpu_job_free(job);
506 
507 error:
508 	return r;
509 }
510 
511 /**
512  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
513  *
514  * @adev: amdgpu_device pointer
515  * @vm: requested vm
516  * @parent: parent PT
517  * @saddr: start of the address range
518  * @eaddr: end of the address range
519  * @level: VMPT level
520  * @ats: indicate ATS support from PTE
521  *
522  * Make sure the page directories and page tables are allocated
523  *
524  * Returns:
525  * 0 on success, errno otherwise.
526  */
527 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
528 				  struct amdgpu_vm *vm,
529 				  struct amdgpu_vm_pt *parent,
530 				  uint64_t saddr, uint64_t eaddr,
531 				  unsigned level, bool ats)
532 {
533 	unsigned shift = amdgpu_vm_level_shift(adev, level);
534 	unsigned pt_idx, from, to;
535 	u64 flags;
536 	int r;
537 
538 	if (!parent->entries) {
539 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
540 
541 		parent->entries = kvmalloc_array(num_entries,
542 						   sizeof(struct amdgpu_vm_pt),
543 						   GFP_KERNEL | __GFP_ZERO);
544 		if (!parent->entries)
545 			return -ENOMEM;
546 		memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
547 	}
548 
549 	from = saddr >> shift;
550 	to = eaddr >> shift;
551 	if (from >= amdgpu_vm_num_entries(adev, level) ||
552 	    to >= amdgpu_vm_num_entries(adev, level))
553 		return -EINVAL;
554 
555 	++level;
556 	saddr = saddr & ((1 << shift) - 1);
557 	eaddr = eaddr & ((1 << shift) - 1);
558 
559 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
560 	if (vm->root.base.bo->shadow)
561 		flags |= AMDGPU_GEM_CREATE_SHADOW;
562 	if (vm->use_cpu_for_update)
563 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
564 	else
565 		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
566 
567 	/* walk over the address space and allocate the page tables */
568 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
569 		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
570 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
571 		struct amdgpu_bo *pt;
572 
573 		if (!entry->base.bo) {
574 			struct amdgpu_bo_param bp;
575 
576 			memset(&bp, 0, sizeof(bp));
577 			bp.size = amdgpu_vm_bo_size(adev, level);
578 			bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
579 			bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
580 			bp.flags = flags;
581 			bp.type = ttm_bo_type_kernel;
582 			bp.resv = resv;
583 			r = amdgpu_bo_create(adev, &bp, &pt);
584 			if (r)
585 				return r;
586 
587 			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
588 			if (r) {
589 				amdgpu_bo_unref(&pt->shadow);
590 				amdgpu_bo_unref(&pt);
591 				return r;
592 			}
593 
594 			if (vm->use_cpu_for_update) {
595 				r = amdgpu_bo_kmap(pt, NULL);
596 				if (r) {
597 					amdgpu_bo_unref(&pt->shadow);
598 					amdgpu_bo_unref(&pt);
599 					return r;
600 				}
601 			}
602 
603 			/* Keep a reference to the parent directory to avoid
604 			 * freeing it up in the wrong order.
605 			 */
606 			pt->parent = amdgpu_bo_ref(parent->base.bo);
607 
608 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
609 		}
610 
611 		if (level < AMDGPU_VM_PTB) {
612 			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
613 			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
614 				((1 << shift) - 1);
615 			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
616 						   sub_eaddr, level, ats);
617 			if (r)
618 				return r;
619 		}
620 	}
621 
622 	return 0;
623 }
624 
625 /**
626  * amdgpu_vm_alloc_pts - Allocate page tables.
627  *
628  * @adev: amdgpu_device pointer
629  * @vm: VM to allocate page tables for
630  * @saddr: Start address which needs to be allocated
631  * @size: Size from start address we need.
632  *
633  * Make sure the page tables are allocated.
634  *
635  * Returns:
636  * 0 on success, errno otherwise.
637  */
638 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
639 			struct amdgpu_vm *vm,
640 			uint64_t saddr, uint64_t size)
641 {
642 	uint64_t eaddr;
643 	bool ats = false;
644 
645 	/* validate the parameters */
646 	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
647 		return -EINVAL;
648 
649 	eaddr = saddr + size - 1;
650 
651 	if (vm->pte_support_ats)
652 		ats = saddr < AMDGPU_VA_HOLE_START;
653 
654 	saddr /= AMDGPU_GPU_PAGE_SIZE;
655 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
656 
657 	if (eaddr >= adev->vm_manager.max_pfn) {
658 		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
659 			eaddr, adev->vm_manager.max_pfn);
660 		return -EINVAL;
661 	}
662 
663 	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
664 				      adev->vm_manager.root_level, ats);
665 }
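
/*
 * Usage sketch (illustrative only): before mapping a GPU VA range a caller
 * would typically do
 *
 *	r = amdgpu_vm_alloc_pts(adev, vm, saddr, size);
 *	if (r)
 *		return r;
 *
 * with saddr and size GPU page aligned, so that the page tables backing the
 * range exist before any PTEs are written.
 */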
666 
667 /**
668  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
669  *
670  * @adev: amdgpu_device pointer
671  */
672 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
673 {
674 	const struct amdgpu_ip_block *ip_block;
675 	bool has_compute_vm_bug;
676 	struct amdgpu_ring *ring;
677 	int i;
678 
679 	has_compute_vm_bug = false;
680 
681 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
682 	if (ip_block) {
683 		/* Compute has a VM bug for GFX version < 7.
684 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
685 		if (ip_block->version->major <= 7)
686 			has_compute_vm_bug = true;
687 		else if (ip_block->version->major == 8)
688 			if (adev->gfx.mec_fw_version < 673)
689 				has_compute_vm_bug = true;
690 	}
691 
692 	for (i = 0; i < adev->num_rings; i++) {
693 		ring = adev->rings[i];
694 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
695 			/* only compute rings */
696 			ring->has_compute_vm_bug = has_compute_vm_bug;
697 		else
698 			ring->has_compute_vm_bug = false;
699 	}
700 }
701 
702 /**
703  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
704  *
705  * @ring: ring on which the job will be submitted
706  * @job: job to submit
707  *
708  * Returns:
709  * True if sync is needed.
710  */
711 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
712 				  struct amdgpu_job *job)
713 {
714 	struct amdgpu_device *adev = ring->adev;
715 	unsigned vmhub = ring->funcs->vmhub;
716 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
717 	struct amdgpu_vmid *id;
718 	bool gds_switch_needed;
719 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
720 
721 	if (job->vmid == 0)
722 		return false;
723 	id = &id_mgr->ids[job->vmid];
724 	gds_switch_needed = ring->funcs->emit_gds_switch && (
725 		id->gds_base != job->gds_base ||
726 		id->gds_size != job->gds_size ||
727 		id->gws_base != job->gws_base ||
728 		id->gws_size != job->gws_size ||
729 		id->oa_base != job->oa_base ||
730 		id->oa_size != job->oa_size);
731 
732 	if (amdgpu_vmid_had_gpu_reset(adev, id))
733 		return true;
734 
735 	return vm_flush_needed || gds_switch_needed;
736 }
737 
738 /**
739  * amdgpu_vm_flush - hardware flush the vm
740  *
741  * @ring: ring to use for flush
742  * @job:  related job
743  * @need_pipe_sync: is pipe sync needed
744  *
745  * Emit a VM flush when it is necessary.
746  *
747  * Returns:
748  * 0 on success, errno otherwise.
749  */
750 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
751 {
752 	struct amdgpu_device *adev = ring->adev;
753 	unsigned vmhub = ring->funcs->vmhub;
754 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
755 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
756 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
757 		id->gds_base != job->gds_base ||
758 		id->gds_size != job->gds_size ||
759 		id->gws_base != job->gws_base ||
760 		id->gws_size != job->gws_size ||
761 		id->oa_base != job->oa_base ||
762 		id->oa_size != job->oa_size);
763 	bool vm_flush_needed = job->vm_needs_flush;
764 	struct dma_fence *fence = NULL;
765 	bool pasid_mapping_needed = false;
766 	unsigned patch_offset = 0;
767 	int r;
768 
769 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
770 		gds_switch_needed = true;
771 		vm_flush_needed = true;
772 		pasid_mapping_needed = true;
773 	}
774 
775 	mutex_lock(&id_mgr->lock);
776 	if (id->pasid != job->pasid || !id->pasid_mapping ||
777 	    !dma_fence_is_signaled(id->pasid_mapping))
778 		pasid_mapping_needed = true;
779 	mutex_unlock(&id_mgr->lock);
780 
781 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
782 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
783 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
784 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
785 		ring->funcs->emit_wreg;
786 
787 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
788 		return 0;
789 
790 	if (ring->funcs->init_cond_exec)
791 		patch_offset = amdgpu_ring_init_cond_exec(ring);
792 
793 	if (need_pipe_sync)
794 		amdgpu_ring_emit_pipeline_sync(ring);
795 
796 	if (vm_flush_needed) {
797 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
798 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
799 	}
800 
801 	if (pasid_mapping_needed)
802 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
803 
804 	if (vm_flush_needed || pasid_mapping_needed) {
805 		r = amdgpu_fence_emit(ring, &fence, 0);
806 		if (r)
807 			return r;
808 	}
809 
810 	if (vm_flush_needed) {
811 		mutex_lock(&id_mgr->lock);
812 		dma_fence_put(id->last_flush);
813 		id->last_flush = dma_fence_get(fence);
814 		id->current_gpu_reset_count =
815 			atomic_read(&adev->gpu_reset_counter);
816 		mutex_unlock(&id_mgr->lock);
817 	}
818 
819 	if (pasid_mapping_needed) {
820 		mutex_lock(&id_mgr->lock);
821 		id->pasid = job->pasid;
822 		dma_fence_put(id->pasid_mapping);
823 		id->pasid_mapping = dma_fence_get(fence);
824 		mutex_unlock(&id_mgr->lock);
825 	}
826 	dma_fence_put(fence);
827 
828 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
829 		id->gds_base = job->gds_base;
830 		id->gds_size = job->gds_size;
831 		id->gws_base = job->gws_base;
832 		id->gws_size = job->gws_size;
833 		id->oa_base = job->oa_base;
834 		id->oa_size = job->oa_size;
835 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
836 					    job->gds_size, job->gws_base,
837 					    job->gws_size, job->oa_base,
838 					    job->oa_size);
839 	}
840 
841 	if (ring->funcs->patch_cond_exec)
842 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
843 
844 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
845 	if (ring->funcs->emit_switch_buffer) {
846 		amdgpu_ring_emit_switch_buffer(ring);
847 		amdgpu_ring_emit_switch_buffer(ring);
848 	}
849 	return 0;
850 }
851 
852 /**
853  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
854  *
855  * @vm: requested vm
856  * @bo: requested buffer object
857  *
858  * Find @bo inside the requested vm.
859  * Search inside the @bos vm list for the requested vm
860  * Returns the found bo_va or NULL if none is found
861  *
862  * Object has to be reserved!
863  *
864  * Returns:
865  * Found bo_va or NULL.
866  */
867 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
868 				       struct amdgpu_bo *bo)
869 {
870 	struct amdgpu_bo_va *bo_va;
871 
872 	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
873 		if (bo_va->base.vm == vm) {
874 			return bo_va;
875 		}
876 	}
877 	return NULL;
878 }
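
/*
 * Note that this is a plain linear walk of the BO's per-VM list; the BO has
 * to be reserved by the caller so the list cannot change underneath us.
 */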
879 
880 /**
881  * amdgpu_vm_do_set_ptes - helper to call the right asic function
882  *
883  * @params: see amdgpu_pte_update_params definition
884  * @bo: PD/PT to update
885  * @pe: addr of the page entry
886  * @addr: dst addr to write into pe
887  * @count: number of page entries to update
888  * @incr: increase next addr by incr bytes
889  * @flags: hw access flags
890  *
891  * Traces the parameters and calls the right asic functions
892  * to setup the page table using the DMA.
893  */
894 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
895 				  struct amdgpu_bo *bo,
896 				  uint64_t pe, uint64_t addr,
897 				  unsigned count, uint32_t incr,
898 				  uint64_t flags)
899 {
900 	pe += amdgpu_bo_gpu_offset(bo);
901 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
902 
903 	if (count < 3) {
904 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
905 				    addr | flags, count, incr);
906 
907 	} else {
908 		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
909 				      count, incr, flags);
910 	}
911 }
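
/*
 * The count < 3 check above is a heuristic: tiny updates are written inline
 * into the IB via amdgpu_vm_write_pte(), while longer runs are emitted with
 * amdgpu_vm_set_pte_pde(), which can describe a whole run of equally spaced
 * entries in a single command.
 */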
912 
913 /**
914  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
915  *
916  * @params: see amdgpu_pte_update_params definition
917  * @bo: PD/PT to update
918  * @pe: addr of the page entry
919  * @addr: dst addr to write into pe
920  * @count: number of page entries to update
921  * @incr: increase next addr by incr bytes
922  * @flags: hw access flags
923  *
924  * Traces the parameters and calls the DMA function to copy the PTEs.
925  */
926 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
927 				   struct amdgpu_bo *bo,
928 				   uint64_t pe, uint64_t addr,
929 				   unsigned count, uint32_t incr,
930 				   uint64_t flags)
931 {
932 	uint64_t src = (params->src + (addr >> 12) * 8);
933 
934 	pe += amdgpu_bo_gpu_offset(bo);
935 	trace_amdgpu_vm_copy_ptes(pe, src, count);
936 
937 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
938 }
939 
940 /**
941  * amdgpu_vm_map_gart - Resolve gart mapping of addr
942  *
943  * @pages_addr: optional DMA address to use for lookup
944  * @addr: the unmapped addr
945  *
946  * Look up the physical address of the page that the pte resolves
947  * to.
948  *
949  * Returns:
950  * The pointer for the page table entry.
951  */
952 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
953 {
954 	uint64_t result;
955 
956 	/* page table offset */
957 	result = pages_addr[addr >> PAGE_SHIFT];
958 
959 	/* in case cpu page size != gpu page size */
960 	result |= addr & (PAGE_MASK);
961 
962 	result &= 0xFFFFFFFFFFFFF000ULL;
963 
964 	return result;
965 }
966 
967 /**
968  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
969  *
970  * @params: see amdgpu_pte_update_params definition
971  * @bo: PD/PT to update
972  * @pe: kmap addr of the page entry
973  * @addr: dst addr to write into pe
974  * @count: number of page entries to update
975  * @incr: increase next addr by incr bytes
976  * @flags: hw access flags
977  *
978  * Write count number of PT/PD entries directly.
979  */
980 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
981 				   struct amdgpu_bo *bo,
982 				   uint64_t pe, uint64_t addr,
983 				   unsigned count, uint32_t incr,
984 				   uint64_t flags)
985 {
986 	unsigned int i;
987 	uint64_t value;
988 
989 	pe += (unsigned long)amdgpu_bo_kptr(bo);
990 
991 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
992 
993 	for (i = 0; i < count; i++) {
994 		value = params->pages_addr ?
995 			amdgpu_vm_map_gart(params->pages_addr, addr) :
996 			addr;
997 		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
998 				       i, value, flags);
999 		addr += incr;
1000 	}
1001 }
1002 
1003 
1004 /**
1005  * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
1006  *
1007  * @adev: amdgpu_device pointer
1008  * @vm: related vm
1009  * @owner: fence owner
1010  *
1011  * Returns:
1012  * 0 on success, errno otherwise.
1013  */
1014 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1015 			     void *owner)
1016 {
1017 	struct amdgpu_sync sync;
1018 	int r;
1019 
1020 	amdgpu_sync_create(&sync);
1021 	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
1022 	r = amdgpu_sync_wait(&sync, true);
1023 	amdgpu_sync_free(&sync);
1024 
1025 	return r;
1026 }
1027 
1028 /**
1029  * amdgpu_vm_update_pde - update a single level in the hierarchy
1030  *
1031  * @param: parameters for the update
1032  * @vm: requested vm
1033  * @parent: parent directory
1034  * @entry: entry to update
1035  *
1036  * Makes sure the requested entry in parent is up to date.
1037  */
1038 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
1039 				 struct amdgpu_vm *vm,
1040 				 struct amdgpu_vm_pt *parent,
1041 				 struct amdgpu_vm_pt *entry)
1042 {
1043 	struct amdgpu_bo *bo = parent->base.bo, *pbo;
1044 	uint64_t pde, pt, flags;
1045 	unsigned level;
1046 
1047 	/* Don't update huge pages here */
1048 	if (entry->huge)
1049 		return;
1050 
1051 	for (level = 0, pbo = bo->parent; pbo; ++level)
1052 		pbo = pbo->parent;
1053 
1054 	level += params->adev->vm_manager.root_level;
1055 	pt = amdgpu_bo_gpu_offset(entry->base.bo);
1056 	flags = AMDGPU_PTE_VALID;
1057 	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
1058 	pde = (entry - parent->entries) * 8;
1059 	if (bo->shadow)
1060 		params->func(params, bo->shadow, pde, pt, 1, 0, flags);
1061 	params->func(params, bo, pde, pt, 1, 0, flags);
1062 }
1063 
1064 /**
1065  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
1066  *
1067  * @adev: amdgpu_device pointer
1068  * @vm: related vm
1069  * @parent: parent PD
1070  * @level: VMPT level
1071  *
1072  * Mark all PD level as invalid after an error.
1073  */
1074 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1075 				       struct amdgpu_vm *vm,
1076 				       struct amdgpu_vm_pt *parent,
1077 				       unsigned level)
1078 {
1079 	unsigned pt_idx, num_entries;
1080 
1081 	/*
1082 	 * Recurse into the subdirectories. This recursion is harmless because
1083 	 * we only have a maximum of 5 layers.
1084 	 */
1085 	num_entries = amdgpu_vm_num_entries(adev, level);
1086 	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1087 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1088 
1089 		if (!entry->base.bo)
1090 			continue;
1091 
1092 		if (!entry->base.moved)
1093 			list_move(&entry->base.vm_status, &vm->relocated);
1094 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1095 	}
1096 }
1097 
1098 /**
1099  * amdgpu_vm_update_directories - make sure that all directories are valid
1100  *
1101  * @adev: amdgpu_device pointer
1102  * @vm: requested vm
1103  *
1104  * Makes sure all directories are up to date.
1105  *
1106  * Returns:
1107  * 0 for success, error for failure.
1108  */
1109 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1110 				 struct amdgpu_vm *vm)
1111 {
1112 	struct amdgpu_pte_update_params params;
1113 	struct amdgpu_job *job;
1114 	unsigned ndw = 0;
1115 	int r = 0;
1116 
1117 	if (list_empty(&vm->relocated))
1118 		return 0;
1119 
1120 restart:
1121 	memset(&params, 0, sizeof(params));
1122 	params.adev = adev;
1123 
1124 	if (vm->use_cpu_for_update) {
1125 		struct amdgpu_vm_bo_base *bo_base;
1126 
1127 		list_for_each_entry(bo_base, &vm->relocated, vm_status) {
1128 			r = amdgpu_bo_kmap(bo_base->bo, NULL);
1129 			if (unlikely(r))
1130 				return r;
1131 		}
1132 
1133 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1134 		if (unlikely(r))
1135 			return r;
1136 
1137 		params.func = amdgpu_vm_cpu_set_ptes;
1138 	} else {
1139 		ndw = 512 * 8;
1140 		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1141 		if (r)
1142 			return r;
1143 
1144 		params.ib = &job->ibs[0];
1145 		params.func = amdgpu_vm_do_set_ptes;
1146 	}
1147 
1148 	while (!list_empty(&vm->relocated)) {
1149 		struct amdgpu_vm_bo_base *bo_base, *parent;
1150 		struct amdgpu_vm_pt *pt, *entry;
1151 		struct amdgpu_bo *bo;
1152 
1153 		bo_base = list_first_entry(&vm->relocated,
1154 					   struct amdgpu_vm_bo_base,
1155 					   vm_status);
1156 		bo_base->moved = false;
1157 		list_del_init(&bo_base->vm_status);
1158 
1159 		bo = bo_base->bo->parent;
1160 		if (!bo)
1161 			continue;
1162 
1163 		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
1164 					  bo_list);
1165 		pt = container_of(parent, struct amdgpu_vm_pt, base);
1166 		entry = container_of(bo_base, struct amdgpu_vm_pt, base);
1167 
1168 		amdgpu_vm_update_pde(&params, vm, pt, entry);
1169 
1170 		if (!vm->use_cpu_for_update &&
1171 		    (ndw - params.ib->length_dw) < 32)
1172 			break;
1173 	}
1174 
1175 	if (vm->use_cpu_for_update) {
1176 		/* Flush HDP */
1177 		mb();
1178 		amdgpu_asic_flush_hdp(adev, NULL);
1179 	} else if (params.ib->length_dw == 0) {
1180 		amdgpu_job_free(job);
1181 	} else {
1182 		struct amdgpu_bo *root = vm->root.base.bo;
1183 		struct amdgpu_ring *ring;
1184 		struct dma_fence *fence;
1185 
1186 		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
1187 				    sched);
1188 
1189 		amdgpu_ring_pad_ib(ring, params.ib);
1190 		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1191 				 AMDGPU_FENCE_OWNER_VM, false);
1192 		WARN_ON(params.ib->length_dw > ndw);
1193 		r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1194 				      &fence);
1195 		if (r)
1196 			goto error;
1197 
1198 		amdgpu_bo_fence(root, fence, true);
1199 		dma_fence_put(vm->last_update);
1200 		vm->last_update = fence;
1201 	}
1202 
1203 	if (!list_empty(&vm->relocated))
1204 		goto restart;
1205 
1206 	return 0;
1207 
1208 error:
1209 	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1210 				   adev->vm_manager.root_level);
1211 	amdgpu_job_free(job);
1212 	return r;
1213 }
1214 
1215 /**
1216  * amdgpu_vm_get_entry - find the entry for an address
1217  *
1218  * @p: see amdgpu_pte_update_params definition
1219  * @addr: virtual address in question
1220  * @entry: resulting entry or NULL
1221  * @parent: parent entry
1222  *
1223  * Find the vm_pt entry and its parent for the given address.
1224  */
1225 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1226 			 struct amdgpu_vm_pt **entry,
1227 			 struct amdgpu_vm_pt **parent)
1228 {
1229 	unsigned level = p->adev->vm_manager.root_level;
1230 
1231 	*parent = NULL;
1232 	*entry = &p->vm->root;
1233 	while ((*entry)->entries) {
1234 		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1235 
1236 		*parent = *entry;
1237 		*entry = &(*entry)->entries[addr >> shift];
1238 		addr &= (1ULL << shift) - 1;
1239 	}
1240 
1241 	if (level != AMDGPU_VM_PTB)
1242 		*entry = NULL;
1243 }
1244 
1245 /**
1246  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1247  *
1248  * @p: see amdgpu_pte_update_params definition
1249  * @entry: vm_pt entry to check
1250  * @parent: parent entry
1251  * @nptes: number of PTEs updated with this operation
1252  * @dst: destination address where the PTEs should point to
1253  * @flags: access flags for the PTEs
1254  *
1255  * Check if we can update the PD with a huge page.
1256  */
1257 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1258 					struct amdgpu_vm_pt *entry,
1259 					struct amdgpu_vm_pt *parent,
1260 					unsigned nptes, uint64_t dst,
1261 					uint64_t flags)
1262 {
1263 	uint64_t pde;
1264 
1265 	/* In the case of a mixed PT the PDE must point to it */
1266 	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1267 	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1268 		/* Set the huge page flag to stop scanning at this PDE */
1269 		flags |= AMDGPU_PDE_PTE;
1270 	}
1271 
1272 	if (!(flags & AMDGPU_PDE_PTE)) {
1273 		if (entry->huge) {
1274 			/* Add the entry to the relocated list to update it. */
1275 			entry->huge = false;
1276 			list_move(&entry->base.vm_status, &p->vm->relocated);
1277 		}
1278 		return;
1279 	}
1280 
1281 	entry->huge = true;
1282 	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1283 
1284 	pde = (entry - parent->entries) * 8;
1285 	if (parent->base.bo->shadow)
1286 		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1287 	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1288 }
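
/*
 * In other words: on Vega and later, when a full page table worth of PTEs
 * (512 with a block size of 9, i.e. a 2 MiB range of 4 KiB GPU pages) maps
 * one contiguous allocation, the parent PDE itself becomes a huge-page PTE
 * and the page walker stops there instead of descending into the PT.
 */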
1289 
1290 /**
1291  * amdgpu_vm_update_ptes - make sure that page tables are valid
1292  *
1293  * @params: see amdgpu_pte_update_params definition
1294  * @start: start of GPU address range
1295  * @end: end of GPU address range
1296  * @dst: destination address to map to, the next dst inside the function
1297  * @flags: mapping flags
1298  *
1299  * Update the page tables in the range @start - @end.
1300  *
1301  * Returns:
1302  * 0 for success, -EINVAL for failure.
1303  */
1304 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1305 				  uint64_t start, uint64_t end,
1306 				  uint64_t dst, uint64_t flags)
1307 {
1308 	struct amdgpu_device *adev = params->adev;
1309 	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1310 
1311 	uint64_t addr, pe_start;
1312 	struct amdgpu_bo *pt;
1313 	unsigned nptes;
1314 
1315 	/* walk over the address space and update the page tables */
1316 	for (addr = start; addr < end; addr += nptes,
1317 	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1318 		struct amdgpu_vm_pt *entry, *parent;
1319 
1320 		amdgpu_vm_get_entry(params, addr, &entry, &parent);
1321 		if (!entry)
1322 			return -ENOENT;
1323 
1324 		if ((addr & ~mask) == (end & ~mask))
1325 			nptes = end - addr;
1326 		else
1327 			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1328 
1329 		amdgpu_vm_handle_huge_pages(params, entry, parent,
1330 					    nptes, dst, flags);
1331 		/* We don't need to update PTEs for huge pages */
1332 		if (entry->huge)
1333 			continue;
1334 
1335 		pt = entry->base.bo;
1336 		pe_start = (addr & mask) * 8;
1337 		if (pt->shadow)
1338 			params->func(params, pt->shadow, pe_start, dst, nptes,
1339 				     AMDGPU_GPU_PAGE_SIZE, flags);
1340 		params->func(params, pt, pe_start, dst, nptes,
1341 			     AMDGPU_GPU_PAGE_SIZE, flags);
1342 	}
1343 
1344 	return 0;
1345 }
1346 
1347 /**
1348  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1349  *
1350  * @params: see amdgpu_pte_update_params definition
1352  * @start: first PTE to handle
1353  * @end: last PTE to handle
1354  * @dst: addr those PTEs should point to
1355  * @flags: hw mapping flags
1356  *
1357  * Returns:
1358  * 0 for success, -EINVAL for failure.
1359  */
1360 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1361 				uint64_t start, uint64_t end,
1362 				uint64_t dst, uint64_t flags)
1363 {
1364 	/**
1365 	 * The MC L1 TLB supports variable sized pages, based on a fragment
1366 	 * field in the PTE. When this field is set to a non-zero value, page
1367 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1368 	 * flags are considered valid for all PTEs within the fragment range
1369 	 * and corresponding mappings are assumed to be physically contiguous.
1370 	 *
1371 	 * The L1 TLB can store a single PTE for the whole fragment,
1372 	 * significantly increasing the space available for translation
1373 	 * caching. This leads to large improvements in throughput when the
1374 	 * TLB is under pressure.
1375 	 *
1376 	 * The L2 TLB distributes small and large fragments into two
1377 	 * asymmetric partitions. The large fragment cache is significantly
1378 	 * larger. Thus, we try to use large fragments wherever possible.
1379 	 * Userspace can support this by aligning virtual base address and
1380 	 * allocation size to the fragment size.
1381 	 */
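	/*
	 * For example, a fragment value of 4 marks a 1 << (12 + 4) = 64KB
	 * region as contiguous, and a fragment value of 9 corresponds to a
	 * 2MB region; userspace benefits simply by using suitably aligned
	 * and sized allocations.
	 */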
1382 	unsigned max_frag = params->adev->vm_manager.fragment_size;
1383 	int r;
1384 
1385 	/* system pages are not physically contiguous */
1386 	if (params->src || !(flags & AMDGPU_PTE_VALID))
1387 		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1388 
1389 	while (start != end) {
1390 		uint64_t frag_flags, frag_end;
1391 		unsigned frag;
1392 
1393 		/* This intentionally wraps around if no bit is set */
1394 		frag = min((unsigned)ffs(start) - 1,
1395 			   (unsigned)fls64(end - start) - 1);
1396 		if (frag >= max_frag) {
1397 			frag_flags = AMDGPU_PTE_FRAG(max_frag);
1398 			frag_end = end & ~((1ULL << max_frag) - 1);
1399 		} else {
1400 			frag_flags = AMDGPU_PTE_FRAG(frag);
1401 			frag_end = start + (1 << frag);
1402 		}
1403 
1404 		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1405 					  flags | frag_flags);
1406 		if (r)
1407 			return r;
1408 
1409 		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1410 		start = frag_end;
1411 	}
1412 
1413 	return 0;
1414 }
1415 
1416 /**
1417  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1418  *
1419  * @adev: amdgpu_device pointer
1420  * @exclusive: fence we need to sync to
1421  * @pages_addr: DMA addresses to use for mapping
1422  * @vm: requested vm
1423  * @start: start of mapped range
1424  * @last: last mapped entry
1425  * @flags: flags for the entries
1426  * @addr: addr to set the area to
1427  * @fence: optional resulting fence
1428  *
1429  * Fill in the page table entries between @start and @last.
1430  *
1431  * Returns:
1432  * 0 for success, -EINVAL for failure.
1433  */
1434 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1435 				       struct dma_fence *exclusive,
1436 				       dma_addr_t *pages_addr,
1437 				       struct amdgpu_vm *vm,
1438 				       uint64_t start, uint64_t last,
1439 				       uint64_t flags, uint64_t addr,
1440 				       struct dma_fence **fence)
1441 {
1442 	struct amdgpu_ring *ring;
1443 	void *owner = AMDGPU_FENCE_OWNER_VM;
1444 	unsigned nptes, ncmds, ndw;
1445 	struct amdgpu_job *job;
1446 	struct amdgpu_pte_update_params params;
1447 	struct dma_fence *f = NULL;
1448 	int r;
1449 
1450 	memset(&params, 0, sizeof(params));
1451 	params.adev = adev;
1452 	params.vm = vm;
1453 
1454 	/* sync to everything on unmapping */
1455 	if (!(flags & AMDGPU_PTE_VALID))
1456 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1457 
1458 	if (vm->use_cpu_for_update) {
1459 		/* params.src is used as a flag to indicate system memory */
1460 		if (pages_addr)
1461 			params.src = ~0;
1462 
1463 		/* Wait for PT BOs to be free. PTs share the same resv. object
1464 		 * as the root PD BO
1465 		 */
1466 		r = amdgpu_vm_wait_pd(adev, vm, owner);
1467 		if (unlikely(r))
1468 			return r;
1469 
1470 		params.func = amdgpu_vm_cpu_set_ptes;
1471 		params.pages_addr = pages_addr;
1472 		return amdgpu_vm_frag_ptes(&params, start, last + 1,
1473 					   addr, flags);
1474 	}
1475 
1476 	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
1477 
1478 	nptes = last - start + 1;
1479 
1480 	/*
1481 	 * reserve space for two commands every (1 << BLOCK_SIZE)
1482 	 * entries or 2k dwords (whatever is smaller)
1483 	 *
1484 	 * The second command is for the shadow pagetables.
1485 	 */
1486 	if (vm->root.base.bo->shadow)
1487 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1488 	else
1489 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1490 
1491 	/* padding, etc. */
1492 	ndw = 64;
1493 
1494 	if (pages_addr) {
1495 		/* copy commands needed */
1496 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1497 
1498 		/* and also PTEs */
1499 		ndw += nptes * 2;
1500 
1501 		params.func = amdgpu_vm_do_copy_ptes;
1502 
1503 	} else {
1504 		/* set page commands needed */
1505 		ndw += ncmds * 10;
1506 
1507 		/* extra commands for begin/end fragments */
1508 		if (vm->root.base.bo->shadow)
1509 		        ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
1510 		else
1511 		        ndw += 2 * 10 * adev->vm_manager.fragment_size;
1512 
1513 		params.func = amdgpu_vm_do_set_ptes;
1514 	}
1515 
1516 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1517 	if (r)
1518 		return r;
1519 
1520 	params.ib = &job->ibs[0];
1521 
1522 	if (pages_addr) {
1523 		uint64_t *pte;
1524 		unsigned i;
1525 
1526 		/* Put the PTEs at the end of the IB. */
1527 		i = ndw - nptes * 2;
1528 		pte = (uint64_t *)&(job->ibs->ptr[i]);
1529 		params.src = job->ibs->gpu_addr + i * 4;
1530 
1531 		for (i = 0; i < nptes; ++i) {
1532 			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1533 						    AMDGPU_GPU_PAGE_SIZE);
1534 			pte[i] |= flags;
1535 		}
1536 		addr = 0;
1537 	}
1538 
1539 	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1540 	if (r)
1541 		goto error_free;
1542 
1543 	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1544 			     owner, false);
1545 	if (r)
1546 		goto error_free;
1547 
1548 	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1549 	if (r)
1550 		goto error_free;
1551 
1552 	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1553 	if (r)
1554 		goto error_free;
1555 
1556 	amdgpu_ring_pad_ib(ring, params.ib);
1557 	WARN_ON(params.ib->length_dw > ndw);
1558 	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1559 	if (r)
1560 		goto error_free;
1561 
1562 	amdgpu_bo_fence(vm->root.base.bo, f, true);
1563 	dma_fence_put(*fence);
1564 	*fence = f;
1565 	return 0;
1566 
1567 error_free:
1568 	amdgpu_job_free(job);
1569 	return r;
1570 }
1571 
1572 /**
1573  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1574  *
1575  * @adev: amdgpu_device pointer
1576  * @exclusive: fence we need to sync to
1577  * @pages_addr: DMA addresses to use for mapping
1578  * @vm: requested vm
1579  * @mapping: mapped range and flags to use for the update
1580  * @flags: HW flags for the mapping
1581  * @nodes: array of drm_mm_nodes with the MC addresses
1582  * @fence: optional resulting fence
1583  *
1584  * Split the mapping into smaller chunks so that each update fits
1585  * into a SDMA IB.
1586  *
1587  * Returns:
1588  * 0 for success, -EINVAL for failure.
1589  */
1590 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1591 				      struct dma_fence *exclusive,
1592 				      dma_addr_t *pages_addr,
1593 				      struct amdgpu_vm *vm,
1594 				      struct amdgpu_bo_va_mapping *mapping,
1595 				      uint64_t flags,
1596 				      struct drm_mm_node *nodes,
1597 				      struct dma_fence **fence)
1598 {
1599 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1600 	uint64_t pfn, start = mapping->start;
1601 	int r;
1602 
1603 	/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits
1604 	 * here, but just in case we filter the flags first.
1605 	 */
1606 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1607 		flags &= ~AMDGPU_PTE_READABLE;
1608 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1609 		flags &= ~AMDGPU_PTE_WRITEABLE;
1610 
1611 	flags &= ~AMDGPU_PTE_EXECUTABLE;
1612 	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1613 
1614 	flags &= ~AMDGPU_PTE_MTYPE_MASK;
1615 	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1616 
1617 	if ((mapping->flags & AMDGPU_PTE_PRT) &&
1618 	    (adev->asic_type >= CHIP_VEGA10)) {
1619 		flags |= AMDGPU_PTE_PRT;
1620 		flags &= ~AMDGPU_PTE_VALID;
1621 	}
1622 
1623 	trace_amdgpu_vm_bo_update(mapping);
1624 
1625 	pfn = mapping->offset >> PAGE_SHIFT;
1626 	if (nodes) {
1627 		while (pfn >= nodes->size) {
1628 			pfn -= nodes->size;
1629 			++nodes;
1630 		}
1631 	}
1632 
1633 	do {
1634 		dma_addr_t *dma_addr = NULL;
1635 		uint64_t max_entries;
1636 		uint64_t addr, last;
1637 
1638 		if (nodes) {
1639 			addr = nodes->start << PAGE_SHIFT;
1640 			max_entries = (nodes->size - pfn) *
1641 				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1642 		} else {
1643 			addr = 0;
1644 			max_entries = S64_MAX;
1645 		}
1646 
1647 		if (pages_addr) {
1648 			uint64_t count;
1649 
1650 			max_entries = min(max_entries, 16ull * 1024ull);
1651 			for (count = 1;
1652 			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1653 			     ++count) {
1654 				uint64_t idx = pfn + count;
1655 
1656 				if (pages_addr[idx] !=
1657 				    (pages_addr[idx - 1] + PAGE_SIZE))
1658 					break;
1659 			}
1660 
1661 			if (count < min_linear_pages) {
1662 				addr = pfn << PAGE_SHIFT;
1663 				dma_addr = pages_addr;
1664 			} else {
1665 				addr = pages_addr[pfn];
1666 				max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1667 			}
1668 
1669 		} else if (flags & AMDGPU_PTE_VALID) {
1670 			addr += adev->vm_manager.vram_base_offset;
1671 			addr += pfn << PAGE_SHIFT;
1672 		}
1673 
1674 		last = min((uint64_t)mapping->last, start + max_entries - 1);
1675 		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1676 						start, last, flags, addr,
1677 						fence);
1678 		if (r)
1679 			return r;
1680 
1681 		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1682 		if (nodes && nodes->size == pfn) {
1683 			pfn = 0;
1684 			++nodes;
1685 		}
1686 		start = last + 1;
1687 
1688 	} while (unlikely(start != mapping->last + 1));
1689 
1690 	return 0;
1691 }
1692 
1693 /**
1694  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1695  *
1696  * @adev: amdgpu_device pointer
1697  * @bo_va: requested BO and VM object
1698  * @clear: if true clear the entries
1699  *
1700  * Fill in the page table entries for @bo_va.
1701  *
1702  * Returns:
1703  * 0 for success, -EINVAL for failure.
1704  */
1705 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1706 			struct amdgpu_bo_va *bo_va,
1707 			bool clear)
1708 {
1709 	struct amdgpu_bo *bo = bo_va->base.bo;
1710 	struct amdgpu_vm *vm = bo_va->base.vm;
1711 	struct amdgpu_bo_va_mapping *mapping;
1712 	dma_addr_t *pages_addr = NULL;
1713 	struct ttm_mem_reg *mem;
1714 	struct drm_mm_node *nodes;
1715 	struct dma_fence *exclusive, **last_update;
1716 	uint64_t flags;
1717 	int r;
1718 
1719 	if (clear || !bo) {
1720 		mem = NULL;
1721 		nodes = NULL;
1722 		exclusive = NULL;
1723 	} else {
1724 		struct ttm_dma_tt *ttm;
1725 
1726 		mem = &bo->tbo.mem;
1727 		nodes = mem->mm_node;
1728 		if (mem->mem_type == TTM_PL_TT) {
1729 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1730 			pages_addr = ttm->dma_address;
1731 		}
1732 		exclusive = reservation_object_get_excl(bo->tbo.resv);
1733 	}
1734 
1735 	if (bo)
1736 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1737 	else
1738 		flags = 0x0;
1739 
1740 	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1741 		last_update = &vm->last_update;
1742 	else
1743 		last_update = &bo_va->last_pt_update;
1744 
1745 	if (!clear && bo_va->base.moved) {
1746 		bo_va->base.moved = false;
1747 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1748 
1749 	} else if (bo_va->cleared != clear) {
1750 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1751 	}
1752 
1753 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1754 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1755 					       mapping, flags, nodes,
1756 					       last_update);
1757 		if (r)
1758 			return r;
1759 	}
1760 
1761 	if (vm->use_cpu_for_update) {
1762 		/* Flush HDP */
1763 		mb();
1764 		amdgpu_asic_flush_hdp(adev, NULL);
1765 	}
1766 
1767 	spin_lock(&vm->moved_lock);
1768 	list_del_init(&bo_va->base.vm_status);
1769 	spin_unlock(&vm->moved_lock);
1770 
1771 	/* If the BO is not in its preferred location add it back to
1772 	 * the evicted list so that it gets validated again on the
1773 	 * next command submission.
1774 	 */
1775 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1776 		uint32_t mem_type = bo->tbo.mem.mem_type;
1777 
1778 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1779 			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1780 		else
1781 			list_add(&bo_va->base.vm_status, &vm->idle);
1782 	}
1783 
1784 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1785 	bo_va->cleared = clear;
1786 
1787 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1788 		list_for_each_entry(mapping, &bo_va->valids, list)
1789 			trace_amdgpu_vm_bo_mapping(mapping);
1790 	}
1791 
1792 	return 0;
1793 }
1794 
1795 /**
1796  * amdgpu_vm_update_prt_state - update the global PRT state
1797  *
1798  * @adev: amdgpu_device pointer
1799  */
1800 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1801 {
1802 	unsigned long flags;
1803 	bool enable;
1804 
1805 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1806 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1807 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1808 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1809 }
1810 
1811 /**
1812  * amdgpu_vm_prt_get - add a PRT user
1813  *
1814  * @adev: amdgpu_device pointer
1815  */
1816 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1817 {
1818 	if (!adev->gmc.gmc_funcs->set_prt)
1819 		return;
1820 
1821 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1822 		amdgpu_vm_update_prt_state(adev);
1823 }
1824 
1825 /**
1826  * amdgpu_vm_prt_put - drop a PRT user
1827  *
1828  * @adev: amdgpu_device pointer
1829  */
1830 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1831 {
1832 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1833 		amdgpu_vm_update_prt_state(adev);
1834 }
1835 
1836 /**
1837  * amdgpu_vm_prt_cb - callback for updating the PRT status
1838  *
1839  * @fence: fence for the callback
1840  * @_cb: the callback function
1841  */
1842 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1843 {
1844 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1845 
1846 	amdgpu_vm_prt_put(cb->adev);
1847 	kfree(cb);
1848 }
1849 
1850 /**
1851  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1852  *
1853  * @adev: amdgpu_device pointer
1854  * @fence: fence for the callback
1855  */
1856 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1857 				 struct dma_fence *fence)
1858 {
1859 	struct amdgpu_prt_cb *cb;
1860 
1861 	if (!adev->gmc.gmc_funcs->set_prt)
1862 		return;
1863 
1864 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1865 	if (!cb) {
1866 		/* Last resort when we are OOM */
1867 		if (fence)
1868 			dma_fence_wait(fence, false);
1869 
1870 		amdgpu_vm_prt_put(adev);
1871 	} else {
1872 		cb->adev = adev;
1873 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1874 						     amdgpu_vm_prt_cb))
1875 			amdgpu_vm_prt_cb(fence, &cb->cb);
1876 	}
1877 }
1878 
1879 /**
1880  * amdgpu_vm_free_mapping - free a mapping
1881  *
1882  * @adev: amdgpu_device pointer
1883  * @vm: requested vm
1884  * @mapping: mapping to be freed
1885  * @fence: fence of the unmap operation
1886  *
1887  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1888  */
1889 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1890 				   struct amdgpu_vm *vm,
1891 				   struct amdgpu_bo_va_mapping *mapping,
1892 				   struct dma_fence *fence)
1893 {
1894 	if (mapping->flags & AMDGPU_PTE_PRT)
1895 		amdgpu_vm_add_prt_cb(adev, fence);
1896 	kfree(mapping);
1897 }
1898 
1899 /**
1900  * amdgpu_vm_prt_fini - finish all prt mappings
1901  *
1902  * @adev: amdgpu_device pointer
1903  * @vm: requested vm
1904  *
1905  * Register a cleanup callback to disable PRT support after VM dies.
1906  */
1907 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1908 {
1909 	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1910 	struct dma_fence *excl, **shared;
1911 	unsigned i, shared_count;
1912 	int r;
1913 
1914 	r = reservation_object_get_fences_rcu(resv, &excl,
1915 					      &shared_count, &shared);
1916 	if (r) {
1917 		/* Not enough memory to grab the fence list, as last resort
1918 		 * block for all the fences to complete.
1919 		 */
1920 		reservation_object_wait_timeout_rcu(resv, true, false,
1921 						    MAX_SCHEDULE_TIMEOUT);
1922 		return;
1923 	}
1924 
1925 	/* Add a callback for each fence in the reservation object */
1926 	amdgpu_vm_prt_get(adev);
1927 	amdgpu_vm_add_prt_cb(adev, excl);
1928 
1929 	for (i = 0; i < shared_count; ++i) {
1930 		amdgpu_vm_prt_get(adev);
1931 		amdgpu_vm_add_prt_cb(adev, shared[i]);
1932 	}
1933 
1934 	kfree(shared);
1935 }
1936 
1937 /**
1938  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1939  *
1940  * @adev: amdgpu_device pointer
1941  * @vm: requested vm
1942  * @fence: optional resulting fence (unchanged if no work needed to be done
1943  * or if an error occurred)
1944  *
1945  * Make sure all freed BOs are cleared in the PT.
1946  * PTs have to be reserved and mutex must be locked!
1947  *
1948  * Returns:
1949  * 0 for success.
1950  *
1951  */
1952 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1953 			  struct amdgpu_vm *vm,
1954 			  struct dma_fence **fence)
1955 {
1956 	struct amdgpu_bo_va_mapping *mapping;
1957 	uint64_t init_pte_value = 0;
1958 	struct dma_fence *f = NULL;
1959 	int r;
1960 
1961 	while (!list_empty(&vm->freed)) {
1962 		mapping = list_first_entry(&vm->freed,
1963 			struct amdgpu_bo_va_mapping, list);
1964 		list_del(&mapping->list);
1965 
1966 		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1967 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1968 
1969 		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1970 						mapping->start, mapping->last,
1971 						init_pte_value, 0, &f);
1972 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1973 		if (r) {
1974 			dma_fence_put(f);
1975 			return r;
1976 		}
1977 	}
1978 
1979 	if (fence && f) {
1980 		dma_fence_put(*fence);
1981 		*fence = f;
1982 	} else {
1983 		dma_fence_put(f);
1984 	}
1985 
1986 	return 0;
1987 
1988 }
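/*
 * Minimal caller sketch for amdgpu_vm_clear_freed() (hypothetical, page
 * tables assumed to be reserved already, error handling elided):
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, &fence);
 *	if (!r && fence) {
 *		// wait on or schedule behind `fence` before reusing the range
 *		dma_fence_put(fence);
 *	}
 */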
1989 
1990 /**
1991  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1992  *
1993  * @adev: amdgpu_device pointer
1994  * @vm: requested vm
1995  *
1996  * Make sure all BOs which are moved are updated in the PTs.
1997  *
1998  * Returns:
1999  * 0 for success.
2000  *
2001  * PTs have to be reserved!
2002  */
2003 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2004 			   struct amdgpu_vm *vm)
2005 {
2006 	struct amdgpu_bo_va *bo_va, *tmp;
2007 	struct list_head moved;
2008 	bool clear;
2009 	int r;
2010 
2011 	INIT_LIST_HEAD(&moved);
2012 	spin_lock(&vm->moved_lock);
2013 	list_splice_init(&vm->moved, &moved);
2014 	spin_unlock(&vm->moved_lock);
2015 
2016 	list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
2017 		struct reservation_object *resv = bo_va->base.bo->tbo.resv;
2018 
2019 		/* Per VM BOs never need to be cleared in the page tables */
2020 		if (resv == vm->root.base.bo->tbo.resv)
2021 			clear = false;
2022 		/* Try to reserve the BO to avoid clearing its ptes */
2023 		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
2024 			clear = false;
2025 		/* Somebody else is using the BO right now */
2026 		else
2027 			clear = true;
2028 
2029 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
2030 		if (r) {
2031 			spin_lock(&vm->moved_lock);
2032 			list_splice(&moved, &vm->moved);
2033 			spin_unlock(&vm->moved_lock);
2034 			return r;
2035 		}
2036 
2037 		if (!clear && resv != vm->root.base.bo->tbo.resv)
2038 			reservation_object_unlock(resv);
2039 
2040 	}
2041 
2042 	return 0;
2043 }
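/*
 * Summary of how `clear` is chosen above: per-VM BOs (those sharing the
 * root PD's reservation object) and BOs whose reservation could be
 * trylocked are updated with their real addresses; only BOs currently
 * locked by somebody else have their PTEs cleared instead.
 */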
2044 
2045 /**
2046  * amdgpu_vm_bo_add - add a bo to a specific vm
2047  *
2048  * @adev: amdgpu_device pointer
2049  * @vm: requested vm
2050  * @bo: amdgpu buffer object
2051  *
2052  * Add @bo into the requested vm.
2053  * Add @bo to the list of bos associated with the vm
2054  *
2055  * Returns:
2056  * Newly added bo_va or NULL for failure
2057  *
2058  * Object has to be reserved!
2059  */
2060 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2061 				      struct amdgpu_vm *vm,
2062 				      struct amdgpu_bo *bo)
2063 {
2064 	struct amdgpu_bo_va *bo_va;
2065 
2066 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2067 	if (bo_va == NULL) {
2068 		return NULL;
2069 	}
2070 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2071 
2072 	bo_va->ref_count = 1;
2073 	INIT_LIST_HEAD(&bo_va->valids);
2074 	INIT_LIST_HEAD(&bo_va->invalids);
2075 
2076 	return bo_va;
2077 }
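/*
 * Rough per-BO lifecycle sketch built from the helpers in this file
 * (hypothetical caller, reservation handling and error checks elided;
 * saddr, size and flags are made-up values that must respect the alignment
 * rules documented at amdgpu_vm_bo_map() below):
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	r = amdgpu_vm_bo_map(adev, bo_va, saddr, 0, size, flags);
 *	...
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, saddr);
 *	amdgpu_vm_bo_rmv(adev, bo_va);
 */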
2078 
2079 
2080 /**
2081  * amdgpu_vm_bo_insert_map - insert a new mapping
2082  *
2083  * @adev: amdgpu_device pointer
2084  * @bo_va: bo_va to store the address
2085  * @mapping: the mapping to insert
2086  *
2087  * Insert a new mapping into all structures.
2088  */
2089 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2090 				    struct amdgpu_bo_va *bo_va,
2091 				    struct amdgpu_bo_va_mapping *mapping)
2092 {
2093 	struct amdgpu_vm *vm = bo_va->base.vm;
2094 	struct amdgpu_bo *bo = bo_va->base.bo;
2095 
2096 	mapping->bo_va = bo_va;
2097 	list_add(&mapping->list, &bo_va->invalids);
2098 	amdgpu_vm_it_insert(mapping, &vm->va);
2099 
2100 	if (mapping->flags & AMDGPU_PTE_PRT)
2101 		amdgpu_vm_prt_get(adev);
2102 
2103 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2104 	    !bo_va->base.moved) {
2105 		spin_lock(&vm->moved_lock);
2106 		list_move(&bo_va->base.vm_status, &vm->moved);
2107 		spin_unlock(&vm->moved_lock);
2108 	}
2109 	trace_amdgpu_vm_bo_map(bo_va, mapping);
2110 }
2111 
2112 /**
2113  * amdgpu_vm_bo_map - map bo inside a vm
2114  *
2115  * @adev: amdgpu_device pointer
2116  * @bo_va: bo_va to store the address
2117  * @saddr: where to map the BO
2118  * @offset: requested offset in the BO
2119  * @size: BO size in bytes
2120  * @flags: attributes of pages (read/write/valid/etc.)
2121  *
2122  * Add a mapping of the BO at the specified addr into the VM.
2123  *
2124  * Returns:
2125  * 0 for success, error for failure.
2126  *
2127  * Object has to be reserved and unreserved outside!
2128  */
2129 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2130 		     struct amdgpu_bo_va *bo_va,
2131 		     uint64_t saddr, uint64_t offset,
2132 		     uint64_t size, uint64_t flags)
2133 {
2134 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2135 	struct amdgpu_bo *bo = bo_va->base.bo;
2136 	struct amdgpu_vm *vm = bo_va->base.vm;
2137 	uint64_t eaddr;
2138 
2139 	/* validate the parameters */
2140 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2141 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2142 		return -EINVAL;
2143 
2144 	/* make sure object fit at this offset */
2145 	eaddr = saddr + size - 1;
2146 	if (saddr >= eaddr ||
2147 	    (bo && offset + size > amdgpu_bo_size(bo)))
2148 		return -EINVAL;
2149 
2150 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2151 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2152 
2153 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2154 	if (tmp) {
2155 		/* bo and tmp overlap, invalid addr */
2156 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2157 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2158 			tmp->start, tmp->last + 1);
2159 		return -EINVAL;
2160 	}
2161 
2162 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2163 	if (!mapping)
2164 		return -ENOMEM;
2165 
2166 	mapping->start = saddr;
2167 	mapping->last = eaddr;
2168 	mapping->offset = offset;
2169 	mapping->flags = flags;
2170 
2171 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2172 
2173 	return 0;
2174 }
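/*
 * Example of the validation arithmetic above, with made-up numbers: saddr,
 * offset and size all have to be multiples of AMDGPU_GPU_PAGE_SIZE (4KB),
 * so mapping a 64KB BO at GPU VA 0x100000 uses saddr = 0x100000, offset = 0
 * and size = 0x10000; internally that becomes the GPU page range
 * [0x100, 0x10f] stored in mapping->start / mapping->last.
 */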
2175 
2176 /**
2177  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2178  *
2179  * @adev: amdgpu_device pointer
2180  * @bo_va: bo_va to store the address
2181  * @saddr: where to map the BO
2182  * @offset: requested offset in the BO
2183  * @size: BO size in bytes
2184  * @flags: attributes of pages (read/write/valid/etc.)
2185  *
2186  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2187  * mappings as we do so.
2188  *
2189  * Returns:
2190  * 0 for success, error for failure.
2191  *
2192  * Object has to be reserved and unreserved outside!
2193  */
2194 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2195 			     struct amdgpu_bo_va *bo_va,
2196 			     uint64_t saddr, uint64_t offset,
2197 			     uint64_t size, uint64_t flags)
2198 {
2199 	struct amdgpu_bo_va_mapping *mapping;
2200 	struct amdgpu_bo *bo = bo_va->base.bo;
2201 	uint64_t eaddr;
2202 	int r;
2203 
2204 	/* validate the parameters */
2205 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2206 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2207 		return -EINVAL;
2208 
2209 	/* make sure object fit at this offset */
2210 	eaddr = saddr + size - 1;
2211 	if (saddr >= eaddr ||
2212 	    (bo && offset + size > amdgpu_bo_size(bo)))
2213 		return -EINVAL;
2214 
2215 	/* Allocate all the needed memory */
2216 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2217 	if (!mapping)
2218 		return -ENOMEM;
2219 
2220 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2221 	if (r) {
2222 		kfree(mapping);
2223 		return r;
2224 	}
2225 
2226 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2227 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2228 
2229 	mapping->start = saddr;
2230 	mapping->last = eaddr;
2231 	mapping->offset = offset;
2232 	mapping->flags = flags;
2233 
2234 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2235 
2236 	return 0;
2237 }
2238 
2239 /**
2240  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2241  *
2242  * @adev: amdgpu_device pointer
2243  * @bo_va: bo_va to remove the address from
2244  * @saddr: where the BO is mapped
2245  *
2246  * Remove a mapping of the BO at the specified addr from the VM.
2247  *
2248  * Returns:
2249  * 0 for success, error for failure.
2250  *
2251  * Object has to be reserved and unreserved outside!
2252  */
2253 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2254 		       struct amdgpu_bo_va *bo_va,
2255 		       uint64_t saddr)
2256 {
2257 	struct amdgpu_bo_va_mapping *mapping;
2258 	struct amdgpu_vm *vm = bo_va->base.vm;
2259 	bool valid = true;
2260 
2261 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2262 
2263 	list_for_each_entry(mapping, &bo_va->valids, list) {
2264 		if (mapping->start == saddr)
2265 			break;
2266 	}
2267 
2268 	if (&mapping->list == &bo_va->valids) {
2269 		valid = false;
2270 
2271 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2272 			if (mapping->start == saddr)
2273 				break;
2274 		}
2275 
2276 		if (&mapping->list == &bo_va->invalids)
2277 			return -ENOENT;
2278 	}
2279 
2280 	list_del(&mapping->list);
2281 	amdgpu_vm_it_remove(mapping, &vm->va);
2282 	mapping->bo_va = NULL;
2283 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2284 
2285 	if (valid)
2286 		list_add(&mapping->list, &vm->freed);
2287 	else
2288 		amdgpu_vm_free_mapping(adev, vm, mapping,
2289 				       bo_va->last_pt_update);
2290 
2291 	return 0;
2292 }
2293 
2294 /**
2295  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2296  *
2297  * @adev: amdgpu_device pointer
2298  * @vm: VM structure to use
2299  * @saddr: start of the range
2300  * @size: size of the range
2301  *
2302  * Remove all mappings in a range, splitting them as appropriate.
2303  *
2304  * Returns:
2305  * 0 for success, error for failure.
2306  */
2307 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2308 				struct amdgpu_vm *vm,
2309 				uint64_t saddr, uint64_t size)
2310 {
2311 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2312 	DRM_LIST_HEAD(removed);
2313 	uint64_t eaddr;
2314 
2315 	eaddr = saddr + size - 1;
2316 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2317 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2318 
2319 	/* Allocate all the needed memory */
2320 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2321 	if (!before)
2322 		return -ENOMEM;
2323 	INIT_LIST_HEAD(&before->list);
2324 
2325 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2326 	if (!after) {
2327 		kfree(before);
2328 		return -ENOMEM;
2329 	}
2330 	INIT_LIST_HEAD(&after->list);
2331 
2332 	/* Now gather all removed mappings */
2333 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2334 	while (tmp) {
2335 		/* Remember mapping split at the start */
2336 		if (tmp->start < saddr) {
2337 			before->start = tmp->start;
2338 			before->last = saddr - 1;
2339 			before->offset = tmp->offset;
2340 			before->flags = tmp->flags;
2341 			before->bo_va = tmp->bo_va;
2342 			list_add(&before->list, &tmp->bo_va->invalids);
2343 		}
2344 
2345 		/* Remember mapping split at the end */
2346 		if (tmp->last > eaddr) {
2347 			after->start = eaddr + 1;
2348 			after->last = tmp->last;
2349 			after->offset = tmp->offset;
2350 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2351 			after->flags = tmp->flags;
2352 			after->bo_va = tmp->bo_va;
2353 			list_add(&after->list, &tmp->bo_va->invalids);
2354 		}
2355 
2356 		list_del(&tmp->list);
2357 		list_add(&tmp->list, &removed);
2358 
2359 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2360 	}
2361 
2362 	/* And free them up */
2363 	list_for_each_entry_safe(tmp, next, &removed, list) {
2364 		amdgpu_vm_it_remove(tmp, &vm->va);
2365 		list_del(&tmp->list);
2366 
2367 		if (tmp->start < saddr)
2368 		    tmp->start = saddr;
2369 		if (tmp->last > eaddr)
2370 		    tmp->last = eaddr;
2371 
2372 		tmp->bo_va = NULL;
2373 		list_add(&tmp->list, &vm->freed);
2374 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2375 	}
2376 
2377 	/* Insert partial mapping before the range */
2378 	if (!list_empty(&before->list)) {
2379 		amdgpu_vm_it_insert(before, &vm->va);
2380 		if (before->flags & AMDGPU_PTE_PRT)
2381 			amdgpu_vm_prt_get(adev);
2382 	} else {
2383 		kfree(before);
2384 	}
2385 
2386 	/* Insert partial mapping after the range */
2387 	if (!list_empty(&after->list)) {
2388 		amdgpu_vm_it_insert(after, &vm->va);
2389 		if (after->flags & AMDGPU_PTE_PRT)
2390 			amdgpu_vm_prt_get(adev);
2391 	} else {
2392 		kfree(after);
2393 	}
2394 
2395 	return 0;
2396 }
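/*
 * Worked example of the splitting above, with made-up page numbers:
 * clearing pages [0x30, 0x4f] out of an existing mapping that covers
 * [0x10, 0x8f] removes the original mapping, queues the overlapping part
 * (clamped to [0x30, 0x4f]) on vm->freed, and re-inserts "before" =
 * [0x10, 0x2f] and "after" = [0x50, 0x8f], with the offset of "after"
 * advanced by the byte size of the 0x40 pages clipped off its front so it
 * still points at the right part of the BO.
 */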
2397 
2398 /**
2399  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2400  *
2401  * @vm: the requested VM
2402  * @addr: the address
2403  *
2404  * Find a mapping by its address.
2405  *
2406  * Returns:
2407  * The amdgpu_bo_va_mapping matching for addr or NULL
2408  *
2409  */
2410 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2411 							 uint64_t addr)
2412 {
2413 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2414 }
2415 
2416 /**
2417  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2418  *
2419  * @vm: the requested vm
2420  * @ticket: CS ticket
2421  *
2422  * Trace all mappings of BOs reserved during a command submission.
2423  */
2424 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2425 {
2426 	struct amdgpu_bo_va_mapping *mapping;
2427 
2428 	if (!trace_amdgpu_vm_bo_cs_enabled())
2429 		return;
2430 
2431 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2432 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2433 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2434 			struct amdgpu_bo *bo;
2435 
2436 			bo = mapping->bo_va->base.bo;
2437 			if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2438 				continue;
2439 		}
2440 
2441 		trace_amdgpu_vm_bo_cs(mapping);
2442 	}
2443 }
2444 
2445 /**
2446  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2447  *
2448  * @adev: amdgpu_device pointer
2449  * @bo_va: requested bo_va
2450  *
2451  * Remove @bo_va->bo from the requested vm.
2452  *
2453  * Object has to be reserved!
2454  */
2455 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2456 		      struct amdgpu_bo_va *bo_va)
2457 {
2458 	struct amdgpu_bo_va_mapping *mapping, *next;
2459 	struct amdgpu_vm *vm = bo_va->base.vm;
2460 
2461 	list_del(&bo_va->base.bo_list);
2462 
2463 	spin_lock(&vm->moved_lock);
2464 	list_del(&bo_va->base.vm_status);
2465 	spin_unlock(&vm->moved_lock);
2466 
2467 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2468 		list_del(&mapping->list);
2469 		amdgpu_vm_it_remove(mapping, &vm->va);
2470 		mapping->bo_va = NULL;
2471 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2472 		list_add(&mapping->list, &vm->freed);
2473 	}
2474 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2475 		list_del(&mapping->list);
2476 		amdgpu_vm_it_remove(mapping, &vm->va);
2477 		amdgpu_vm_free_mapping(adev, vm, mapping,
2478 				       bo_va->last_pt_update);
2479 	}
2480 
2481 	dma_fence_put(bo_va->last_pt_update);
2482 	kfree(bo_va);
2483 }
2484 
2485 /**
2486  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2487  *
2488  * @adev: amdgpu_device pointer
2489  * @bo: amdgpu buffer object
2490  * @evicted: is the BO evicted
2491  *
2492  * Mark @bo as invalid.
2493  */
2494 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2495 			     struct amdgpu_bo *bo, bool evicted)
2496 {
2497 	struct amdgpu_vm_bo_base *bo_base;
2498 
2499 	/* shadow bo doesn't have bo base, its validation needs its parent */
2500 	if (bo->parent && bo->parent->shadow == bo)
2501 		bo = bo->parent;
2502 
2503 	list_for_each_entry(bo_base, &bo->va, bo_list) {
2504 		struct amdgpu_vm *vm = bo_base->vm;
2505 		bool was_moved = bo_base->moved;
2506 
2507 		bo_base->moved = true;
2508 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2509 			if (bo->tbo.type == ttm_bo_type_kernel)
2510 				list_move(&bo_base->vm_status, &vm->evicted);
2511 			else
2512 				list_move_tail(&bo_base->vm_status,
2513 					       &vm->evicted);
2514 			continue;
2515 		}
2516 
2517 		if (was_moved)
2518 			continue;
2519 
2520 		if (bo->tbo.type == ttm_bo_type_kernel) {
2521 			list_move(&bo_base->vm_status, &vm->relocated);
2522 		} else {
2523 			spin_lock(&bo_base->vm->moved_lock);
2524 			list_move(&bo_base->vm_status, &vm->moved);
2525 			spin_unlock(&bo_base->vm->moved_lock);
2526 		}
2527 	}
2528 }
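/*
 * Rough summary of the per-VM state lists used above and throughout this
 * file: "evicted" BOs need to be validated again, "relocated" holds page
 * table BOs whose entries in the parent directory need updating, "moved"
 * BOs need their PTEs updated, "idle" BOs are up to date, and "freed"
 * holds mappings whose PTEs still have to be cleared.
 */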
2529 
2530 /**
2531  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2532  *
2533  * @vm_size: VM size
2534  *
2535  * Returns:
2536  * VM page table size as a power of two
2537  */
2538 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2539 {
2540 	/* Total bits covered by PD + PTs */
2541 	unsigned bits = ilog2(vm_size) + 18;
2542 
2543 	/* Make sure the PD is 4K in size up to 8GB address space.
2544 	   Above that, split equally between PD and PTs */
2545 	if (vm_size <= 8)
2546 		return (bits - 9);
2547 	else
2548 		return ((bits + 3) / 2);
2549 }
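/*
 * Worked examples for the computation above: an 8GB VM covers
 * ilog2(8) + 18 = 21 bits of address space, so the PTs get 21 - 9 = 12 bits
 * and the PD keeps 9 bits (512 entries * 8 bytes = 4KB); a 64GB VM covers
 * 6 + 18 = 24 bits and the split becomes (24 + 3) / 2 = 13 bits for the PTs.
 */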
2550 
2551 /**
2552  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2553  *
2554  * @adev: amdgpu_device pointer
2555  * @min_vm_size: the minimum vm size in GB if it is determined automatically
2556  * @fragment_size_default: Default PTE fragment size
2557  * @max_level: max VMPT level
2558  * @max_bits: max address space size in bits
2559  *
2560  */
2561 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2562 			   uint32_t fragment_size_default, unsigned max_level,
2563 			   unsigned max_bits)
2564 {
2565 	unsigned int max_size = 1 << (max_bits - 30);
2566 	unsigned int vm_size;
2567 	uint64_t tmp;
2568 
2569 	/* adjust vm size first */
2570 	if (amdgpu_vm_size != -1) {
2571 		vm_size = amdgpu_vm_size;
2572 		if (vm_size > max_size) {
2573 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2574 				 amdgpu_vm_size, max_size);
2575 			vm_size = max_size;
2576 		}
2577 	} else {
2578 #ifdef __linux__
2579 		struct sysinfo si;
2580 #endif
2581 		unsigned int phys_ram_gb;
2582 
2583 		/* Optimal VM size depends on the amount of physical
2584 		 * RAM available. Underlying requirements and
2585 		 * assumptions:
2586 		 *
2587 		 *  - Need to map system memory and VRAM from all GPUs
2588 		 *     - VRAM from other GPUs not known here
2589 		 *     - Assume VRAM <= system memory
2590 		 *  - On GFX8 and older, VM space can be segmented for
2591 		 *    different MTYPEs
2592 		 *  - Need to allow room for fragmentation, guard pages etc.
2593 		 *
2594 		 * This adds up to a rough guess of system memory x3.
2595 		 * Round up to power of two to maximize the available
2596 		 * VM size with the given page table size.
2597 		 */
2598 #ifdef __linux__
2599 		si_meminfo(&si);
2600 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2601 			       (1 << 30) - 1) >> 30;
2602 #else
2603 		phys_ram_gb = ((uint64_t)ptoa(physmem) +
2604 			       (1 << 30) - 1) >> 30;
2605 #endif
2606 		vm_size = roundup_pow_of_two(
2607 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2608 	}
2609 
2610 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2611 
2612 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2613 	if (amdgpu_vm_block_size != -1)
2614 		tmp >>= amdgpu_vm_block_size - 9;
2615 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2616 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2617 	switch (adev->vm_manager.num_level) {
2618 	case 3:
2619 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2620 		break;
2621 	case 2:
2622 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2623 		break;
2624 	case 1:
2625 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2626 		break;
2627 	default:
2628 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2629 	}
2630 	/* block size depends on vm size and hw setup */
2631 	if (amdgpu_vm_block_size != -1)
2632 		adev->vm_manager.block_size =
2633 			min((unsigned)amdgpu_vm_block_size, max_bits
2634 			    - AMDGPU_GPU_PAGE_SHIFT
2635 			    - 9 * adev->vm_manager.num_level);
2636 	else if (adev->vm_manager.num_level > 1)
2637 		adev->vm_manager.block_size = 9;
2638 	else
2639 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2640 
2641 	if (amdgpu_vm_fragment_size == -1)
2642 		adev->vm_manager.fragment_size = fragment_size_default;
2643 	else
2644 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2645 
2646 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2647 		 vm_size, adev->vm_manager.num_level + 1,
2648 		 adev->vm_manager.block_size,
2649 		 adev->vm_manager.fragment_size);
2650 }
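/*
 * Worked example for the automatic sizing above (made-up machine with 16GB
 * of system memory, amdgpu_vm_size left at -1, min_vm_size at most 48GB and
 * a large enough max_bits): phys_ram_gb = 16, so vm_size becomes
 * roundup_pow_of_two(16 * 3) = 64GB and max_pfn = 64 << 18, i.e. 16M GPU
 * pages of 4KB each.
 */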
2651 
2652 /**
2653  * amdgpu_vm_init - initialize a vm instance
2654  *
2655  * @adev: amdgpu_device pointer
2656  * @vm: requested vm
2657  * @vm_context: Indicates if it is a GFX or Compute context
2658  * @pasid: Process address space identifier
2659  *
2660  * Init @vm fields.
2661  *
2662  * Returns:
2663  * 0 for success, error for failure.
2664  */
2665 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2666 		   int vm_context, unsigned int pasid)
2667 {
2668 	struct amdgpu_bo_param bp;
2669 	struct amdgpu_bo *root;
2670 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2671 		AMDGPU_VM_PTE_COUNT(adev) * 8);
2672 	unsigned ring_instance;
2673 	struct amdgpu_ring *ring;
2674 	struct drm_sched_rq *rq;
2675 	unsigned long size;
2676 	uint64_t flags;
2677 	int r, i;
2678 
2679 	vm->va = RB_ROOT_CACHED;
2680 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2681 		vm->reserved_vmid[i] = NULL;
2682 	INIT_LIST_HEAD(&vm->evicted);
2683 	INIT_LIST_HEAD(&vm->relocated);
2684 	mtx_init(&vm->moved_lock, IPL_TTY);
2685 	INIT_LIST_HEAD(&vm->moved);
2686 	INIT_LIST_HEAD(&vm->idle);
2687 	INIT_LIST_HEAD(&vm->freed);
2688 
2689 	/* create scheduler entity for page table updates */
2690 
2691 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2692 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
2693 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
2694 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2695 	r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
2696 	if (r)
2697 		return r;
2698 
2699 	vm->pte_support_ats = false;
2700 
2701 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2702 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2703 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2704 
2705 		if (adev->asic_type == CHIP_RAVEN)
2706 			vm->pte_support_ats = true;
2707 	} else {
2708 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2709 						AMDGPU_VM_USE_CPU_FOR_GFX);
2710 	}
2711 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2712 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2713 	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2714 		  "CPU update of VM recommended only for large BAR system\n");
2715 	vm->last_update = NULL;
2716 
2717 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2718 	if (vm->use_cpu_for_update)
2719 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2720 	else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
2721 		flags |= AMDGPU_GEM_CREATE_SHADOW;
2722 
2723 	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2724 	memset(&bp, 0, sizeof(bp));
2725 	bp.size = size;
2726 	bp.byte_align = align;
2727 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2728 	bp.flags = flags;
2729 	bp.type = ttm_bo_type_kernel;
2730 	bp.resv = NULL;
2731 	r = amdgpu_bo_create(adev, &bp, &root);
2732 	if (r)
2733 		goto error_free_sched_entity;
2734 
2735 	r = amdgpu_bo_reserve(root, true);
2736 	if (r)
2737 		goto error_free_root;
2738 
2739 	r = amdgpu_vm_clear_bo(adev, vm, root,
2740 			       adev->vm_manager.root_level,
2741 			       vm->pte_support_ats);
2742 	if (r)
2743 		goto error_unreserve;
2744 
2745 	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2746 	amdgpu_bo_unreserve(vm->root.base.bo);
2747 
2748 	if (pasid) {
2749 		unsigned long flags;
2750 
2751 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2752 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2753 			      GFP_ATOMIC);
2754 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2755 		if (r < 0)
2756 			goto error_free_root;
2757 
2758 		vm->pasid = pasid;
2759 	}
2760 
2761 #ifdef __linux__
2762 	INIT_KFIFO(vm->faults);
2763 #else
2764 	SIMPLEQ_INIT(&vm->faults);
2765 #endif
2766 	vm->fault_credit = 16;
2767 
2768 	return 0;
2769 
2770 error_unreserve:
2771 	amdgpu_bo_unreserve(vm->root.base.bo);
2772 
2773 error_free_root:
2774 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2775 	amdgpu_bo_unref(&vm->root.base.bo);
2776 	vm->root.base.bo = NULL;
2777 
2778 error_free_sched_entity:
2779 	drm_sched_entity_destroy(&vm->entity);
2780 
2781 	return r;
2782 }
2783 
2784 /**
2785  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2786  *
2787  * @adev: amdgpu_device pointer
2788  * @vm: requested vm
2789  *
2790  * This only works on GFX VMs that don't have any BOs added and no
2791  * page tables allocated yet.
2792  *
2793  * Changes the following VM parameters:
2794  * - use_cpu_for_update
2795  * - pte_support_ats
2796  * - pasid (old PASID is released, because compute manages its own PASIDs)
2797  *
2798  * Reinitializes the page directory to reflect the changed ATS
2799  * setting.
2800  *
2801  * Returns:
2802  * 0 for success, -errno for errors.
2803  */
2804 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2805 {
2806 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2807 	int r;
2808 
2809 	r = amdgpu_bo_reserve(vm->root.base.bo, true);
2810 	if (r)
2811 		return r;
2812 
2813 	/* Sanity checks */
2814 #ifdef __linux__
2815 	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2816 #else
2817 	if (!RB_EMPTY_ROOT(&vm->va) || vm->root.entries) {
2818 #endif
2819 		r = -EINVAL;
2820 		goto error;
2821 	}
2822 
2823 	/* Check if PD needs to be reinitialized and do it before
2824 	 * changing any other state, in case it fails.
2825 	 */
2826 	if (pte_support_ats != vm->pte_support_ats) {
2827 		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2828 			       adev->vm_manager.root_level,
2829 			       pte_support_ats);
2830 		if (r)
2831 			goto error;
2832 	}
2833 
2834 	/* Update VM state */
2835 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2836 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2837 	vm->pte_support_ats = pte_support_ats;
2838 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2839 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2840 	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2841 		  "CPU update of VM recommended only for large BAR system\n");
2842 
2843 	if (vm->pasid) {
2844 		unsigned long flags;
2845 
2846 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2847 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2848 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2849 
2850 		vm->pasid = 0;
2851 	}
2852 
2853 	/* Free the shadow bo for compute VM */
2854 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2855 
2856 error:
2857 	amdgpu_bo_unreserve(vm->root.base.bo);
2858 	return r;
2859 }
2860 
2861 /**
2862  * amdgpu_vm_free_levels - free PD/PT levels
2863  *
2864  * @adev: amdgpu device structure
2865  * @parent: PD/PT starting level to free
2866  * @level: level of parent structure
2867  *
2868  * Free the page directory or page table level and all sub levels.
2869  */
2870 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2871 				  struct amdgpu_vm_pt *parent,
2872 				  unsigned level)
2873 {
2874 	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2875 
2876 	if (parent->base.bo) {
2877 		list_del(&parent->base.bo_list);
2878 		list_del(&parent->base.vm_status);
2879 		amdgpu_bo_unref(&parent->base.bo->shadow);
2880 		amdgpu_bo_unref(&parent->base.bo);
2881 	}
2882 
2883 	if (parent->entries)
2884 		for (i = 0; i < num_entries; i++)
2885 			amdgpu_vm_free_levels(adev, &parent->entries[i],
2886 					      level + 1);
2887 
2888 	kvfree(parent->entries);
2889 }
2890 
2891 /**
2892  * amdgpu_vm_fini - tear down a vm instance
2893  *
2894  * @adev: amdgpu_device pointer
2895  * @vm: requested vm
2896  *
2897  * Tear down @vm.
2898  * Unbind the VM and remove all bos from the vm bo list
2899  */
2900 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2901 {
2902 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2903 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2904 	struct amdgpu_bo *root;
2905 	u64 fault;
2906 	int i, r;
2907 	struct amdgpu_vm_fault *vmf;
2908 
2909 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2910 
2911 	/* Clear pending page faults from IH when the VM is destroyed */
2912 #ifdef __linux__
2913 	while (kfifo_get(&vm->faults, &fault))
2914 		amdgpu_ih_clear_fault(adev, fault);
2915 #else
2916 	while (!SIMPLEQ_EMPTY(&vm->faults)) {
2917 		vmf = SIMPLEQ_FIRST(&vm->faults);
2918 		fault = vmf->val;
2919 		SIMPLEQ_REMOVE_HEAD(&vm->faults, vm_fault_entry);
2920 		free(vmf, M_DRM, sizeof(*vmf));
2921 		amdgpu_ih_clear_fault(adev, fault);
2922 	}
2923 #endif
2924 
2925 	if (vm->pasid) {
2926 		unsigned long flags;
2927 
2928 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2929 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2930 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2931 	}
2932 
2933 	drm_sched_entity_destroy(&vm->entity);
2934 
2935 #ifdef __linux__
2936 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2937 		dev_err(adev->dev, "still active bo inside vm\n");
2938 	}
2939 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2940 					     &vm->va.rb_root, rb) {
2941 		list_del(&mapping->list);
2942 		amdgpu_vm_it_remove(mapping, &vm->va);
2943 		kfree(mapping);
2944 	}
2945 #else
2946 	if (!RB_EMPTY_ROOT(&vm->va)) {
2947 		dev_err(adev->dev, "still active bo inside vm\n");
2948 	}
2949 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2950 					     &vm->va, rb) {
2951 		list_del(&mapping->list);
2952 		amdgpu_vm_it_remove(mapping, &vm->va);
2953 		kfree(mapping);
2954 	}
2955 #endif
2956 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2957 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2958 			amdgpu_vm_prt_fini(adev, vm);
2959 			prt_fini_needed = false;
2960 		}
2961 
2962 		list_del(&mapping->list);
2963 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2964 	}
2965 
2966 	root = amdgpu_bo_ref(vm->root.base.bo);
2967 	r = amdgpu_bo_reserve(root, true);
2968 	if (r) {
2969 		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2970 	} else {
2971 		amdgpu_vm_free_levels(adev, &vm->root,
2972 				      adev->vm_manager.root_level);
2973 		amdgpu_bo_unreserve(root);
2974 	}
2975 	amdgpu_bo_unref(&root);
2976 	dma_fence_put(vm->last_update);
2977 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2978 		amdgpu_vmid_free_reserved(adev, vm, i);
2979 }
2980 
2981 /**
2982  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2983  *
2984  * @adev: amdgpu_device pointer
2985  * @pasid: PASID to identify the VM
2986  *
2987  * This function is expected to be called in interrupt context.
2988  *
2989  * Returns:
2990  * True if there was fault credit, false otherwise
2991  */
2992 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2993 				  unsigned int pasid)
2994 {
2995 	struct amdgpu_vm *vm;
2996 
2997 	spin_lock(&adev->vm_manager.pasid_lock);
2998 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2999 	if (!vm) {
3000 		/* VM not found, can't track fault credit */
3001 		spin_unlock(&adev->vm_manager.pasid_lock);
3002 		return true;
3003 	}
3004 
3005 	/* No lock needed. Only accessed by IRQ handler */
3006 	if (!vm->fault_credit) {
3007 		/* Too many faults in this VM */
3008 		spin_unlock(&adev->vm_manager.pasid_lock);
3009 		return false;
3010 	}
3011 
3012 	vm->fault_credit--;
3013 	spin_unlock(&adev->vm_manager.pasid_lock);
3014 	return true;
3015 }
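/*
 * Note: vm->fault_credit is initialised to 16 in amdgpu_vm_init(), so each
 * VM gets a budget of 16 page faults before this check starts returning
 * false and further faults for that PASID can be dropped by the caller.
 */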
3016 
3017 /**
3018  * amdgpu_vm_manager_init - init the VM manager
3019  *
3020  * @adev: amdgpu_device pointer
3021  *
3022  * Initialize the VM manager structures
3023  */
3024 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3025 {
3026 	unsigned i;
3027 
3028 	amdgpu_vmid_mgr_init(adev);
3029 
3030 	adev->vm_manager.fence_context =
3031 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3032 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3033 		adev->vm_manager.seqno[i] = 0;
3034 
3035 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
3036 	mtx_init(&adev->vm_manager.prt_lock, IPL_TTY);
3037 	atomic_set(&adev->vm_manager.num_prt_users, 0);
3038 
3039 	/* If not overridden by the user, by default, only in large BAR systems
3040 	 * Compute VM tables will be updated by CPU
3041 	 */
3042 #ifdef CONFIG_X86_64
3043 	if (amdgpu_vm_update_mode == -1) {
3044 		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
3045 			adev->vm_manager.vm_update_mode =
3046 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3047 		else
3048 			adev->vm_manager.vm_update_mode = 0;
3049 	} else
3050 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3051 #else
3052 	adev->vm_manager.vm_update_mode = 0;
3053 #endif
3054 
3055 	idr_init(&adev->vm_manager.pasid_idr);
3056 	mtx_init(&adev->vm_manager.pasid_lock, IPL_TTY);
3057 }
3058 
3059 /**
3060  * amdgpu_vm_manager_fini - cleanup VM manager
3061  *
3062  * @adev: amdgpu_device pointer
3063  *
3064  * Cleanup the VM manager and free resources.
3065  */
3066 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3067 {
3068 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3069 	idr_destroy(&adev->vm_manager.pasid_idr);
3070 
3071 	amdgpu_vmid_mgr_fini(adev);
3072 }
3073 
3074 /**
3075  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3076  *
3077  * @dev: drm device pointer
3078  * @data: drm_amdgpu_vm
3079  * @filp: drm file pointer
3080  *
3081  * Returns:
3082  * 0 for success, -errno for errors.
3083  */
3084 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3085 {
3086 	union drm_amdgpu_vm *args = data;
3087 	struct amdgpu_device *adev = dev->dev_private;
3088 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
3089 	int r;
3090 
3091 	switch (args->in.op) {
3092 	case AMDGPU_VM_OP_RESERVE_VMID:
3093 		/* currently, we only have a requirement to reserve a vmid from the gfxhub */
3094 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
3095 		if (r)
3096 			return r;
3097 		break;
3098 	case AMDGPU_VM_OP_UNRESERVE_VMID:
3099 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
3100 		break;
3101 	default:
3102 		return -EINVAL;
3103 	}
3104 
3105 	return 0;
3106 }
3107 
3108 /**
3109  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3110  *
3111  * @adev: amdgpu_device pointer
3112  * @pasid: PASID identifier for VM
3113  * @task_info: task_info to fill.
3114  */
3115 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3116 			 struct amdgpu_task_info *task_info)
3117 {
3118 	struct amdgpu_vm *vm;
3119 	unsigned long flags;
3120 
3121 	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3122 
3123 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3124 	if (vm)
3125 		*task_info = vm->task_info;
3126 
3127 	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3128 }
3129 
3130 /**
3131  * amdgpu_vm_set_task_info - Sets VMs task info.
3132  *
3133  * @vm: vm for which to set the info
3134  */
3135 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3136 {
3137 	if (!vm->task_info.pid) {
3138 #ifdef __linux__
3139 		vm->task_info.pid = current->pid;
3140 		get_task_comm(vm->task_info.task_name, current);
3141 
3142 		if (current->group_leader->mm == current->mm) {
3143 			vm->task_info.tgid = current->group_leader->pid;
3144 			get_task_comm(vm->task_info.process_name, current->group_leader);
3145 		}
3146 #else
3147 		vm->task_info.pid = curproc->p_p->ps_pid;
3148 		strlcpy(vm->task_info.task_name, curproc->p_p->ps_comm,
3149 		    sizeof(vm->task_info.task_name));
3150 #endif
3151 	}
3152 }
3153