xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c (revision a3e4d62dc8a51d2922cbfbc17b990b2537d3f865)
1fb4d8502Sjsg /*
2fb4d8502Sjsg  * Copyright 2008 Advanced Micro Devices, Inc.
3fb4d8502Sjsg  * Copyright 2008 Red Hat Inc.
4fb4d8502Sjsg  * Copyright 2009 Jerome Glisse.
5fb4d8502Sjsg  *
6fb4d8502Sjsg  * Permission is hereby granted, free of charge, to any person obtaining a
7fb4d8502Sjsg  * copy of this software and associated documentation files (the "Software"),
8fb4d8502Sjsg  * to deal in the Software without restriction, including without limitation
9fb4d8502Sjsg  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10fb4d8502Sjsg  * and/or sell copies of the Software, and to permit persons to whom the
11fb4d8502Sjsg  * Software is furnished to do so, subject to the following conditions:
12fb4d8502Sjsg  *
13fb4d8502Sjsg  * The above copyright notice and this permission notice shall be included in
14fb4d8502Sjsg  * all copies or substantial portions of the Software.
15fb4d8502Sjsg  *
16fb4d8502Sjsg  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17fb4d8502Sjsg  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18fb4d8502Sjsg  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19fb4d8502Sjsg  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20fb4d8502Sjsg  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21fb4d8502Sjsg  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22fb4d8502Sjsg  * OTHER DEALINGS IN THE SOFTWARE.
23fb4d8502Sjsg  *
24fb4d8502Sjsg  * Authors: Dave Airlie
25fb4d8502Sjsg  *          Alex Deucher
26fb4d8502Sjsg  *          Jerome Glisse
27fb4d8502Sjsg  */
285ca02815Sjsg 
29fb4d8502Sjsg #include <linux/dma-fence-array.h>
30fb4d8502Sjsg #include <linux/interval_tree_generic.h>
31fb4d8502Sjsg #include <linux/idr.h>
32ad8b1aafSjsg #include <linux/dma-buf.h>
33c349dbc7Sjsg 
34fb4d8502Sjsg #include <drm/amdgpu_drm.h>
355ca02815Sjsg #include <drm/drm_drv.h>
36f005ef32Sjsg #include <drm/ttm/ttm_tt.h>
37f005ef32Sjsg #include <drm/drm_exec.h>
38fb4d8502Sjsg #include "amdgpu.h"
39fb4d8502Sjsg #include "amdgpu_trace.h"
40fb4d8502Sjsg #include "amdgpu_amdkfd.h"
41fb4d8502Sjsg #include "amdgpu_gmc.h"
42c349dbc7Sjsg #include "amdgpu_xgmi.h"
43ad8b1aafSjsg #include "amdgpu_dma_buf.h"
445ca02815Sjsg #include "amdgpu_res_cursor.h"
455ca02815Sjsg #include "../amdkfd/kfd_svm.h"
46fb4d8502Sjsg 
47fb4d8502Sjsg /**
48fb4d8502Sjsg  * DOC: GPUVM
49fb4d8502Sjsg  *
50f005ef32Sjsg  * GPUVM is the MMU functionality provided on the GPU.
51f005ef32Sjsg  * GPUVM is similar to the legacy GART on older ASICs; however,
52f005ef32Sjsg  * rather than there being a single global GART table
53f005ef32Sjsg  * for the entire GPU, there can be multiple GPUVM page tables active
54f005ef32Sjsg  * at any given time.  The GPUVM page tables can contain a mix
55f005ef32Sjsg  * of VRAM pages and system pages (both memory and MMIO), and system pages
56fb4d8502Sjsg  * can be mapped as snooped (cached system pages) or unsnooped
57fb4d8502Sjsg  * (uncached system pages).
58f005ef32Sjsg  *
59f005ef32Sjsg  * Each active GPUVM has an ID associated with it and there is a page table
60f005ef32Sjsg  * linked with each VMID.  When executing a command buffer,
61f005ef32Sjsg  * the kernel tells the engine what VMID to use for that command
62fb4d8502Sjsg  * buffer.  VMIDs are allocated dynamically as commands are submitted.
63fb4d8502Sjsg  * The userspace drivers maintain their own address space and the kernel
64fb4d8502Sjsg  * sets up their page tables accordingly when they submit their
65fb4d8502Sjsg  * command buffers and a VMID is assigned.
66f005ef32Sjsg  * The hardware supports up to 16 active GPUVMs at any given time.
67f005ef32Sjsg  *
68f005ef32Sjsg  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
69f005ef32Sjsg  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
70f005ef32Sjsg  * as other features such as encryption and caching attributes.
71f005ef32Sjsg  *
72f005ef32Sjsg  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
73f005ef32Sjsg  * addition to an aperture managed by a page table, VMID 0 also has
74f005ef32Sjsg  * several other apertures.  There is an aperture for direct access to VRAM
75f005ef32Sjsg  * and there is a legacy AGP aperture which just forwards accesses directly
76f005ef32Sjsg  * to the matching system physical addresses (or IOVAs when an IOMMU is
77f005ef32Sjsg  * present).  These apertures provide direct access to these memories without
78f005ef32Sjsg  * incurring the overhead of a page table.  VMID 0 is used by the kernel
79f005ef32Sjsg  * driver for tasks like memory management.
80f005ef32Sjsg  *
81f005ef32Sjsg  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
82f005ef32Sjsg  * For user applications, each application can have its own unique GPUVM
83f005ef32Sjsg  * address space.  The application manages the address space and the kernel
84f005ef32Sjsg  * driver manages the GPUVM page tables for each process.  If a GPU client
85f005ef32Sjsg  * accesses an invalid page, it will generate a GPU page fault, similar to
86f005ef32Sjsg  * accessing an invalid page on a CPU.
87fb4d8502Sjsg  */
88fb4d8502Sjsg 
89fb4d8502Sjsg #define START(node) ((node)->start)
90fb4d8502Sjsg #define LAST(node) ((node)->last)
91fb4d8502Sjsg 
92fb4d8502Sjsg #ifdef __linux__
93fb4d8502Sjsg INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94fb4d8502Sjsg 		     START, LAST, static, amdgpu_vm_it)
95fb4d8502Sjsg #else
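/*
 * Non-Linux fallback: without INTERVAL_TREE_DEFINE() these helpers emulate
 * the interval tree lookups with a plain linear walk over the rbtree, so
 * lookups are O(n) instead of using the augmented subtree-last information.
 */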
96fb4d8502Sjsg static struct amdgpu_bo_va_mapping *
97fb4d8502Sjsg amdgpu_vm_it_iter_first(struct rb_root_cached *root, uint64_t start,
98fb4d8502Sjsg     uint64_t last)
99fb4d8502Sjsg {
100fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *node;
101fb4d8502Sjsg 	struct rb_node *rb;
102fb4d8502Sjsg 
103c349dbc7Sjsg 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
104fb4d8502Sjsg 		node = rb_entry(rb, typeof(*node), rb);
105fb4d8502Sjsg 		if (LAST(node) >= start && START(node) <= last)
106fb4d8502Sjsg 			return node;
107fb4d8502Sjsg 	}
108fb4d8502Sjsg 	return NULL;
109fb4d8502Sjsg }
110fb4d8502Sjsg 
111fb4d8502Sjsg static struct amdgpu_bo_va_mapping *
112fb4d8502Sjsg amdgpu_vm_it_iter_next(struct amdgpu_bo_va_mapping *node, uint64_t start,
113fb4d8502Sjsg     uint64_t last)
114fb4d8502Sjsg {
115fb4d8502Sjsg 	struct rb_node *rb = &node->rb;
116fb4d8502Sjsg 
117fb4d8502Sjsg 	for (rb = rb_next(rb); rb; rb = rb_next(rb)) {
118fb4d8502Sjsg 		node = rb_entry(rb, typeof(*node), rb);
119fb4d8502Sjsg 		if (LAST(node) >= start && START(node) <= last)
120fb4d8502Sjsg 			return node;
121fb4d8502Sjsg 	}
122fb4d8502Sjsg 	return NULL;
123fb4d8502Sjsg }
124fb4d8502Sjsg 
125fb4d8502Sjsg static void
126fb4d8502Sjsg amdgpu_vm_it_remove(struct amdgpu_bo_va_mapping *node,
127fb4d8502Sjsg     struct rb_root_cached *root)
128fb4d8502Sjsg {
129c349dbc7Sjsg 	rb_erase_cached(&node->rb, root);
130fb4d8502Sjsg }
131fb4d8502Sjsg 
132fb4d8502Sjsg static void
133fb4d8502Sjsg amdgpu_vm_it_insert(struct amdgpu_bo_va_mapping *node,
134fb4d8502Sjsg     struct rb_root_cached *root)
135fb4d8502Sjsg {
136c349dbc7Sjsg 	struct rb_node **iter = &root->rb_root.rb_node;
137fb4d8502Sjsg 	struct rb_node *parent = NULL;
138fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *iter_node;
139fb4d8502Sjsg 
140fb4d8502Sjsg 	while (*iter) {
141fb4d8502Sjsg 		parent = *iter;
142fb4d8502Sjsg 		iter_node = rb_entry(*iter, struct amdgpu_bo_va_mapping, rb);
143fb4d8502Sjsg 
144fb4d8502Sjsg 		if (node->start < iter_node->start)
145fb4d8502Sjsg 			iter = &(*iter)->rb_left;
146fb4d8502Sjsg 		else
147fb4d8502Sjsg 			iter = &(*iter)->rb_right;
148fb4d8502Sjsg 	}
149fb4d8502Sjsg 
150fb4d8502Sjsg 	rb_link_node(&node->rb, parent, iter);
151c349dbc7Sjsg 	rb_insert_color_cached(&node->rb, root, false);
152fb4d8502Sjsg }
153fb4d8502Sjsg #endif
154fb4d8502Sjsg 
155fb4d8502Sjsg #undef START
156fb4d8502Sjsg #undef LAST
157fb4d8502Sjsg 
158fb4d8502Sjsg /**
159fb4d8502Sjsg  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
160fb4d8502Sjsg  */
161fb4d8502Sjsg struct amdgpu_prt_cb {
162fb4d8502Sjsg 
163fb4d8502Sjsg 	/**
164fb4d8502Sjsg 	 * @adev: amdgpu device
165fb4d8502Sjsg 	 */
166fb4d8502Sjsg 	struct amdgpu_device *adev;
167fb4d8502Sjsg 
168fb4d8502Sjsg 	/**
169fb4d8502Sjsg 	 * @cb: callback
170fb4d8502Sjsg 	 */
171fb4d8502Sjsg 	struct dma_fence_cb cb;
172fb4d8502Sjsg };
173fb4d8502Sjsg 
1745ca02815Sjsg /**
175f005ef32Sjsg  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
1761bb76ff1Sjsg  */
177f005ef32Sjsg struct amdgpu_vm_tlb_seq_struct {
1781bb76ff1Sjsg 	/**
1791bb76ff1Sjsg 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
1801bb76ff1Sjsg 	 */
1811bb76ff1Sjsg 	struct amdgpu_vm *vm;
1821bb76ff1Sjsg 
1831bb76ff1Sjsg 	/**
1841bb76ff1Sjsg 	 * @cb: callback
1851bb76ff1Sjsg 	 */
1861bb76ff1Sjsg 	struct dma_fence_cb cb;
1871bb76ff1Sjsg };
1881bb76ff1Sjsg 
1891bb76ff1Sjsg /**
1905ca02815Sjsg  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
1915ca02815Sjsg  *
1925ca02815Sjsg  * @adev: amdgpu_device pointer
1935ca02815Sjsg  * @vm: amdgpu_vm pointer
1945ca02815Sjsg  * @pasid: the pasid the VM is using on this GPU
1955ca02815Sjsg  *
1965ca02815Sjsg  * Set the pasid this VM is using on this GPU; it can also be used to remove the
1975ca02815Sjsg  * pasid by passing in zero.
1985ca02815Sjsg  *
1995ca02815Sjsg  */
2005ca02815Sjsg int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2015ca02815Sjsg 			u32 pasid)
2025ca02815Sjsg {
2035ca02815Sjsg 	int r;
2045ca02815Sjsg 
2055ca02815Sjsg 	if (vm->pasid == pasid)
2065ca02815Sjsg 		return 0;
2075ca02815Sjsg 
2085ca02815Sjsg 	if (vm->pasid) {
2095ca02815Sjsg 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
2105ca02815Sjsg 		if (r < 0)
2115ca02815Sjsg 			return r;
2125ca02815Sjsg 
2135ca02815Sjsg 		vm->pasid = 0;
2145ca02815Sjsg 	}
2155ca02815Sjsg 
2165ca02815Sjsg 	if (pasid) {
2175ca02815Sjsg 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
2185ca02815Sjsg 					GFP_KERNEL));
2195ca02815Sjsg 		if (r < 0)
2205ca02815Sjsg 			return r;
2215ca02815Sjsg 
2225ca02815Sjsg 		vm->pasid = pasid;
2235ca02815Sjsg 	}
2245ca02815Sjsg 
2255ca02815Sjsg 
2265ca02815Sjsg 	return 0;
2275ca02815Sjsg }
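
/*
 * Illustrative use only (not called from this file): a hypothetical caller
 * binds a pasid to the VM and later removes it again by passing zero.  Error
 * handling and locking around the calls are the caller's responsibility.
 *
 *	r = amdgpu_vm_set_pasid(adev, vm, pasid);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_set_pasid(adev, vm, 0);
 */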
2285ca02815Sjsg 
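/*
 * Rough overview of the vm_bo state machine implemented by the helpers
 * below (summarized from their individual comments): evicted BOs are
 * validated and become moved (per VM BOs) or relocated (PDs/PTs); moved and
 * invalidated BOs wait for their page table updates, relocated PDs/PTs wait
 * for their parent PD update; once those updates are committed the BOs end
 * up idle (PDs/PTs and per VM BOs) or done (normal BOs).
 */
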
229fb4d8502Sjsg /**
230c349dbc7Sjsg  * amdgpu_vm_bo_evicted - vm_bo is evicted
231c349dbc7Sjsg  *
232c349dbc7Sjsg  * @vm_bo: vm_bo which is evicted
233c349dbc7Sjsg  *
234c349dbc7Sjsg  * State for PDs/PTs and per VM BOs which are not at the location they should
235c349dbc7Sjsg  * be.
236c349dbc7Sjsg  */
237c349dbc7Sjsg static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
238c349dbc7Sjsg {
239c349dbc7Sjsg 	struct amdgpu_vm *vm = vm_bo->vm;
240c349dbc7Sjsg 	struct amdgpu_bo *bo = vm_bo->bo;
241c349dbc7Sjsg 
242c349dbc7Sjsg 	vm_bo->moved = true;
2431bb76ff1Sjsg 	spin_lock(&vm_bo->vm->status_lock);
244c349dbc7Sjsg 	if (bo->tbo.type == ttm_bo_type_kernel)
245c349dbc7Sjsg 		list_move(&vm_bo->vm_status, &vm->evicted);
246c349dbc7Sjsg 	else
247c349dbc7Sjsg 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
2481bb76ff1Sjsg 	spin_unlock(&vm_bo->vm->status_lock);
249c349dbc7Sjsg }
250c349dbc7Sjsg /**
251c349dbc7Sjsg  * amdgpu_vm_bo_moved - vm_bo is moved
252c349dbc7Sjsg  *
253c349dbc7Sjsg  * @vm_bo: vm_bo which is moved
254c349dbc7Sjsg  *
255c349dbc7Sjsg  * State for per VM BOs which are moved, but that change is not yet reflected
256c349dbc7Sjsg  * in the page tables.
257c349dbc7Sjsg  */
258c349dbc7Sjsg static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
259c349dbc7Sjsg {
2601bb76ff1Sjsg 	spin_lock(&vm_bo->vm->status_lock);
261c349dbc7Sjsg 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
2621bb76ff1Sjsg 	spin_unlock(&vm_bo->vm->status_lock);
263c349dbc7Sjsg }
264c349dbc7Sjsg 
265c349dbc7Sjsg /**
266c349dbc7Sjsg  * amdgpu_vm_bo_idle - vm_bo is idle
267c349dbc7Sjsg  *
268c349dbc7Sjsg  * @vm_bo: vm_bo which is now idle
269c349dbc7Sjsg  *
270c349dbc7Sjsg  * State for PDs/PTs and per VM BOs which have gone through the state machine
271c349dbc7Sjsg  * and are now idle.
272c349dbc7Sjsg  */
273c349dbc7Sjsg static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
274c349dbc7Sjsg {
2751bb76ff1Sjsg 	spin_lock(&vm_bo->vm->status_lock);
276c349dbc7Sjsg 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
2771bb76ff1Sjsg 	spin_unlock(&vm_bo->vm->status_lock);
278c349dbc7Sjsg 	vm_bo->moved = false;
279c349dbc7Sjsg }
280c349dbc7Sjsg 
281c349dbc7Sjsg /**
282c349dbc7Sjsg  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
283c349dbc7Sjsg  *
284c349dbc7Sjsg  * @vm_bo: vm_bo which is now invalidated
285c349dbc7Sjsg  *
286c349dbc7Sjsg  * State for normal BOs which are invalidated and that change is not yet reflected
287c349dbc7Sjsg  * in the PTs.
288c349dbc7Sjsg  */
289c349dbc7Sjsg static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
290c349dbc7Sjsg {
2911bb76ff1Sjsg 	spin_lock(&vm_bo->vm->status_lock);
292c349dbc7Sjsg 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
2931bb76ff1Sjsg 	spin_unlock(&vm_bo->vm->status_lock);
294c349dbc7Sjsg }
295c349dbc7Sjsg 
296c349dbc7Sjsg /**
297c349dbc7Sjsg  * amdgpu_vm_bo_relocated - vm_bo is relocated
298c349dbc7Sjsg  *
299c349dbc7Sjsg  * @vm_bo: vm_bo which is relocated
300c349dbc7Sjsg  *
301c349dbc7Sjsg  * State for PDs/PTs which need to update their parent PD.
302c349dbc7Sjsg  * For the root PD, just move to idle state.
303c349dbc7Sjsg  */
304c349dbc7Sjsg static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
305c349dbc7Sjsg {
3061bb76ff1Sjsg 	if (vm_bo->bo->parent) {
3071bb76ff1Sjsg 		spin_lock(&vm_bo->vm->status_lock);
308c349dbc7Sjsg 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
3091bb76ff1Sjsg 		spin_unlock(&vm_bo->vm->status_lock);
3101bb76ff1Sjsg 	} else {
311c349dbc7Sjsg 		amdgpu_vm_bo_idle(vm_bo);
312c349dbc7Sjsg 	}
3131bb76ff1Sjsg }
314c349dbc7Sjsg 
315c349dbc7Sjsg /**
316c349dbc7Sjsg  * amdgpu_vm_bo_done - vm_bo is done
317c349dbc7Sjsg  *
318c349dbc7Sjsg  * @vm_bo: vm_bo which is now done
319c349dbc7Sjsg  *
320c349dbc7Sjsg  * State for normal BOs which are invalidated and that change has been updated
321c349dbc7Sjsg  * in the PTs.
322c349dbc7Sjsg  */
323c349dbc7Sjsg static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
324c349dbc7Sjsg {
3251bb76ff1Sjsg 	spin_lock(&vm_bo->vm->status_lock);
3265ca02815Sjsg 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
3271bb76ff1Sjsg 	spin_unlock(&vm_bo->vm->status_lock);
328c349dbc7Sjsg }
329c349dbc7Sjsg 
330c349dbc7Sjsg /**
331f005ef32Sjsg  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
332f005ef32Sjsg  * @vm: the VM which state machine to reset
333f005ef32Sjsg  *
334f005ef32Sjsg  * Move all vm_bo objects in the VM into a state where they will be updated
335f005ef32Sjsg  * again during validation.
336f005ef32Sjsg  */
337f005ef32Sjsg static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
338f005ef32Sjsg {
339f005ef32Sjsg 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
340f005ef32Sjsg 
341f005ef32Sjsg 	spin_lock(&vm->status_lock);
342f005ef32Sjsg 	list_splice_init(&vm->done, &vm->invalidated);
343f005ef32Sjsg 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
344f005ef32Sjsg 		vm_bo->moved = true;
345f005ef32Sjsg 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
346f005ef32Sjsg 		struct amdgpu_bo *bo = vm_bo->bo;
347f005ef32Sjsg 
348f005ef32Sjsg 		vm_bo->moved = true;
349f005ef32Sjsg 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
350f005ef32Sjsg 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
351f005ef32Sjsg 		else if (bo->parent)
352f005ef32Sjsg 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
353f005ef32Sjsg 	}
354f005ef32Sjsg 	spin_unlock(&vm->status_lock);
355f005ef32Sjsg }
356f005ef32Sjsg 
357f005ef32Sjsg /**
358c349dbc7Sjsg  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
359c349dbc7Sjsg  *
360c349dbc7Sjsg  * @base: base structure for tracking BO usage in a VM
361c349dbc7Sjsg  * @vm: vm to which bo is to be added
362c349dbc7Sjsg  * @bo: amdgpu buffer object
363c349dbc7Sjsg  *
364c349dbc7Sjsg  * Initialize a bo_va_base structure and add it to the appropriate lists
365c349dbc7Sjsg  *
366c349dbc7Sjsg  */
3671bb76ff1Sjsg void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
3681bb76ff1Sjsg 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
369c349dbc7Sjsg {
370c349dbc7Sjsg 	base->vm = vm;
371c349dbc7Sjsg 	base->bo = bo;
372c349dbc7Sjsg 	base->next = NULL;
373c349dbc7Sjsg 	INIT_LIST_HEAD(&base->vm_status);
374c349dbc7Sjsg 
375c349dbc7Sjsg 	if (!bo)
376c349dbc7Sjsg 		return;
377c349dbc7Sjsg 	base->next = bo->vm_bo;
378c349dbc7Sjsg 	bo->vm_bo = base;
379c349dbc7Sjsg 
3805ca02815Sjsg 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
381c349dbc7Sjsg 		return;
382c349dbc7Sjsg 
3831bb76ff1Sjsg 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
3841bb76ff1Sjsg 
3851bb76ff1Sjsg 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
386c349dbc7Sjsg 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
387c349dbc7Sjsg 		amdgpu_vm_bo_relocated(base);
388c349dbc7Sjsg 	else
389c349dbc7Sjsg 		amdgpu_vm_bo_idle(base);
390c349dbc7Sjsg 
391c349dbc7Sjsg 	if (bo->preferred_domains &
3925ca02815Sjsg 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
393c349dbc7Sjsg 		return;
394c349dbc7Sjsg 
395c349dbc7Sjsg 	/*
396c349dbc7Sjsg 	 * We checked all the prerequisites, but it looks like this per VM BO
397c349dbc7Sjsg 	 * is currently evicted. Add the BO to the evicted list to make sure it
398c349dbc7Sjsg 	 * is validated on the next VM use to avoid a fault.
399c349dbc7Sjsg 	 */
400c349dbc7Sjsg 	amdgpu_vm_bo_evicted(base);
401c349dbc7Sjsg }
402c349dbc7Sjsg 
403c349dbc7Sjsg /**
404f005ef32Sjsg  * amdgpu_vm_lock_pd - lock PD in drm_exec
405fb4d8502Sjsg  *
406fb4d8502Sjsg  * @vm: vm providing the BOs
407f005ef32Sjsg  * @exec: drm execution context
408f005ef32Sjsg  * @num_fences: number of extra fences to reserve
409fb4d8502Sjsg  *
410f005ef32Sjsg  * Lock the VM root PD in the DRM execution context.
411fb4d8502Sjsg  */
412f005ef32Sjsg int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
413f005ef32Sjsg 		      unsigned int num_fences)
414fb4d8502Sjsg {
415f005ef32Sjsg 	/* We need at least two fences for the VM PD/PT updates */
416f005ef32Sjsg 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
417f005ef32Sjsg 				    2 + num_fences);
418fb4d8502Sjsg }
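
/*
 * Typical call pattern (sketch only; the exact drm_exec_init() arguments
 * differ between kernel versions):
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 2);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto error;
 *	}
 */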
419fb4d8502Sjsg 
420fb4d8502Sjsg /**
421c349dbc7Sjsg  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
422c349dbc7Sjsg  *
423c349dbc7Sjsg  * @adev: amdgpu device pointer
424c349dbc7Sjsg  * @vm: vm providing the BOs
425c349dbc7Sjsg  *
426c349dbc7Sjsg  * Move all BOs to the end of LRU and remember their positions to put them
427c349dbc7Sjsg  * together.
428c349dbc7Sjsg  */
429c349dbc7Sjsg void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
430c349dbc7Sjsg 				struct amdgpu_vm *vm)
431c349dbc7Sjsg {
4325ca02815Sjsg 	spin_lock(&adev->mman.bdev.lru_lock);
4331bb76ff1Sjsg 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
4345ca02815Sjsg 	spin_unlock(&adev->mman.bdev.lru_lock);
435c349dbc7Sjsg }
436c349dbc7Sjsg 
437f005ef32Sjsg /* Create scheduler entities for page table updates */
438f005ef32Sjsg static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
439f005ef32Sjsg 				   struct amdgpu_vm *vm)
440f005ef32Sjsg {
441f005ef32Sjsg 	int r;
442f005ef32Sjsg 
443f005ef32Sjsg 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
444f005ef32Sjsg 				  adev->vm_manager.vm_pte_scheds,
445f005ef32Sjsg 				  adev->vm_manager.vm_pte_num_scheds, NULL);
446f005ef32Sjsg 	if (r)
447f005ef32Sjsg 		goto error;
448f005ef32Sjsg 
449f005ef32Sjsg 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
450f005ef32Sjsg 				     adev->vm_manager.vm_pte_scheds,
451f005ef32Sjsg 				     adev->vm_manager.vm_pte_num_scheds, NULL);
452f005ef32Sjsg 
453f005ef32Sjsg error:
454f005ef32Sjsg 	drm_sched_entity_destroy(&vm->immediate);
455f005ef32Sjsg 	return r;
456f005ef32Sjsg }
457f005ef32Sjsg 
458f005ef32Sjsg /* Destroy the scheduler entities for page table updates */
459f005ef32Sjsg static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
460f005ef32Sjsg {
461f005ef32Sjsg 	drm_sched_entity_destroy(&vm->immediate);
462f005ef32Sjsg 	drm_sched_entity_destroy(&vm->delayed);
463f005ef32Sjsg }
464f005ef32Sjsg 
465f005ef32Sjsg /**
466f005ef32Sjsg  * amdgpu_vm_generation - return the page table re-generation counter
467f005ef32Sjsg  * @adev: the amdgpu_device
468f005ef32Sjsg  * @vm: optional VM to check, might be NULL
469f005ef32Sjsg  *
470f005ef32Sjsg  * Returns a page table re-generation token to allow checking if submissions
471f005ef32Sjsg  * are still valid to use this VM. The VM parameter might be NULL in which case
472f005ef32Sjsg  * just the VRAM lost counter will be used.
473f005ef32Sjsg  */
474f005ef32Sjsg uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
475f005ef32Sjsg {
476f005ef32Sjsg 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
477f005ef32Sjsg 
478f005ef32Sjsg 	if (!vm)
479f005ef32Sjsg 		return result;
480f005ef32Sjsg 
48126d6e9f7Sjsg 	result += lower_32_bits(vm->generation);
482f005ef32Sjsg 	/* Add one if the page tables will be re-generated on next CS */
483f005ef32Sjsg 	if (drm_sched_entity_error(&vm->delayed))
484f005ef32Sjsg 		++result;
485f005ef32Sjsg 
486f005ef32Sjsg 	return result;
487f005ef32Sjsg }
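
/*
 * Illustrative use (not from this file): callers snapshot the token and
 * compare it later to detect that the page tables were re-generated in
 * between.  The upper 32 bits carry the VRAM lost counter, the lower 32 bits
 * the VM's own generation, plus one while the delayed entity holds an error.
 *
 *	uint64_t gen = amdgpu_vm_generation(adev, vm);
 *	...
 *	if (gen != amdgpu_vm_generation(adev, vm))
 *		return -EIO;	(hypothetical error choice)
 */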
488f005ef32Sjsg 
489c349dbc7Sjsg /**
490fb4d8502Sjsg  * amdgpu_vm_validate_pt_bos - validate the page table BOs
491fb4d8502Sjsg  *
492fb4d8502Sjsg  * @adev: amdgpu device pointer
493fb4d8502Sjsg  * @vm: vm providing the BOs
494fb4d8502Sjsg  * @validate: callback to do the validation
495fb4d8502Sjsg  * @param: parameter for the validation callback
496fb4d8502Sjsg  *
497fb4d8502Sjsg  * Validate the page table BOs on command submission if necessary.
498fb4d8502Sjsg  *
499fb4d8502Sjsg  * Returns:
500fb4d8502Sjsg  * Validation result.
501fb4d8502Sjsg  */
502fb4d8502Sjsg int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
503fb4d8502Sjsg 			      int (*validate)(void *p, struct amdgpu_bo *bo),
504fb4d8502Sjsg 			      void *param)
505fb4d8502Sjsg {
50626d6e9f7Sjsg 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
5071bb76ff1Sjsg 	struct amdgpu_vm_bo_base *bo_base;
5081bb76ff1Sjsg 	struct amdgpu_bo *shadow;
5091bb76ff1Sjsg 	struct amdgpu_bo *bo;
510c349dbc7Sjsg 	int r;
511c349dbc7Sjsg 
51226d6e9f7Sjsg 	if (vm->generation != new_vm_generation) {
51326d6e9f7Sjsg 		vm->generation = new_vm_generation;
514f005ef32Sjsg 		amdgpu_vm_bo_reset_state_machine(vm);
515f005ef32Sjsg 		amdgpu_vm_fini_entities(vm);
516f005ef32Sjsg 		r = amdgpu_vm_init_entities(adev, vm);
517f005ef32Sjsg 		if (r)
518f005ef32Sjsg 			return r;
519f005ef32Sjsg 	}
520f005ef32Sjsg 
5211bb76ff1Sjsg 	spin_lock(&vm->status_lock);
5221bb76ff1Sjsg 	while (!list_empty(&vm->evicted)) {
5231bb76ff1Sjsg 		bo_base = list_first_entry(&vm->evicted,
5241bb76ff1Sjsg 					   struct amdgpu_vm_bo_base,
5251bb76ff1Sjsg 					   vm_status);
5261bb76ff1Sjsg 		spin_unlock(&vm->status_lock);
527fb4d8502Sjsg 
5281bb76ff1Sjsg 		bo = bo_base->bo;
5291bb76ff1Sjsg 		shadow = amdgpu_bo_shadowed(bo);
530fb4d8502Sjsg 
531fb4d8502Sjsg 		r = validate(param, bo);
532fb4d8502Sjsg 		if (r)
533c349dbc7Sjsg 			return r;
5345ca02815Sjsg 		if (shadow) {
5355ca02815Sjsg 			r = validate(param, shadow);
5365ca02815Sjsg 			if (r)
5375ca02815Sjsg 				return r;
5385ca02815Sjsg 		}
539fb4d8502Sjsg 
540fb4d8502Sjsg 		if (bo->tbo.type != ttm_bo_type_kernel) {
541c349dbc7Sjsg 			amdgpu_vm_bo_moved(bo_base);
542fb4d8502Sjsg 		} else {
5435ca02815Sjsg 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
544c349dbc7Sjsg 			amdgpu_vm_bo_relocated(bo_base);
545fb4d8502Sjsg 		}
5461bb76ff1Sjsg 		spin_lock(&vm->status_lock);
547fb4d8502Sjsg 	}
5481bb76ff1Sjsg 	spin_unlock(&vm->status_lock);
549fb4d8502Sjsg 
550c349dbc7Sjsg 	amdgpu_vm_eviction_lock(vm);
551c349dbc7Sjsg 	vm->evicting = false;
552c349dbc7Sjsg 	amdgpu_vm_eviction_unlock(vm);
553fb4d8502Sjsg 
554c349dbc7Sjsg 	return 0;
555fb4d8502Sjsg }
556fb4d8502Sjsg 
557fb4d8502Sjsg /**
558fb4d8502Sjsg  * amdgpu_vm_ready - check VM is ready for updates
559fb4d8502Sjsg  *
560fb4d8502Sjsg  * @vm: VM to check
561fb4d8502Sjsg  *
562fb4d8502Sjsg  * Check if all VM PDs/PTs are ready for updates
563fb4d8502Sjsg  *
564fb4d8502Sjsg  * Returns:
5655dd3e06bSjsg  * True if the VM is not evicting and there are no evicted BOs left.
566fb4d8502Sjsg  */
567fb4d8502Sjsg bool amdgpu_vm_ready(struct amdgpu_vm *vm)
568fb4d8502Sjsg {
5691bb76ff1Sjsg 	bool empty;
5705dd3e06bSjsg 	bool ret;
5715dd3e06bSjsg 
5725dd3e06bSjsg 	amdgpu_vm_eviction_lock(vm);
5735dd3e06bSjsg 	ret = !vm->evicting;
5745dd3e06bSjsg 	amdgpu_vm_eviction_unlock(vm);
575f24eb5deSjsg 
5761bb76ff1Sjsg 	spin_lock(&vm->status_lock);
5771bb76ff1Sjsg 	empty = list_empty(&vm->evicted);
5781bb76ff1Sjsg 	spin_unlock(&vm->status_lock);
579fb4d8502Sjsg 
5801bb76ff1Sjsg 	return ret && empty;
581fb4d8502Sjsg }
582fb4d8502Sjsg 
583fb4d8502Sjsg /**
584fb4d8502Sjsg  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
585fb4d8502Sjsg  *
586fb4d8502Sjsg  * @adev: amdgpu_device pointer
587fb4d8502Sjsg  */
588fb4d8502Sjsg void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
589fb4d8502Sjsg {
590fb4d8502Sjsg 	const struct amdgpu_ip_block *ip_block;
591fb4d8502Sjsg 	bool has_compute_vm_bug;
592fb4d8502Sjsg 	struct amdgpu_ring *ring;
593fb4d8502Sjsg 	int i;
594fb4d8502Sjsg 
595fb4d8502Sjsg 	has_compute_vm_bug = false;
596fb4d8502Sjsg 
597fb4d8502Sjsg 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
598fb4d8502Sjsg 	if (ip_block) {
599fb4d8502Sjsg 		/* Compute has a VM bug for GFX version < 7.
600fb4d8502Sjsg 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
601fb4d8502Sjsg 		if (ip_block->version->major <= 7)
602fb4d8502Sjsg 			has_compute_vm_bug = true;
603fb4d8502Sjsg 		else if (ip_block->version->major == 8)
604fb4d8502Sjsg 			if (adev->gfx.mec_fw_version < 673)
605fb4d8502Sjsg 				has_compute_vm_bug = true;
606fb4d8502Sjsg 	}
607fb4d8502Sjsg 
608fb4d8502Sjsg 	for (i = 0; i < adev->num_rings; i++) {
609fb4d8502Sjsg 		ring = adev->rings[i];
610fb4d8502Sjsg 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
611fb4d8502Sjsg 			/* only compute rings */
612fb4d8502Sjsg 			ring->has_compute_vm_bug = has_compute_vm_bug;
613fb4d8502Sjsg 		else
614fb4d8502Sjsg 			ring->has_compute_vm_bug = false;
615fb4d8502Sjsg 	}
616fb4d8502Sjsg }
617fb4d8502Sjsg 
618fb4d8502Sjsg /**
619fb4d8502Sjsg  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
620fb4d8502Sjsg  *
621fb4d8502Sjsg  * @ring: ring on which the job will be submitted
622fb4d8502Sjsg  * @job: job to submit
623fb4d8502Sjsg  *
624fb4d8502Sjsg  * Returns:
625fb4d8502Sjsg  * True if sync is needed.
626fb4d8502Sjsg  */
627fb4d8502Sjsg bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
628fb4d8502Sjsg 				  struct amdgpu_job *job)
629fb4d8502Sjsg {
630fb4d8502Sjsg 	struct amdgpu_device *adev = ring->adev;
631f005ef32Sjsg 	unsigned vmhub = ring->vm_hub;
632fb4d8502Sjsg 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
633fb4d8502Sjsg 
634fb4d8502Sjsg 	if (job->vmid == 0)
635fb4d8502Sjsg 		return false;
636fb4d8502Sjsg 
637f005ef32Sjsg 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
638fb4d8502Sjsg 		return true;
639fb4d8502Sjsg 
640f005ef32Sjsg 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
641f005ef32Sjsg 		return true;
642f005ef32Sjsg 
643f005ef32Sjsg 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
644f005ef32Sjsg 		return true;
645f005ef32Sjsg 
646f005ef32Sjsg 	return false;
647fb4d8502Sjsg }
648fb4d8502Sjsg 
649fb4d8502Sjsg /**
650fb4d8502Sjsg  * amdgpu_vm_flush - hardware flush the vm
651fb4d8502Sjsg  *
652fb4d8502Sjsg  * @ring: ring to use for flush
653fb4d8502Sjsg  * @job:  related job
654fb4d8502Sjsg  * @need_pipe_sync: is pipe sync needed
655fb4d8502Sjsg  *
656fb4d8502Sjsg  * Emit a VM flush when it is necessary.
657fb4d8502Sjsg  *
658fb4d8502Sjsg  * Returns:
659fb4d8502Sjsg  * 0 on success, errno otherwise.
660fb4d8502Sjsg  */
661c349dbc7Sjsg int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
662c349dbc7Sjsg 		    bool need_pipe_sync)
663fb4d8502Sjsg {
664fb4d8502Sjsg 	struct amdgpu_device *adev = ring->adev;
665f005ef32Sjsg 	unsigned vmhub = ring->vm_hub;
666fb4d8502Sjsg 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
667fb4d8502Sjsg 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
668f005ef32Sjsg 	bool spm_update_needed = job->spm_update_needed;
669f005ef32Sjsg 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
670f005ef32Sjsg 		job->gds_switch_needed;
671fb4d8502Sjsg 	bool vm_flush_needed = job->vm_needs_flush;
672fb4d8502Sjsg 	struct dma_fence *fence = NULL;
6730ef7205aSjsg 	bool pasid_mapping_needed = false;
674fb4d8502Sjsg 	unsigned patch_offset = 0;
675fb4d8502Sjsg 	int r;
676fb4d8502Sjsg 
677fb4d8502Sjsg 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
678fb4d8502Sjsg 		gds_switch_needed = true;
679fb4d8502Sjsg 		vm_flush_needed = true;
680fb4d8502Sjsg 		pasid_mapping_needed = true;
681f005ef32Sjsg 		spm_update_needed = true;
682fb4d8502Sjsg 	}
683fb4d8502Sjsg 
684ee7378f0Sjsg 	mutex_lock(&id_mgr->lock);
685ee7378f0Sjsg 	if (id->pasid != job->pasid || !id->pasid_mapping ||
686ee7378f0Sjsg 	    !dma_fence_is_signaled(id->pasid_mapping))
687ee7378f0Sjsg 		pasid_mapping_needed = true;
688ee7378f0Sjsg 	mutex_unlock(&id_mgr->lock);
689ee7378f0Sjsg 
690fb4d8502Sjsg 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
691fb4d8502Sjsg 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
692fb4d8502Sjsg 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
693fb4d8502Sjsg 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
694fb4d8502Sjsg 		ring->funcs->emit_wreg;
695fb4d8502Sjsg 
696fb4d8502Sjsg 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
697fb4d8502Sjsg 		return 0;
698fb4d8502Sjsg 
699f005ef32Sjsg 	amdgpu_ring_ib_begin(ring);
700fb4d8502Sjsg 	if (ring->funcs->init_cond_exec)
701fb4d8502Sjsg 		patch_offset = amdgpu_ring_init_cond_exec(ring);
702fb4d8502Sjsg 
703fb4d8502Sjsg 	if (need_pipe_sync)
704fb4d8502Sjsg 		amdgpu_ring_emit_pipeline_sync(ring);
705fb4d8502Sjsg 
706fb4d8502Sjsg 	if (vm_flush_needed) {
707fb4d8502Sjsg 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
708fb4d8502Sjsg 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
709fb4d8502Sjsg 	}
710fb4d8502Sjsg 
711fb4d8502Sjsg 	if (pasid_mapping_needed)
712fb4d8502Sjsg 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
713fb4d8502Sjsg 
714f005ef32Sjsg 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
715f005ef32Sjsg 		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
716f005ef32Sjsg 
717f005ef32Sjsg 	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
718f005ef32Sjsg 	    gds_switch_needed) {
719f005ef32Sjsg 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
720f005ef32Sjsg 					    job->gds_size, job->gws_base,
721f005ef32Sjsg 					    job->gws_size, job->oa_base,
722f005ef32Sjsg 					    job->oa_size);
723f005ef32Sjsg 	}
724f005ef32Sjsg 
725fb4d8502Sjsg 	if (vm_flush_needed || pasid_mapping_needed) {
7265ca02815Sjsg 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
727fb4d8502Sjsg 		if (r)
728fb4d8502Sjsg 			return r;
729fb4d8502Sjsg 	}
730fb4d8502Sjsg 
731fb4d8502Sjsg 	if (vm_flush_needed) {
732fb4d8502Sjsg 		mutex_lock(&id_mgr->lock);
733fb4d8502Sjsg 		dma_fence_put(id->last_flush);
734fb4d8502Sjsg 		id->last_flush = dma_fence_get(fence);
735fb4d8502Sjsg 		id->current_gpu_reset_count =
736fb4d8502Sjsg 			atomic_read(&adev->gpu_reset_counter);
737fb4d8502Sjsg 		mutex_unlock(&id_mgr->lock);
738fb4d8502Sjsg 	}
739fb4d8502Sjsg 
740fb4d8502Sjsg 	if (pasid_mapping_needed) {
741ee7378f0Sjsg 		mutex_lock(&id_mgr->lock);
742fb4d8502Sjsg 		id->pasid = job->pasid;
743fb4d8502Sjsg 		dma_fence_put(id->pasid_mapping);
744fb4d8502Sjsg 		id->pasid_mapping = dma_fence_get(fence);
745ee7378f0Sjsg 		mutex_unlock(&id_mgr->lock);
746fb4d8502Sjsg 	}
747fb4d8502Sjsg 	dma_fence_put(fence);
748fb4d8502Sjsg 
749fb4d8502Sjsg 	if (ring->funcs->patch_cond_exec)
750fb4d8502Sjsg 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
751fb4d8502Sjsg 
752fb4d8502Sjsg 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
753fb4d8502Sjsg 	if (ring->funcs->emit_switch_buffer) {
754fb4d8502Sjsg 		amdgpu_ring_emit_switch_buffer(ring);
755fb4d8502Sjsg 		amdgpu_ring_emit_switch_buffer(ring);
756fb4d8502Sjsg 	}
757f005ef32Sjsg 	amdgpu_ring_ib_end(ring);
758fb4d8502Sjsg 	return 0;
759fb4d8502Sjsg }
760fb4d8502Sjsg 
761fb4d8502Sjsg /**
762fb4d8502Sjsg  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
763fb4d8502Sjsg  *
764fb4d8502Sjsg  * @vm: requested vm
765fb4d8502Sjsg  * @bo: requested buffer object
766fb4d8502Sjsg  *
767fb4d8502Sjsg  * Find @bo inside the requested vm.
768fb4d8502Sjsg  * Search inside the @bos vm list for the requested vm
769fb4d8502Sjsg  * Returns the found bo_va or NULL if none is found
770fb4d8502Sjsg  *
771fb4d8502Sjsg  * Object has to be reserved!
772fb4d8502Sjsg  *
773fb4d8502Sjsg  * Returns:
774fb4d8502Sjsg  * Found bo_va or NULL.
775fb4d8502Sjsg  */
776fb4d8502Sjsg struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
777fb4d8502Sjsg 				       struct amdgpu_bo *bo)
778fb4d8502Sjsg {
779c349dbc7Sjsg 	struct amdgpu_vm_bo_base *base;
780fb4d8502Sjsg 
781c349dbc7Sjsg 	for (base = bo->vm_bo; base; base = base->next) {
782c349dbc7Sjsg 		if (base->vm != vm)
783c349dbc7Sjsg 			continue;
784c349dbc7Sjsg 
785c349dbc7Sjsg 		return container_of(base, struct amdgpu_bo_va, base);
786fb4d8502Sjsg 	}
787fb4d8502Sjsg 	return NULL;
788fb4d8502Sjsg }
789fb4d8502Sjsg 
790fb4d8502Sjsg /**
791fb4d8502Sjsg  * amdgpu_vm_map_gart - Resolve gart mapping of addr
792fb4d8502Sjsg  *
793fb4d8502Sjsg  * @pages_addr: optional DMA address to use for lookup
794fb4d8502Sjsg  * @addr: the unmapped addr
795fb4d8502Sjsg  *
796fb4d8502Sjsg  * Look up the physical address of the page that the pte resolves
797fb4d8502Sjsg  * to.
798fb4d8502Sjsg  *
799fb4d8502Sjsg  * Returns:
800fb4d8502Sjsg  * The pointer for the page table entry.
801fb4d8502Sjsg  */
802c349dbc7Sjsg uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
803fb4d8502Sjsg {
804fb4d8502Sjsg 	uint64_t result;
805fb4d8502Sjsg 
806fb4d8502Sjsg 	/* page table offset */
807fb4d8502Sjsg 	result = pages_addr[addr >> PAGE_SHIFT];
808fb4d8502Sjsg 
809fb4d8502Sjsg 	/* in case cpu page size != gpu page size*/
810ad8b1aafSjsg 	result |= addr & (~LINUX_PAGE_MASK);
811fb4d8502Sjsg 
812fb4d8502Sjsg 	result &= 0xFFFFFFFFFFFFF000ULL;
813fb4d8502Sjsg 
814fb4d8502Sjsg 	return result;
815fb4d8502Sjsg }
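
/*
 * Worked example (illustrative): with 16K CPU pages and 4K GPU pages,
 * addr = 0x5345 looks up pages_addr[1] (addr >> PAGE_SHIFT), ORs in the
 * in-page offset 0x1345 and masks the result to 4K granularity, yielding
 * the second 4K GPU page inside that CPU page.  With matching 4K pages the
 * OR is effectively a no-op after the final mask.
 */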
816fb4d8502Sjsg 
817fb4d8502Sjsg /**
818c349dbc7Sjsg  * amdgpu_vm_update_pdes - make sure that all directories are valid
819fb4d8502Sjsg  *
820fb4d8502Sjsg  * @adev: amdgpu_device pointer
821fb4d8502Sjsg  * @vm: requested vm
822ad8b1aafSjsg  * @immediate: submit immediately to the paging queue
823fb4d8502Sjsg  *
824fb4d8502Sjsg  * Makes sure all directories are up to date.
825fb4d8502Sjsg  *
826fb4d8502Sjsg  * Returns:
827fb4d8502Sjsg  * 0 for success, error for failure.
828fb4d8502Sjsg  */
829c349dbc7Sjsg int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
830ad8b1aafSjsg 			  struct amdgpu_vm *vm, bool immediate)
831fb4d8502Sjsg {
832c349dbc7Sjsg 	struct amdgpu_vm_update_params params;
8331bb76ff1Sjsg 	struct amdgpu_vm_bo_base *entry;
8341bb76ff1Sjsg 	bool flush_tlb_needed = false;
8351bb76ff1Sjsg 	DRM_LIST_HEAD(relocated);
8361bb76ff1Sjsg 	int r, idx;
837fb4d8502Sjsg 
8381bb76ff1Sjsg 	spin_lock(&vm->status_lock);
8391bb76ff1Sjsg 	list_splice_init(&vm->relocated, &relocated);
8401bb76ff1Sjsg 	spin_unlock(&vm->status_lock);
8411bb76ff1Sjsg 
8421bb76ff1Sjsg 	if (list_empty(&relocated))
843fb4d8502Sjsg 		return 0;
844fb4d8502Sjsg 
8451bb76ff1Sjsg 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
8461bb76ff1Sjsg 		return -ENODEV;
8471bb76ff1Sjsg 
848fb4d8502Sjsg 	memset(&params, 0, sizeof(params));
849fb4d8502Sjsg 	params.adev = adev;
850c349dbc7Sjsg 	params.vm = vm;
851ad8b1aafSjsg 	params.immediate = immediate;
852fb4d8502Sjsg 
853c349dbc7Sjsg 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
854fb4d8502Sjsg 	if (r)
8551bb76ff1Sjsg 		goto error;
856fb4d8502Sjsg 
8571bb76ff1Sjsg 	list_for_each_entry(entry, &relocated, vm_status) {
8581bb76ff1Sjsg 		/* vm_flush_needed after updating moved PDEs */
8591bb76ff1Sjsg 		flush_tlb_needed |= entry->moved;
860fb4d8502Sjsg 
8611bb76ff1Sjsg 		r = amdgpu_vm_pde_update(&params, entry);
862fb4d8502Sjsg 		if (r)
863fb4d8502Sjsg 			goto error;
864fb4d8502Sjsg 	}
865fb4d8502Sjsg 
866c349dbc7Sjsg 	r = vm->update_funcs->commit(&params, &vm->last_update);
867c349dbc7Sjsg 	if (r)
868c349dbc7Sjsg 		goto error;
8691bb76ff1Sjsg 
8701bb76ff1Sjsg 	if (flush_tlb_needed)
8711bb76ff1Sjsg 		atomic64_inc(&vm->tlb_seq);
8721bb76ff1Sjsg 
8731bb76ff1Sjsg 	while (!list_empty(&relocated)) {
8741bb76ff1Sjsg 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
8751bb76ff1Sjsg 					 vm_status);
8761bb76ff1Sjsg 		amdgpu_vm_bo_idle(entry);
8771bb76ff1Sjsg 	}
878fb4d8502Sjsg 
879fb4d8502Sjsg error:
8801bb76ff1Sjsg 	drm_dev_exit(idx);
881fb4d8502Sjsg 	return r;
882fb4d8502Sjsg }
883fb4d8502Sjsg 
8841bb76ff1Sjsg /**
8851bb76ff1Sjsg  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
8861bb76ff1Sjsg  * @fence: unused
8871bb76ff1Sjsg  * @cb: the callback structure
888fb4d8502Sjsg  *
8891bb76ff1Sjsg  * Increments the tlb sequence to make sure that future CS execute a VM flush.
890c349dbc7Sjsg  */
8911bb76ff1Sjsg static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
8921bb76ff1Sjsg 				 struct dma_fence_cb *cb)
893c349dbc7Sjsg {
894f005ef32Sjsg 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
895c349dbc7Sjsg 
8961bb76ff1Sjsg 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
8971bb76ff1Sjsg 	atomic64_inc(&tlb_cb->vm->tlb_seq);
8981bb76ff1Sjsg 	kfree(tlb_cb);
899c349dbc7Sjsg }
900c349dbc7Sjsg 
901c349dbc7Sjsg /**
9021bb76ff1Sjsg  * amdgpu_vm_update_range - update a range in the vm page table
903c349dbc7Sjsg  *
9041bb76ff1Sjsg  * @adev: amdgpu_device pointer to use for commands
9051bb76ff1Sjsg  * @vm: the VM to update the range
906ad8b1aafSjsg  * @immediate: immediate submission in a page fault
907ad8b1aafSjsg  * @unlocked: unlocked invalidation during MM callback
9081bb76ff1Sjsg  * @flush_tlb: trigger tlb invalidation after update completed
909c349dbc7Sjsg  * @resv: fences we need to sync to
910fb4d8502Sjsg  * @start: start of mapped range
911fb4d8502Sjsg  * @last: last mapped entry
912fb4d8502Sjsg  * @flags: flags for the entries
9135ca02815Sjsg  * @offset: offset into nodes and pages_addr
9141bb76ff1Sjsg  * @vram_base: base for vram mappings
9155ca02815Sjsg  * @res: ttm_resource to map
916c349dbc7Sjsg  * @pages_addr: DMA addresses to use for mapping
917fb4d8502Sjsg  * @fence: optional resulting fence
918fb4d8502Sjsg  *
919fb4d8502Sjsg  * Fill in the page table entries between @start and @last.
920fb4d8502Sjsg  *
921fb4d8502Sjsg  * Returns:
9221bb76ff1Sjsg  * 0 for success, negative error code for failure.
923fb4d8502Sjsg  */
9241bb76ff1Sjsg int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
9251bb76ff1Sjsg 			   bool immediate, bool unlocked, bool flush_tlb,
9261bb76ff1Sjsg 			   struct dma_resv *resv, uint64_t start, uint64_t last,
9271bb76ff1Sjsg 			   uint64_t flags, uint64_t offset, uint64_t vram_base,
9281bb76ff1Sjsg 			   struct ttm_resource *res, dma_addr_t *pages_addr,
9291bb76ff1Sjsg 			   struct dma_fence **fence)
930fb4d8502Sjsg {
931c349dbc7Sjsg 	struct amdgpu_vm_update_params params;
932f005ef32Sjsg 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
9335ca02815Sjsg 	struct amdgpu_res_cursor cursor;
934c349dbc7Sjsg 	enum amdgpu_sync_mode sync_mode;
9355ca02815Sjsg 	int r, idx;
9365ca02815Sjsg 
9371bb76ff1Sjsg 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
9385ca02815Sjsg 		return -ENODEV;
939fb4d8502Sjsg 
9401bb76ff1Sjsg 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
9411bb76ff1Sjsg 	if (!tlb_cb) {
9421bb76ff1Sjsg 		r = -ENOMEM;
9431bb76ff1Sjsg 		goto error_unlock;
9441bb76ff1Sjsg 	}
9451bb76ff1Sjsg 
9461bb76ff1Sjsg 	/* On Vega20 with XGMI, PTEs can get inadvertently cached in the L2
9471bb76ff1Sjsg 	 * texture cache, so do a heavy-weight TLB flush unconditionally.
9481bb76ff1Sjsg 	 */
9491bb76ff1Sjsg 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
9501bb76ff1Sjsg 		     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
9511bb76ff1Sjsg 
9521bb76ff1Sjsg 	/*
9531bb76ff1Sjsg 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
9541bb76ff1Sjsg 	 */
9551bb76ff1Sjsg 	flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);
9561bb76ff1Sjsg 
957fb4d8502Sjsg 	memset(&params, 0, sizeof(params));
958fb4d8502Sjsg 	params.adev = adev;
959fb4d8502Sjsg 	params.vm = vm;
960ad8b1aafSjsg 	params.immediate = immediate;
961fb4d8502Sjsg 	params.pages_addr = pages_addr;
962ad8b1aafSjsg 	params.unlocked = unlocked;
963fb4d8502Sjsg 
964c349dbc7Sjsg 	/* Implicitly sync to command submissions in the same VM before
965c349dbc7Sjsg 	 * unmapping. Sync to moving fences before mapping.
966fb4d8502Sjsg 	 */
967c349dbc7Sjsg 	if (!(flags & AMDGPU_PTE_VALID))
968c349dbc7Sjsg 		sync_mode = AMDGPU_SYNC_EQ_OWNER;
969fb4d8502Sjsg 	else
970c349dbc7Sjsg 		sync_mode = AMDGPU_SYNC_EXPLICIT;
971fb4d8502Sjsg 
972c349dbc7Sjsg 	amdgpu_vm_eviction_lock(vm);
973c349dbc7Sjsg 	if (vm->evicting) {
974c349dbc7Sjsg 		r = -EBUSY;
9751bb76ff1Sjsg 		goto error_free;
976fb4d8502Sjsg 	}
977fb4d8502Sjsg 
978ad8b1aafSjsg 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
979ad8b1aafSjsg 		struct dma_fence *tmp = dma_fence_get_stub();
980fb4d8502Sjsg 
9815ca02815Sjsg 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
982ad8b1aafSjsg 		swap(vm->last_unlocked, tmp);
983ad8b1aafSjsg 		dma_fence_put(tmp);
984fb4d8502Sjsg 	}
985fb4d8502Sjsg 
986c349dbc7Sjsg 	r = vm->update_funcs->prepare(&params, resv, sync_mode);
987fb4d8502Sjsg 	if (r)
9881bb76ff1Sjsg 		goto error_free;
989fb4d8502Sjsg 
9905ca02815Sjsg 	amdgpu_res_first(pages_addr ? NULL : res, offset,
9915ca02815Sjsg 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
9925ca02815Sjsg 	while (cursor.remaining) {
9935ca02815Sjsg 		uint64_t tmp, num_entries, addr;
994fb4d8502Sjsg 
9955ca02815Sjsg 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
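		/* For ranges backed by system pages, check how many of the
		 * backing CPU pages are physically contiguous: a contiguous
		 * run can be written as one linear mapping, otherwise the
		 * per-page lookup through params.pages_addr is used.
		 */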
9965ca02815Sjsg 		if (pages_addr) {
9975ca02815Sjsg 			bool contiguous = true;
998fb4d8502Sjsg 
9995ca02815Sjsg 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
10005ca02815Sjsg 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
10015ca02815Sjsg 				uint64_t count;
10025ca02815Sjsg 
10035ca02815Sjsg 				contiguous = pages_addr[pfn + 1] ==
10045ca02815Sjsg 					pages_addr[pfn] + PAGE_SIZE;
10055ca02815Sjsg 
10065ca02815Sjsg 				tmp = num_entries /
10075ca02815Sjsg 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
10085ca02815Sjsg 				for (count = 2; count < tmp; ++count) {
10095ca02815Sjsg 					uint64_t idx = pfn + count;
10105ca02815Sjsg 
10115ca02815Sjsg 					if (contiguous != (pages_addr[idx] ==
10125ca02815Sjsg 					    pages_addr[idx - 1] + PAGE_SIZE))
10135ca02815Sjsg 						break;
10145ca02815Sjsg 				}
1015f005ef32Sjsg 				if (!contiguous)
1016f005ef32Sjsg 					count--;
10175ca02815Sjsg 				num_entries = count *
10185ca02815Sjsg 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1019fb4d8502Sjsg 			}
1020fb4d8502Sjsg 
10215ca02815Sjsg 			if (!contiguous) {
10225ca02815Sjsg 				addr = cursor.start;
10235ca02815Sjsg 				params.pages_addr = pages_addr;
10245ca02815Sjsg 			} else {
10255ca02815Sjsg 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
10265ca02815Sjsg 				params.pages_addr = NULL;
1027fb4d8502Sjsg 			}
1028fb4d8502Sjsg 
10295ca02815Sjsg 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
10301bb76ff1Sjsg 			addr = vram_base + cursor.start;
1031fb4d8502Sjsg 		} else {
1032fb4d8502Sjsg 			addr = 0;
1033fb4d8502Sjsg 		}
1034fb4d8502Sjsg 
10355ca02815Sjsg 		tmp = start + num_entries;
10361bb76ff1Sjsg 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1037fb4d8502Sjsg 		if (r)
10381bb76ff1Sjsg 			goto error_free;
10395ca02815Sjsg 
10405ca02815Sjsg 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
10415ca02815Sjsg 		start = tmp;
10425ca02815Sjsg 	}
10435ca02815Sjsg 
10445ca02815Sjsg 	r = vm->update_funcs->commit(&params, fence);
10455ca02815Sjsg 
10461bb76ff1Sjsg 	if (flush_tlb || params.table_freed) {
10471bb76ff1Sjsg 		tlb_cb->vm = vm;
10481bb76ff1Sjsg 		if (fence && *fence &&
10491bb76ff1Sjsg 		    !dma_fence_add_callback(*fence, &tlb_cb->cb,
10501bb76ff1Sjsg 					   amdgpu_vm_tlb_seq_cb)) {
10511bb76ff1Sjsg 			dma_fence_put(vm->last_tlb_flush);
10521bb76ff1Sjsg 			vm->last_tlb_flush = dma_fence_get(*fence);
10531bb76ff1Sjsg 		} else {
10541bb76ff1Sjsg 			amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
10551bb76ff1Sjsg 		}
10561bb76ff1Sjsg 		tlb_cb = NULL;
10571bb76ff1Sjsg 	}
10581bb76ff1Sjsg 
10591bb76ff1Sjsg error_free:
10601bb76ff1Sjsg 	kfree(tlb_cb);
10615ca02815Sjsg 
10625ca02815Sjsg error_unlock:
10635ca02815Sjsg 	amdgpu_vm_eviction_unlock(vm);
10645ca02815Sjsg 	drm_dev_exit(idx);
1065fb4d8502Sjsg 	return r;
1066fb4d8502Sjsg }
1067fb4d8502Sjsg 
1068f005ef32Sjsg static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
1069f005ef32Sjsg 				    struct amdgpu_mem_stats *stats)
1070f005ef32Sjsg {
1071f005ef32Sjsg 	struct amdgpu_vm *vm = bo_va->base.vm;
1072f005ef32Sjsg 	struct amdgpu_bo *bo = bo_va->base.bo;
1073f005ef32Sjsg 
1074f005ef32Sjsg 	if (!bo)
1075f005ef32Sjsg 		return;
1076f005ef32Sjsg 
1077f005ef32Sjsg 	/*
1078f005ef32Sjsg 	 * For now ignore BOs which are currently locked and potentially
1079f005ef32Sjsg 	 * changing their location.
1080f005ef32Sjsg 	 */
1081f005ef32Sjsg 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
1082f005ef32Sjsg 	    !dma_resv_trylock(bo->tbo.base.resv))
1083f005ef32Sjsg 		return;
1084f005ef32Sjsg 
1085f005ef32Sjsg 	amdgpu_bo_get_memory(bo, stats);
1086f005ef32Sjsg 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
1087f005ef32Sjsg 		dma_resv_unlock(bo->tbo.base.resv);
1088f005ef32Sjsg }
1089f005ef32Sjsg 
1090f005ef32Sjsg void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1091f005ef32Sjsg 			  struct amdgpu_mem_stats *stats)
10925ca02815Sjsg {
10935ca02815Sjsg 	struct amdgpu_bo_va *bo_va, *tmp;
10945ca02815Sjsg 
10951bb76ff1Sjsg 	spin_lock(&vm->status_lock);
1096f005ef32Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
1097f005ef32Sjsg 		amdgpu_vm_bo_get_memory(bo_va, stats);
1098f005ef32Sjsg 
1099f005ef32Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
1100f005ef32Sjsg 		amdgpu_vm_bo_get_memory(bo_va, stats);
1101f005ef32Sjsg 
1102f005ef32Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
1103f005ef32Sjsg 		amdgpu_vm_bo_get_memory(bo_va, stats);
1104f005ef32Sjsg 
1105f005ef32Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
1106f005ef32Sjsg 		amdgpu_vm_bo_get_memory(bo_va, stats);
1107f005ef32Sjsg 
1108f005ef32Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
1109f005ef32Sjsg 		amdgpu_vm_bo_get_memory(bo_va, stats);
1110f005ef32Sjsg 
1111f005ef32Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
1112f005ef32Sjsg 		amdgpu_vm_bo_get_memory(bo_va, stats);
11131bb76ff1Sjsg 	spin_unlock(&vm->status_lock);
11145ca02815Sjsg }
1115f005ef32Sjsg 
1116fb4d8502Sjsg /**
1117fb4d8502Sjsg  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1118fb4d8502Sjsg  *
1119fb4d8502Sjsg  * @adev: amdgpu_device pointer
1120fb4d8502Sjsg  * @bo_va: requested BO and VM object
1121fb4d8502Sjsg  * @clear: if true clear the entries
1122fb4d8502Sjsg  *
1123fb4d8502Sjsg  * Fill in the page table entries for @bo_va.
1124fb4d8502Sjsg  *
1125fb4d8502Sjsg  * Returns:
1126fb4d8502Sjsg  * 0 for success, -EINVAL for failure.
1127fb4d8502Sjsg  */
1128c349dbc7Sjsg int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
11291bb76ff1Sjsg 			bool clear)
1130fb4d8502Sjsg {
1131fb4d8502Sjsg 	struct amdgpu_bo *bo = bo_va->base.bo;
1132fb4d8502Sjsg 	struct amdgpu_vm *vm = bo_va->base.vm;
1133fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *mapping;
1134fb4d8502Sjsg 	dma_addr_t *pages_addr = NULL;
1135ad8b1aafSjsg 	struct ttm_resource *mem;
1136c349dbc7Sjsg 	struct dma_fence **last_update;
11371bb76ff1Sjsg 	bool flush_tlb = clear;
1138c349dbc7Sjsg 	struct dma_resv *resv;
11391bb76ff1Sjsg 	uint64_t vram_base;
1140fb4d8502Sjsg 	uint64_t flags;
1141fb4d8502Sjsg 	int r;
1142fb4d8502Sjsg 
1143fb4d8502Sjsg 	if (clear || !bo) {
1144fb4d8502Sjsg 		mem = NULL;
11455ca02815Sjsg 		resv = vm->root.bo->tbo.base.resv;
1146fb4d8502Sjsg 	} else {
1147ad8b1aafSjsg 		struct drm_gem_object *obj = &bo->tbo.base;
1148fb4d8502Sjsg 
1149ad8b1aafSjsg 		resv = bo->tbo.base.resv;
1150ad8b1aafSjsg #ifdef notyet
1151ad8b1aafSjsg 		if (obj->import_attach && bo_va->is_xgmi) {
1152ad8b1aafSjsg 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1153ad8b1aafSjsg 			struct drm_gem_object *gobj = dma_buf->priv;
1154ad8b1aafSjsg 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1155ad8b1aafSjsg 
1156f005ef32Sjsg 			if (abo->tbo.resource &&
1157f005ef32Sjsg 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1158ad8b1aafSjsg 				bo = gem_to_amdgpu_bo(gobj);
1159ad8b1aafSjsg 		}
1160ad8b1aafSjsg #endif
11615ca02815Sjsg 		mem = bo->tbo.resource;
1162f005ef32Sjsg 		if (mem && (mem->mem_type == TTM_PL_TT ||
1163f005ef32Sjsg 			    mem->mem_type == AMDGPU_PL_PREEMPT))
11645ca02815Sjsg 			pages_addr = bo->tbo.ttm->dma_address;
1165fb4d8502Sjsg 	}
1166fb4d8502Sjsg 
1167c349dbc7Sjsg 	if (bo) {
11681bb76ff1Sjsg 		struct amdgpu_device *bo_adev;
11691bb76ff1Sjsg 
1170fb4d8502Sjsg 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1171ad8b1aafSjsg 
1172ad8b1aafSjsg 		if (amdgpu_bo_encrypted(bo))
1173ad8b1aafSjsg 			flags |= AMDGPU_PTE_TMZ;
1174ad8b1aafSjsg 
1175c349dbc7Sjsg 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
11761bb76ff1Sjsg 		vram_base = bo_adev->vm_manager.vram_base_offset;
1177c349dbc7Sjsg 	} else {
1178fb4d8502Sjsg 		flags = 0x0;
11791bb76ff1Sjsg 		vram_base = 0;
1180c349dbc7Sjsg 	}
1181fb4d8502Sjsg 
1182c349dbc7Sjsg 	if (clear || (bo && bo->tbo.base.resv ==
11835ca02815Sjsg 		      vm->root.bo->tbo.base.resv))
1184fb4d8502Sjsg 		last_update = &vm->last_update;
1185fb4d8502Sjsg 	else
1186fb4d8502Sjsg 		last_update = &bo_va->last_pt_update;
1187fb4d8502Sjsg 
1188fb4d8502Sjsg 	if (!clear && bo_va->base.moved) {
11891bb76ff1Sjsg 		flush_tlb = true;
1190fb4d8502Sjsg 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1191fb4d8502Sjsg 
1192fb4d8502Sjsg 	} else if (bo_va->cleared != clear) {
1193fb4d8502Sjsg 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1194fb4d8502Sjsg 	}
1195fb4d8502Sjsg 
1196fb4d8502Sjsg 	list_for_each_entry(mapping, &bo_va->invalids, list) {
11975ca02815Sjsg 		uint64_t update_flags = flags;
11985ca02815Sjsg 
11995ca02815Sjsg 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
12005ca02815Sjsg 		 * bits here, but filter the flags again just in case.
12015ca02815Sjsg 		 */
12025ca02815Sjsg 		if (!(mapping->flags & AMDGPU_PTE_READABLE))
12035ca02815Sjsg 			update_flags &= ~AMDGPU_PTE_READABLE;
12045ca02815Sjsg 		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
12055ca02815Sjsg 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
12065ca02815Sjsg 
12075ca02815Sjsg 		/* Apply ASIC specific mapping flags */
12085ca02815Sjsg 		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
12095ca02815Sjsg 
12105ca02815Sjsg 		trace_amdgpu_vm_bo_update(mapping);
12115ca02815Sjsg 
12121bb76ff1Sjsg 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
12131bb76ff1Sjsg 					   resv, mapping->start, mapping->last,
12141bb76ff1Sjsg 					   update_flags, mapping->offset,
12151bb76ff1Sjsg 					   vram_base, mem, pages_addr,
12161bb76ff1Sjsg 					   last_update);
1217fb4d8502Sjsg 		if (r)
1218fb4d8502Sjsg 			return r;
1219fb4d8502Sjsg 	}
1220fb4d8502Sjsg 
1221fb4d8502Sjsg 	/* If the BO is not in its preferred location add it back to
1222fb4d8502Sjsg 	 * the evicted list so that it gets validated again on the
1223fb4d8502Sjsg 	 * next command submission.
1224fb4d8502Sjsg 	 */
12255ca02815Sjsg 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1226*a3e4d62dSjsg 		if (bo->tbo.resource &&
1227*a3e4d62dSjsg 		    !(bo->preferred_domains &
1228*a3e4d62dSjsg 		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1229c349dbc7Sjsg 			amdgpu_vm_bo_evicted(&bo_va->base);
1230fb4d8502Sjsg 		else
1231c349dbc7Sjsg 			amdgpu_vm_bo_idle(&bo_va->base);
1232c349dbc7Sjsg 	} else {
1233c349dbc7Sjsg 		amdgpu_vm_bo_done(&bo_va->base);
1234fb4d8502Sjsg 	}
1235fb4d8502Sjsg 
1236fb4d8502Sjsg 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1237fb4d8502Sjsg 	bo_va->cleared = clear;
12381bb76ff1Sjsg 	bo_va->base.moved = false;
1239fb4d8502Sjsg 
1240fb4d8502Sjsg 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1241fb4d8502Sjsg 		list_for_each_entry(mapping, &bo_va->valids, list)
1242fb4d8502Sjsg 			trace_amdgpu_vm_bo_mapping(mapping);
1243fb4d8502Sjsg 	}
1244fb4d8502Sjsg 
1245fb4d8502Sjsg 	return 0;
1246fb4d8502Sjsg }
1247fb4d8502Sjsg 
1248fb4d8502Sjsg /**
1249fb4d8502Sjsg  * amdgpu_vm_update_prt_state - update the global PRT state
1250fb4d8502Sjsg  *
1251fb4d8502Sjsg  * @adev: amdgpu_device pointer
1252fb4d8502Sjsg  */
1253fb4d8502Sjsg static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1254fb4d8502Sjsg {
1255fb4d8502Sjsg 	unsigned long flags;
1256fb4d8502Sjsg 	bool enable;
1257fb4d8502Sjsg 
1258fb4d8502Sjsg 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1259fb4d8502Sjsg 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1260fb4d8502Sjsg 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1261fb4d8502Sjsg 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1262fb4d8502Sjsg }
1263fb4d8502Sjsg 
1264fb4d8502Sjsg /**
1265fb4d8502Sjsg  * amdgpu_vm_prt_get - add a PRT user
1266fb4d8502Sjsg  *
1267fb4d8502Sjsg  * @adev: amdgpu_device pointer
1268fb4d8502Sjsg  */
1269fb4d8502Sjsg static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1270fb4d8502Sjsg {
1271fb4d8502Sjsg 	if (!adev->gmc.gmc_funcs->set_prt)
1272fb4d8502Sjsg 		return;
1273fb4d8502Sjsg 
1274fb4d8502Sjsg 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1275fb4d8502Sjsg 		amdgpu_vm_update_prt_state(adev);
1276fb4d8502Sjsg }
1277fb4d8502Sjsg 
1278fb4d8502Sjsg /**
1279fb4d8502Sjsg  * amdgpu_vm_prt_put - drop a PRT user
1280fb4d8502Sjsg  *
1281fb4d8502Sjsg  * @adev: amdgpu_device pointer
1282fb4d8502Sjsg  */
1283fb4d8502Sjsg static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1284fb4d8502Sjsg {
1285fb4d8502Sjsg 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1286fb4d8502Sjsg 		amdgpu_vm_update_prt_state(adev);
1287fb4d8502Sjsg }
1288fb4d8502Sjsg 
1289fb4d8502Sjsg /**
1290fb4d8502Sjsg  * amdgpu_vm_prt_cb - callback for updating the PRT status
1291fb4d8502Sjsg  *
1292fb4d8502Sjsg  * @fence: fence for the callback
1293fb4d8502Sjsg  * @_cb: the callback function
1294fb4d8502Sjsg  */
1295fb4d8502Sjsg static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1296fb4d8502Sjsg {
1297fb4d8502Sjsg 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1298fb4d8502Sjsg 
1299fb4d8502Sjsg 	amdgpu_vm_prt_put(cb->adev);
1300fb4d8502Sjsg 	kfree(cb);
1301fb4d8502Sjsg }
1302fb4d8502Sjsg 
1303fb4d8502Sjsg /**
1304fb4d8502Sjsg  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1305fb4d8502Sjsg  *
1306fb4d8502Sjsg  * @adev: amdgpu_device pointer
1307fb4d8502Sjsg  * @fence: fence for the callback
1308fb4d8502Sjsg  */
1309fb4d8502Sjsg static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1310fb4d8502Sjsg 				 struct dma_fence *fence)
1311fb4d8502Sjsg {
1312fb4d8502Sjsg 	struct amdgpu_prt_cb *cb;
1313fb4d8502Sjsg 
1314fb4d8502Sjsg 	if (!adev->gmc.gmc_funcs->set_prt)
1315fb4d8502Sjsg 		return;
1316fb4d8502Sjsg 
1317fb4d8502Sjsg 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1318fb4d8502Sjsg 	if (!cb) {
1319fb4d8502Sjsg 		/* Last resort when we are OOM */
1320fb4d8502Sjsg 		if (fence)
1321fb4d8502Sjsg 			dma_fence_wait(fence, false);
1322fb4d8502Sjsg 
1323fb4d8502Sjsg 		amdgpu_vm_prt_put(adev);
1324fb4d8502Sjsg 	} else {
1325fb4d8502Sjsg 		cb->adev = adev;
1326fb4d8502Sjsg 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1327fb4d8502Sjsg 						     amdgpu_vm_prt_cb))
1328fb4d8502Sjsg 			amdgpu_vm_prt_cb(fence, &cb->cb);
1329fb4d8502Sjsg 	}
1330fb4d8502Sjsg }
1331fb4d8502Sjsg 
1332fb4d8502Sjsg /**
1333fb4d8502Sjsg  * amdgpu_vm_free_mapping - free a mapping
1334fb4d8502Sjsg  *
1335fb4d8502Sjsg  * @adev: amdgpu_device pointer
1336fb4d8502Sjsg  * @vm: requested vm
1337fb4d8502Sjsg  * @mapping: mapping to be freed
1338fb4d8502Sjsg  * @fence: fence of the unmap operation
1339fb4d8502Sjsg  *
1340fb4d8502Sjsg  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1341fb4d8502Sjsg  */
1342fb4d8502Sjsg static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1343fb4d8502Sjsg 				   struct amdgpu_vm *vm,
1344fb4d8502Sjsg 				   struct amdgpu_bo_va_mapping *mapping,
1345fb4d8502Sjsg 				   struct dma_fence *fence)
1346fb4d8502Sjsg {
1347fb4d8502Sjsg 	if (mapping->flags & AMDGPU_PTE_PRT)
1348fb4d8502Sjsg 		amdgpu_vm_add_prt_cb(adev, fence);
1349fb4d8502Sjsg 	kfree(mapping);
1350fb4d8502Sjsg }
1351fb4d8502Sjsg 
1352fb4d8502Sjsg /**
1353fb4d8502Sjsg  * amdgpu_vm_prt_fini - finish all prt mappings
1354fb4d8502Sjsg  *
1355fb4d8502Sjsg  * @adev: amdgpu_device pointer
1356fb4d8502Sjsg  * @vm: requested vm
1357fb4d8502Sjsg  *
1358fb4d8502Sjsg  * Register a cleanup callback to disable PRT support after the VM dies.
1359fb4d8502Sjsg  */
1360fb4d8502Sjsg static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1361fb4d8502Sjsg {
13625ca02815Sjsg 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
13631bb76ff1Sjsg 	struct dma_resv_iter cursor;
13641bb76ff1Sjsg 	struct dma_fence *fence;
1365fb4d8502Sjsg 
13661bb76ff1Sjsg 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1367fb4d8502Sjsg 		/* Add a callback for each fence in the reservation object */
1368fb4d8502Sjsg 		amdgpu_vm_prt_get(adev);
13691bb76ff1Sjsg 		amdgpu_vm_add_prt_cb(adev, fence);
1370fb4d8502Sjsg 	}
1371fb4d8502Sjsg }
1372fb4d8502Sjsg 
1373fb4d8502Sjsg /**
1374fb4d8502Sjsg  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1375fb4d8502Sjsg  *
1376fb4d8502Sjsg  * @adev: amdgpu_device pointer
1377fb4d8502Sjsg  * @vm: requested vm
1378fb4d8502Sjsg  * @fence: optional resulting fence (unchanged if no work needed to be done
1379fb4d8502Sjsg  * or if an error occurred)
1380fb4d8502Sjsg  *
1381fb4d8502Sjsg  * Make sure all freed BOs are cleared in the PT.
1382fb4d8502Sjsg  * PTs have to be reserved and the mutex must be locked!
1383fb4d8502Sjsg  *
1384fb4d8502Sjsg  * Returns:
1385fb4d8502Sjsg  * 0 for success.
1386fb4d8502Sjsg  *
1387fb4d8502Sjsg  */
1388fb4d8502Sjsg int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1389fb4d8502Sjsg 			  struct amdgpu_vm *vm,
1390fb4d8502Sjsg 			  struct dma_fence **fence)
1391fb4d8502Sjsg {
13925ca02815Sjsg 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1393fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *mapping;
1394fb4d8502Sjsg 	uint64_t init_pte_value = 0;
1395fb4d8502Sjsg 	struct dma_fence *f = NULL;
1396fb4d8502Sjsg 	int r;
1397fb4d8502Sjsg 
1398fb4d8502Sjsg 	while (!list_empty(&vm->freed)) {
1399fb4d8502Sjsg 		mapping = list_first_entry(&vm->freed,
1400fb4d8502Sjsg 			struct amdgpu_bo_va_mapping, list);
1401fb4d8502Sjsg 		list_del(&mapping->list);
1402fb4d8502Sjsg 
1403c349dbc7Sjsg 		if (vm->pte_support_ats &&
1404c349dbc7Sjsg 		    mapping->start < AMDGPU_GMC_HOLE_START)
1405fb4d8502Sjsg 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1406fb4d8502Sjsg 
14071bb76ff1Sjsg 		r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
14081bb76ff1Sjsg 					   mapping->start, mapping->last,
14091bb76ff1Sjsg 					   init_pte_value, 0, 0, NULL, NULL,
14101bb76ff1Sjsg 					   &f);
1411fb4d8502Sjsg 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1412fb4d8502Sjsg 		if (r) {
1413fb4d8502Sjsg 			dma_fence_put(f);
1414fb4d8502Sjsg 			return r;
1415fb4d8502Sjsg 		}
1416fb4d8502Sjsg 	}
1417fb4d8502Sjsg 
1418fb4d8502Sjsg 	if (fence && f) {
1419fb4d8502Sjsg 		dma_fence_put(*fence);
1420fb4d8502Sjsg 		*fence = f;
1421fb4d8502Sjsg 	} else {
1422fb4d8502Sjsg 		dma_fence_put(f);
1423fb4d8502Sjsg 	}
1424fb4d8502Sjsg 
1425fb4d8502Sjsg 	return 0;
1427fb4d8502Sjsg }
1428fb4d8502Sjsg 
1429fb4d8502Sjsg /**
1430fb4d8502Sjsg  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1431fb4d8502Sjsg  *
1432fb4d8502Sjsg  * @adev: amdgpu_device pointer
1433fb4d8502Sjsg  * @vm: requested vm
1434fb4d8502Sjsg  *
1435fb4d8502Sjsg  * Make sure all BOs which are moved are updated in the PTs.
1436fb4d8502Sjsg  *
1437fb4d8502Sjsg  * Returns:
1438fb4d8502Sjsg  * 0 for success.
1439fb4d8502Sjsg  *
1440fb4d8502Sjsg  * PTs have to be reserved!
1441fb4d8502Sjsg  */
1442fb4d8502Sjsg int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1443fb4d8502Sjsg 			   struct amdgpu_vm *vm)
1444fb4d8502Sjsg {
14451bb76ff1Sjsg 	struct amdgpu_bo_va *bo_va;
1446c349dbc7Sjsg 	struct dma_resv *resv;
1447fb4d8502Sjsg 	bool clear;
1448fb4d8502Sjsg 	int r;
1449fb4d8502Sjsg 
14501bb76ff1Sjsg 	spin_lock(&vm->status_lock);
14511bb76ff1Sjsg 	while (!list_empty(&vm->moved)) {
14521bb76ff1Sjsg 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
14531bb76ff1Sjsg 					 base.vm_status);
14541bb76ff1Sjsg 		spin_unlock(&vm->status_lock);
14551bb76ff1Sjsg 
1456fb4d8502Sjsg 	/* Per VM BOs never need to be cleared in the page tables */
14571bb76ff1Sjsg 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1458c349dbc7Sjsg 		if (r)
1459c349dbc7Sjsg 			return r;
14601bb76ff1Sjsg 		spin_lock(&vm->status_lock);
1461c349dbc7Sjsg 	}
1462c349dbc7Sjsg 
1463c349dbc7Sjsg 	while (!list_empty(&vm->invalidated)) {
1464c349dbc7Sjsg 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1465c349dbc7Sjsg 					 base.vm_status);
1466c349dbc7Sjsg 		resv = bo_va->base.bo->tbo.base.resv;
14671bb76ff1Sjsg 		spin_unlock(&vm->status_lock);
1468c349dbc7Sjsg 
1469fb4d8502Sjsg 		/* Try to reserve the BO to avoid clearing its ptes */
1470c349dbc7Sjsg 		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
1471fb4d8502Sjsg 			clear = false;
1472fb4d8502Sjsg 		/* Somebody else is using the BO right now */
1473fb4d8502Sjsg 		else
1474fb4d8502Sjsg 			clear = true;
1475fb4d8502Sjsg 
14761bb76ff1Sjsg 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1477c349dbc7Sjsg 		if (r)
1478fb4d8502Sjsg 			return r;
1479fb4d8502Sjsg 
1480c349dbc7Sjsg 		if (!clear)
1481c349dbc7Sjsg 			dma_resv_unlock(resv);
14821bb76ff1Sjsg 		spin_lock(&vm->status_lock);
1483fb4d8502Sjsg 	}
14841bb76ff1Sjsg 	spin_unlock(&vm->status_lock);
1485fb4d8502Sjsg 
1486fb4d8502Sjsg 	return 0;
1487fb4d8502Sjsg }
1488fb4d8502Sjsg 
1489fb4d8502Sjsg /**
1490fb4d8502Sjsg  * amdgpu_vm_bo_add - add a bo to a specific vm
1491fb4d8502Sjsg  *
1492fb4d8502Sjsg  * @adev: amdgpu_device pointer
1493fb4d8502Sjsg  * @vm: requested vm
1494fb4d8502Sjsg  * @bo: amdgpu buffer object
1495fb4d8502Sjsg  *
1496fb4d8502Sjsg  * Add @bo into the requested vm and to the list of bos associated with it.
1498fb4d8502Sjsg  *
1499fb4d8502Sjsg  * Returns:
1500fb4d8502Sjsg  * Newly added bo_va or NULL for failure
1501fb4d8502Sjsg  *
1502fb4d8502Sjsg  * Object has to be reserved!
1503fb4d8502Sjsg  */
1504fb4d8502Sjsg struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1505fb4d8502Sjsg 				      struct amdgpu_vm *vm,
1506fb4d8502Sjsg 				      struct amdgpu_bo *bo)
1507fb4d8502Sjsg {
1508fb4d8502Sjsg 	struct amdgpu_bo_va *bo_va;
1509fb4d8502Sjsg 
1510fb4d8502Sjsg 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1511fb4d8502Sjsg 	if (bo_va == NULL) {
1512fb4d8502Sjsg 		return NULL;
1513fb4d8502Sjsg 	}
1514fb4d8502Sjsg 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1515fb4d8502Sjsg 
1516fb4d8502Sjsg 	bo_va->ref_count = 1;
15170c513800Sjsg 	bo_va->last_pt_update = dma_fence_get_stub();
1518fb4d8502Sjsg 	INIT_LIST_HEAD(&bo_va->valids);
1519fb4d8502Sjsg 	INIT_LIST_HEAD(&bo_va->invalids);
1520fb4d8502Sjsg 
1521ad8b1aafSjsg 	if (!bo)
1522ad8b1aafSjsg 		return bo_va;
1523ad8b1aafSjsg 
15241bb76ff1Sjsg 	dma_resv_assert_held(bo->tbo.base.resv);
1525ad8b1aafSjsg 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1526c349dbc7Sjsg 		bo_va->is_xgmi = true;
1527c349dbc7Sjsg 		/* Power up XGMI if it can potentially be used */
1528ad8b1aafSjsg 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1529c349dbc7Sjsg 	}
1530c349dbc7Sjsg 
1531fb4d8502Sjsg 	return bo_va;
1532fb4d8502Sjsg }
1533fb4d8502Sjsg 
1534fb4d8502Sjsg 
1535fb4d8502Sjsg /**
15365ca02815Sjsg  * amdgpu_vm_bo_insert_map - insert a new mapping
1537fb4d8502Sjsg  *
1538fb4d8502Sjsg  * @adev: amdgpu_device pointer
1539fb4d8502Sjsg  * @bo_va: bo_va to store the address
1540fb4d8502Sjsg  * @mapping: the mapping to insert
1541fb4d8502Sjsg  *
1542fb4d8502Sjsg  * Insert a new mapping into all structures.
1543fb4d8502Sjsg  */
1544fb4d8502Sjsg static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1545fb4d8502Sjsg 				    struct amdgpu_bo_va *bo_va,
1546fb4d8502Sjsg 				    struct amdgpu_bo_va_mapping *mapping)
1547fb4d8502Sjsg {
1548fb4d8502Sjsg 	struct amdgpu_vm *vm = bo_va->base.vm;
1549fb4d8502Sjsg 	struct amdgpu_bo *bo = bo_va->base.bo;
1550fb4d8502Sjsg 
1551fb4d8502Sjsg 	mapping->bo_va = bo_va;
1552fb4d8502Sjsg 	list_add(&mapping->list, &bo_va->invalids);
1553fb4d8502Sjsg 	amdgpu_vm_it_insert(mapping, &vm->va);
1554fb4d8502Sjsg 
1555fb4d8502Sjsg 	if (mapping->flags & AMDGPU_PTE_PRT)
1556fb4d8502Sjsg 		amdgpu_vm_prt_get(adev);
1557fb4d8502Sjsg 
15585ca02815Sjsg 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1559fb4d8502Sjsg 	    !bo_va->base.moved) {
15601bb76ff1Sjsg 		amdgpu_vm_bo_moved(&bo_va->base);
1561fb4d8502Sjsg 	}
1562fb4d8502Sjsg 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1563fb4d8502Sjsg }
1564fb4d8502Sjsg 
156551c13bbeSjsg /* Validate operation parameters to prevent potential abuse */
156651c13bbeSjsg static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
156751c13bbeSjsg 					  struct amdgpu_bo *bo,
156851c13bbeSjsg 					  uint64_t saddr,
156951c13bbeSjsg 					  uint64_t offset,
157051c13bbeSjsg 					  uint64_t size)
157151c13bbeSjsg {
157251c13bbeSjsg 	uint64_t tmp, lpfn;
157351c13bbeSjsg 
157451c13bbeSjsg 	if (saddr & AMDGPU_GPU_PAGE_MASK
157551c13bbeSjsg 	    || offset & AMDGPU_GPU_PAGE_MASK
157651c13bbeSjsg 	    || size & AMDGPU_GPU_PAGE_MASK)
157751c13bbeSjsg 		return -EINVAL;
157851c13bbeSjsg 
157951c13bbeSjsg 	if (check_add_overflow(saddr, size, &tmp)
158051c13bbeSjsg 	    || check_add_overflow(offset, size, &tmp)
158151c13bbeSjsg 	    || size == 0 /* which also leads to end < begin */)
158251c13bbeSjsg 		return -EINVAL;
158351c13bbeSjsg 
158451c13bbeSjsg 	/* make sure object fits at this offset */
158551c13bbeSjsg 	if (bo && offset + size > amdgpu_bo_size(bo))
158651c13bbeSjsg 		return -EINVAL;
158751c13bbeSjsg 
158851c13bbeSjsg 	/* Ensure the last pfn does not exceed max_pfn */
158951c13bbeSjsg 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
159051c13bbeSjsg 	if (lpfn >= adev->vm_manager.max_pfn)
159151c13bbeSjsg 		return -EINVAL;
159251c13bbeSjsg 
159351c13bbeSjsg 	return 0;
159451c13bbeSjsg }
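
/*
 * Illustrative examples (assumed values; verify() is shorthand for
 * amdgpu_vm_verify_parameters, with 4K GPU pages and a 1 MiB BO):
 *
 *	verify(adev, bo, 0x100000, 0x0,    0x100000) ->  0      (aligned, fits)
 *	verify(adev, bo, 0x100234, 0x0,    0x100000) -> -EINVAL (saddr unaligned)
 *	verify(adev, bo, 0x100000, 0x0,    0x0)      -> -EINVAL (empty range)
 *	verify(adev, bo, 0x100000, 0x1000, 0x100000) -> -EINVAL (offset + size
 *							 runs past the BO)
 *	verify(adev, bo, ~0xfffULL, 0x0,   0x2000)   -> -EINVAL (saddr + size
 *							 overflows)
 */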
159551c13bbeSjsg 
1596fb4d8502Sjsg /**
1597fb4d8502Sjsg  * amdgpu_vm_bo_map - map bo inside a vm
1598fb4d8502Sjsg  *
1599fb4d8502Sjsg  * @adev: amdgpu_device pointer
1600fb4d8502Sjsg  * @bo_va: bo_va to store the address
1601fb4d8502Sjsg  * @saddr: where to map the BO
1602fb4d8502Sjsg  * @offset: requested offset in the BO
1603fb4d8502Sjsg  * @size: BO size in bytes
1604fb4d8502Sjsg  * @flags: attributes of pages (read/write/valid/etc.)
1605fb4d8502Sjsg  *
1606fb4d8502Sjsg  * Add a mapping of the BO at the specified addr into the VM.
1607fb4d8502Sjsg  *
1608fb4d8502Sjsg  * Returns:
1609fb4d8502Sjsg  * 0 for success, error for failure.
1610fb4d8502Sjsg  *
1611fb4d8502Sjsg  * Object has to be reserved and unreserved outside!
1612fb4d8502Sjsg  */
1613fb4d8502Sjsg int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1614fb4d8502Sjsg 		     struct amdgpu_bo_va *bo_va,
1615fb4d8502Sjsg 		     uint64_t saddr, uint64_t offset,
1616fb4d8502Sjsg 		     uint64_t size, uint64_t flags)
1617fb4d8502Sjsg {
1618fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1619fb4d8502Sjsg 	struct amdgpu_bo *bo = bo_va->base.bo;
1620fb4d8502Sjsg 	struct amdgpu_vm *vm = bo_va->base.vm;
1621fb4d8502Sjsg 	uint64_t eaddr;
162251c13bbeSjsg 	int r;
1623fb4d8502Sjsg 
162451c13bbeSjsg 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
162551c13bbeSjsg 	if (r)
162651c13bbeSjsg 		return r;
1627fb4d8502Sjsg 
1628fb4d8502Sjsg 	saddr /= AMDGPU_GPU_PAGE_SIZE;
162951c13bbeSjsg 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1630fb4d8502Sjsg 
1631fb4d8502Sjsg 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1632fb4d8502Sjsg 	if (tmp) {
1633fb4d8502Sjsg 		/* bo and tmp overlap, invalid addr */
163408da896eSjsg 		dev_err(adev->dev, "bo %p va 0x%010llx-0x%010llx conflict with "
163508da896eSjsg 			"0x%010llx-0x%010llx\n", bo, saddr, eaddr,
1636fb4d8502Sjsg 			tmp->start, tmp->last + 1);
1637fb4d8502Sjsg 		return -EINVAL;
1638fb4d8502Sjsg 	}
1639fb4d8502Sjsg 
1640fb4d8502Sjsg 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1641fb4d8502Sjsg 	if (!mapping)
1642fb4d8502Sjsg 		return -ENOMEM;
1643fb4d8502Sjsg 
1644fb4d8502Sjsg 	mapping->start = saddr;
1645fb4d8502Sjsg 	mapping->last = eaddr;
1646fb4d8502Sjsg 	mapping->offset = offset;
1647fb4d8502Sjsg 	mapping->flags = flags;
1648fb4d8502Sjsg 
1649fb4d8502Sjsg 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1650fb4d8502Sjsg 
1651fb4d8502Sjsg 	return 0;
1652fb4d8502Sjsg }
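
/*
 * Minimal usage sketch (hypothetical caller, error handling trimmed; gpu_va
 * is an assumed page-aligned, conflict-free address). The BO has to be
 * reserved around the call, as documented above:
 *
 *	r = amdgpu_bo_reserve(bo, true);
 *	if (!r) {
 *		r = amdgpu_vm_bo_map(adev, bo_va, gpu_va, 0,
 *				     amdgpu_bo_size(bo),
 *				     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *		amdgpu_bo_unreserve(bo);
 *	}
 *
 * This only records the mapping; the PTEs are written later, e.g. by
 * amdgpu_vm_bo_update().
 */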
1653fb4d8502Sjsg 
1654fb4d8502Sjsg /**
1655fb4d8502Sjsg  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1656fb4d8502Sjsg  *
1657fb4d8502Sjsg  * @adev: amdgpu_device pointer
1658fb4d8502Sjsg  * @bo_va: bo_va to store the address
1659fb4d8502Sjsg  * @saddr: where to map the BO
1660fb4d8502Sjsg  * @offset: requested offset in the BO
1661fb4d8502Sjsg  * @size: BO size in bytes
1662fb4d8502Sjsg  * @flags: attributes of pages (read/write/valid/etc.)
1663fb4d8502Sjsg  *
1664fb4d8502Sjsg  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1665fb4d8502Sjsg  * mappings as we do so.
1666fb4d8502Sjsg  *
1667fb4d8502Sjsg  * Returns:
1668fb4d8502Sjsg  * 0 for success, error for failure.
1669fb4d8502Sjsg  *
1670fb4d8502Sjsg  * Object has to be reserved and unreserved outside!
1671fb4d8502Sjsg  */
1672fb4d8502Sjsg int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1673fb4d8502Sjsg 			     struct amdgpu_bo_va *bo_va,
1674fb4d8502Sjsg 			     uint64_t saddr, uint64_t offset,
1675fb4d8502Sjsg 			     uint64_t size, uint64_t flags)
1676fb4d8502Sjsg {
1677fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *mapping;
1678fb4d8502Sjsg 	struct amdgpu_bo *bo = bo_va->base.bo;
1679fb4d8502Sjsg 	uint64_t eaddr;
1680fb4d8502Sjsg 	int r;
1681fb4d8502Sjsg 
168251c13bbeSjsg 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
168351c13bbeSjsg 	if (r)
168451c13bbeSjsg 		return r;
1685fb4d8502Sjsg 
1686fb4d8502Sjsg 	/* Allocate all the needed memory */
1687fb4d8502Sjsg 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1688fb4d8502Sjsg 	if (!mapping)
1689fb4d8502Sjsg 		return -ENOMEM;
1690fb4d8502Sjsg 
1691fb4d8502Sjsg 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1692fb4d8502Sjsg 	if (r) {
1693fb4d8502Sjsg 		kfree(mapping);
1694fb4d8502Sjsg 		return r;
1695fb4d8502Sjsg 	}
1696fb4d8502Sjsg 
1697fb4d8502Sjsg 	saddr /= AMDGPU_GPU_PAGE_SIZE;
169851c13bbeSjsg 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1699fb4d8502Sjsg 
1700fb4d8502Sjsg 	mapping->start = saddr;
1701fb4d8502Sjsg 	mapping->last = eaddr;
1702fb4d8502Sjsg 	mapping->offset = offset;
1703fb4d8502Sjsg 	mapping->flags = flags;
1704fb4d8502Sjsg 
1705fb4d8502Sjsg 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1706fb4d8502Sjsg 
1707fb4d8502Sjsg 	return 0;
1708fb4d8502Sjsg }
1709fb4d8502Sjsg 
1710fb4d8502Sjsg /**
1711fb4d8502Sjsg  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1712fb4d8502Sjsg  *
1713fb4d8502Sjsg  * @adev: amdgpu_device pointer
1714fb4d8502Sjsg  * @bo_va: bo_va to remove the address from
1715fb4d8502Sjsg  * @saddr: where the BO is mapped
1716fb4d8502Sjsg  *
1717fb4d8502Sjsg  * Remove a mapping of the BO at the specified addr from the VM.
1718fb4d8502Sjsg  *
1719fb4d8502Sjsg  * Returns:
1720fb4d8502Sjsg  * 0 for success, error for failure.
1721fb4d8502Sjsg  *
1722fb4d8502Sjsg  * Object has to be reserved and unreserved outside!
1723fb4d8502Sjsg  */
1724fb4d8502Sjsg int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1725fb4d8502Sjsg 		       struct amdgpu_bo_va *bo_va,
1726fb4d8502Sjsg 		       uint64_t saddr)
1727fb4d8502Sjsg {
1728fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *mapping;
1729fb4d8502Sjsg 	struct amdgpu_vm *vm = bo_va->base.vm;
1730fb4d8502Sjsg 	bool valid = true;
1731fb4d8502Sjsg 
1732fb4d8502Sjsg 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1733fb4d8502Sjsg 
1734fb4d8502Sjsg 	list_for_each_entry(mapping, &bo_va->valids, list) {
1735fb4d8502Sjsg 		if (mapping->start == saddr)
1736fb4d8502Sjsg 			break;
1737fb4d8502Sjsg 	}
1738fb4d8502Sjsg 
1739fb4d8502Sjsg 	if (&mapping->list == &bo_va->valids) {
1740fb4d8502Sjsg 		valid = false;
1741fb4d8502Sjsg 
1742fb4d8502Sjsg 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1743fb4d8502Sjsg 			if (mapping->start == saddr)
1744fb4d8502Sjsg 				break;
1745fb4d8502Sjsg 		}
1746fb4d8502Sjsg 
1747fb4d8502Sjsg 		if (&mapping->list == &bo_va->invalids)
1748fb4d8502Sjsg 			return -ENOENT;
1749fb4d8502Sjsg 	}
1750fb4d8502Sjsg 
1751fb4d8502Sjsg 	list_del(&mapping->list);
1752fb4d8502Sjsg 	amdgpu_vm_it_remove(mapping, &vm->va);
1753fb4d8502Sjsg 	mapping->bo_va = NULL;
1754fb4d8502Sjsg 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1755fb4d8502Sjsg 
1756fb4d8502Sjsg 	if (valid)
1757fb4d8502Sjsg 		list_add(&mapping->list, &vm->freed);
1758fb4d8502Sjsg 	else
1759fb4d8502Sjsg 		amdgpu_vm_free_mapping(adev, vm, mapping,
1760fb4d8502Sjsg 				       bo_va->last_pt_update);
1761fb4d8502Sjsg 
1762fb4d8502Sjsg 	return 0;
1763fb4d8502Sjsg }
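
/*
 * Note on the two lists searched above: a mapping found on bo_va->valids has
 * already been committed to the page tables, so it is queued on vm->freed and
 * its PTEs are cleared by the next amdgpu_vm_clear_freed() call; a mapping
 * still on bo_va->invalids was never written to the hardware and can be freed
 * right away via amdgpu_vm_free_mapping().
 */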
1764fb4d8502Sjsg 
1765fb4d8502Sjsg /**
1766fb4d8502Sjsg  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1767fb4d8502Sjsg  *
1768fb4d8502Sjsg  * @adev: amdgpu_device pointer
1769fb4d8502Sjsg  * @vm: VM structure to use
1770fb4d8502Sjsg  * @saddr: start of the range
1771fb4d8502Sjsg  * @size: size of the range
1772fb4d8502Sjsg  *
1773fb4d8502Sjsg  * Remove all mappings in a range, split them as appropriate.
1774fb4d8502Sjsg  *
1775fb4d8502Sjsg  * Returns:
1776fb4d8502Sjsg  * 0 for success, error for failure.
1777fb4d8502Sjsg  */
1778fb4d8502Sjsg int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1779fb4d8502Sjsg 				struct amdgpu_vm *vm,
1780fb4d8502Sjsg 				uint64_t saddr, uint64_t size)
1781fb4d8502Sjsg {
1782fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1783fb4d8502Sjsg 	DRM_LIST_HEAD(removed);
1784fb4d8502Sjsg 	uint64_t eaddr;
178551c13bbeSjsg 	int r;
1786fb4d8502Sjsg 
178751c13bbeSjsg 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
178851c13bbeSjsg 	if (r)
178951c13bbeSjsg 		return r;
179051c13bbeSjsg 
1791fb4d8502Sjsg 	saddr /= AMDGPU_GPU_PAGE_SIZE;
179251c13bbeSjsg 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1793fb4d8502Sjsg 
1794fb4d8502Sjsg 	/* Allocate all the needed memory */
1795fb4d8502Sjsg 	before = kzalloc(sizeof(*before), GFP_KERNEL);
1796fb4d8502Sjsg 	if (!before)
1797fb4d8502Sjsg 		return -ENOMEM;
1798fb4d8502Sjsg 	INIT_LIST_HEAD(&before->list);
1799fb4d8502Sjsg 
1800fb4d8502Sjsg 	after = kzalloc(sizeof(*after), GFP_KERNEL);
1801fb4d8502Sjsg 	if (!after) {
1802fb4d8502Sjsg 		kfree(before);
1803fb4d8502Sjsg 		return -ENOMEM;
1804fb4d8502Sjsg 	}
1805fb4d8502Sjsg 	INIT_LIST_HEAD(&after->list);
1806fb4d8502Sjsg 
1807fb4d8502Sjsg 	/* Now gather all removed mappings */
1808fb4d8502Sjsg 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1809fb4d8502Sjsg 	while (tmp) {
1810fb4d8502Sjsg 		/* Remember mapping split at the start */
1811fb4d8502Sjsg 		if (tmp->start < saddr) {
1812fb4d8502Sjsg 			before->start = tmp->start;
1813fb4d8502Sjsg 			before->last = saddr - 1;
1814fb4d8502Sjsg 			before->offset = tmp->offset;
1815fb4d8502Sjsg 			before->flags = tmp->flags;
1816fb4d8502Sjsg 			before->bo_va = tmp->bo_va;
1817fb4d8502Sjsg 			list_add(&before->list, &tmp->bo_va->invalids);
1818fb4d8502Sjsg 		}
1819fb4d8502Sjsg 
1820fb4d8502Sjsg 		/* Remember mapping split at the end */
1821fb4d8502Sjsg 		if (tmp->last > eaddr) {
1822fb4d8502Sjsg 			after->start = eaddr + 1;
1823fb4d8502Sjsg 			after->last = tmp->last;
1824fb4d8502Sjsg 			after->offset = tmp->offset;
1825ad8b1aafSjsg 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1826fb4d8502Sjsg 			after->flags = tmp->flags;
1827fb4d8502Sjsg 			after->bo_va = tmp->bo_va;
1828fb4d8502Sjsg 			list_add(&after->list, &tmp->bo_va->invalids);
1829fb4d8502Sjsg 		}
1830fb4d8502Sjsg 
1831fb4d8502Sjsg 		list_del(&tmp->list);
1832fb4d8502Sjsg 		list_add(&tmp->list, &removed);
1833fb4d8502Sjsg 
1834fb4d8502Sjsg 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1835fb4d8502Sjsg 	}
1836fb4d8502Sjsg 
1837fb4d8502Sjsg 	/* And free them up */
1838fb4d8502Sjsg 	list_for_each_entry_safe(tmp, next, &removed, list) {
1839fb4d8502Sjsg 		amdgpu_vm_it_remove(tmp, &vm->va);
1840fb4d8502Sjsg 		list_del(&tmp->list);
1841fb4d8502Sjsg 
1842fb4d8502Sjsg 		if (tmp->start < saddr)
1843fb4d8502Sjsg 			tmp->start = saddr;
1844fb4d8502Sjsg 		if (tmp->last > eaddr)
1845fb4d8502Sjsg 			tmp->last = eaddr;
1846fb4d8502Sjsg 
1847fb4d8502Sjsg 		tmp->bo_va = NULL;
1848fb4d8502Sjsg 		list_add(&tmp->list, &vm->freed);
1849fb4d8502Sjsg 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
1850fb4d8502Sjsg 	}
1851fb4d8502Sjsg 
1852fb4d8502Sjsg 	/* Insert partial mapping before the range */
1853fb4d8502Sjsg 	if (!list_empty(&before->list)) {
185400d84c35Sjsg 		struct amdgpu_bo *bo = before->bo_va->base.bo;
185500d84c35Sjsg 
1856fb4d8502Sjsg 		amdgpu_vm_it_insert(before, &vm->va);
1857fb4d8502Sjsg 		if (before->flags & AMDGPU_PTE_PRT)
1858fb4d8502Sjsg 			amdgpu_vm_prt_get(adev);
185900d84c35Sjsg 
186000d84c35Sjsg 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
186100d84c35Sjsg 		    !before->bo_va->base.moved)
186200d84c35Sjsg 			amdgpu_vm_bo_moved(&before->bo_va->base);
1863fb4d8502Sjsg 	} else {
1864fb4d8502Sjsg 		kfree(before);
1865fb4d8502Sjsg 	}
1866fb4d8502Sjsg 
1867fb4d8502Sjsg 	/* Insert partial mapping after the range */
1868fb4d8502Sjsg 	if (!list_empty(&after->list)) {
186900d84c35Sjsg 		struct amdgpu_bo *bo = after->bo_va->base.bo;
187000d84c35Sjsg 
1871fb4d8502Sjsg 		amdgpu_vm_it_insert(after, &vm->va);
1872fb4d8502Sjsg 		if (after->flags & AMDGPU_PTE_PRT)
1873fb4d8502Sjsg 			amdgpu_vm_prt_get(adev);
187400d84c35Sjsg 
187500d84c35Sjsg 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
187600d84c35Sjsg 		    !after->bo_va->base.moved)
187700d84c35Sjsg 			amdgpu_vm_bo_moved(&after->bo_va->base);
1878fb4d8502Sjsg 	} else {
1879fb4d8502Sjsg 		kfree(after);
1880fb4d8502Sjsg 	}
1881fb4d8502Sjsg 
1882fb4d8502Sjsg 	return 0;
1883fb4d8502Sjsg }
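
/*
 * Worked example (illustrative page numbers): clearing pages [0x30, 0x50)
 * out of an existing mapping that covers pages [0x10, 0x90) leaves two
 * pieces:
 *
 *	before: start = 0x10, last = 0x2f, offset unchanged
 *	after:  start = 0x50, last = 0x8f,
 *		offset advanced by (0x50 - 0x10) << PAGE_SHIFT
 *
 * The middle piece [0x30, 0x4f] is clamped to the cleared range and moved to
 * vm->freed, so its PTEs are torn down by the next amdgpu_vm_clear_freed().
 */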
1884fb4d8502Sjsg 
1885fb4d8502Sjsg /**
1886fb4d8502Sjsg  * amdgpu_vm_bo_lookup_mapping - find mapping by address
1887fb4d8502Sjsg  *
1888fb4d8502Sjsg  * @vm: the requested VM
1889fb4d8502Sjsg  * @addr: the address
1890fb4d8502Sjsg  *
1891fb4d8502Sjsg  * Find a mapping by its address.
1892fb4d8502Sjsg  *
1893fb4d8502Sjsg  * Returns:
1894fb4d8502Sjsg  * The amdgpu_bo_va_mapping matching for addr or NULL
1895fb4d8502Sjsg  *
1896fb4d8502Sjsg  */
1897fb4d8502Sjsg struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1898fb4d8502Sjsg 							 uint64_t addr)
1899fb4d8502Sjsg {
1900fb4d8502Sjsg 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1901fb4d8502Sjsg }
1902fb4d8502Sjsg 
1903fb4d8502Sjsg /**
1904fb4d8502Sjsg  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1905fb4d8502Sjsg  *
1906fb4d8502Sjsg  * @vm: the requested vm
1907fb4d8502Sjsg  * @ticket: CS ticket
1908fb4d8502Sjsg  *
1909fb4d8502Sjsg  * Trace all mappings of BOs reserved during a command submission.
1910fb4d8502Sjsg  */
1911fb4d8502Sjsg void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
1912fb4d8502Sjsg {
1913fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *mapping;
1914fb4d8502Sjsg 
1915fb4d8502Sjsg 	if (!trace_amdgpu_vm_bo_cs_enabled())
1916fb4d8502Sjsg 		return;
1917fb4d8502Sjsg 
1918fb4d8502Sjsg 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
1919fb4d8502Sjsg 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
1920fb4d8502Sjsg 		if (mapping->bo_va && mapping->bo_va->base.bo) {
1921fb4d8502Sjsg 			struct amdgpu_bo *bo;
1922fb4d8502Sjsg 
1923fb4d8502Sjsg 			bo = mapping->bo_va->base.bo;
1924c349dbc7Sjsg 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
1925c349dbc7Sjsg 			    ticket)
1926fb4d8502Sjsg 				continue;
1927fb4d8502Sjsg 		}
1928fb4d8502Sjsg 
1929fb4d8502Sjsg 		trace_amdgpu_vm_bo_cs(mapping);
1930fb4d8502Sjsg 	}
1931fb4d8502Sjsg }
1932fb4d8502Sjsg 
1933fb4d8502Sjsg /**
19341bb76ff1Sjsg  * amdgpu_vm_bo_del - remove a bo from a specific vm
1935fb4d8502Sjsg  *
1936fb4d8502Sjsg  * @adev: amdgpu_device pointer
1937fb4d8502Sjsg  * @bo_va: requested bo_va
1938fb4d8502Sjsg  *
1939fb4d8502Sjsg  * Remove @bo_va->bo from the requested vm.
1940fb4d8502Sjsg  *
1941fb4d8502Sjsg  * Object has to be reserved!
1942fb4d8502Sjsg  */
19431bb76ff1Sjsg void amdgpu_vm_bo_del(struct amdgpu_device *adev,
1944fb4d8502Sjsg 		      struct amdgpu_bo_va *bo_va)
1945fb4d8502Sjsg {
1946fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *mapping, *next;
1947c349dbc7Sjsg 	struct amdgpu_bo *bo = bo_va->base.bo;
1948fb4d8502Sjsg 	struct amdgpu_vm *vm = bo_va->base.vm;
1949c349dbc7Sjsg 	struct amdgpu_vm_bo_base **base;
1950fb4d8502Sjsg 
19511bb76ff1Sjsg 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
19521bb76ff1Sjsg 
1953c349dbc7Sjsg 	if (bo) {
19541bb76ff1Sjsg 		dma_resv_assert_held(bo->tbo.base.resv);
19555ca02815Sjsg 		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
19561bb76ff1Sjsg 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
1957fb4d8502Sjsg 
1958c349dbc7Sjsg 		for (base = &bo_va->base.bo->vm_bo; *base;
1959c349dbc7Sjsg 		     base = &(*base)->next) {
1960c349dbc7Sjsg 			if (*base != &bo_va->base)
1961c349dbc7Sjsg 				continue;
1962c349dbc7Sjsg 
1963c349dbc7Sjsg 			*base = bo_va->base.next;
1964c349dbc7Sjsg 			break;
1965c349dbc7Sjsg 		}
1966c349dbc7Sjsg 	}
1967c349dbc7Sjsg 
19681bb76ff1Sjsg 	spin_lock(&vm->status_lock);
1969fb4d8502Sjsg 	list_del(&bo_va->base.vm_status);
19701bb76ff1Sjsg 	spin_unlock(&vm->status_lock);
1971fb4d8502Sjsg 
1972fb4d8502Sjsg 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1973fb4d8502Sjsg 		list_del(&mapping->list);
1974fb4d8502Sjsg 		amdgpu_vm_it_remove(mapping, &vm->va);
1975fb4d8502Sjsg 		mapping->bo_va = NULL;
1976fb4d8502Sjsg 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1977fb4d8502Sjsg 		list_add(&mapping->list, &vm->freed);
1978fb4d8502Sjsg 	}
1979fb4d8502Sjsg 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1980fb4d8502Sjsg 		list_del(&mapping->list);
1981fb4d8502Sjsg 		amdgpu_vm_it_remove(mapping, &vm->va);
1982fb4d8502Sjsg 		amdgpu_vm_free_mapping(adev, vm, mapping,
1983fb4d8502Sjsg 				       bo_va->last_pt_update);
1984fb4d8502Sjsg 	}
1985fb4d8502Sjsg 
1986fb4d8502Sjsg 	dma_fence_put(bo_va->last_pt_update);
1987c349dbc7Sjsg 
1988ad8b1aafSjsg 	if (bo && bo_va->is_xgmi)
1989ad8b1aafSjsg 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
1990c349dbc7Sjsg 
1991fb4d8502Sjsg 	kfree(bo_va);
1992fb4d8502Sjsg }
1993fb4d8502Sjsg 
1994fb4d8502Sjsg /**
1995c349dbc7Sjsg  * amdgpu_vm_evictable - check if we can evict a VM
1996c349dbc7Sjsg  *
1997c349dbc7Sjsg  * @bo: A page table of the VM.
1998c349dbc7Sjsg  *
1999c349dbc7Sjsg  * Check if it is possible to evict a VM.
2000c349dbc7Sjsg  */
2001c349dbc7Sjsg bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2002c349dbc7Sjsg {
2003c349dbc7Sjsg 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2004c349dbc7Sjsg 
2005c349dbc7Sjsg 	/* Page tables of a destroyed VM can go away immediately */
2006c349dbc7Sjsg 	if (!bo_base || !bo_base->vm)
2007c349dbc7Sjsg 		return true;
2008c349dbc7Sjsg 
2009c349dbc7Sjsg 	/* Don't evict VM page tables while they are busy */
20101bb76ff1Sjsg 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2011c349dbc7Sjsg 		return false;
2012c349dbc7Sjsg 
2013c349dbc7Sjsg 	/* Try to block ongoing updates */
2014c349dbc7Sjsg 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2015c349dbc7Sjsg 		return false;
2016c349dbc7Sjsg 
2017c349dbc7Sjsg 	/* Don't evict VM page tables while they are updated */
2018ad8b1aafSjsg 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2019c349dbc7Sjsg 		amdgpu_vm_eviction_unlock(bo_base->vm);
2020c349dbc7Sjsg 		return false;
2021c349dbc7Sjsg 	}
2022c349dbc7Sjsg 
2023c349dbc7Sjsg 	bo_base->vm->evicting = true;
2024c349dbc7Sjsg 	amdgpu_vm_eviction_unlock(bo_base->vm);
2025c349dbc7Sjsg 	return true;
2026c349dbc7Sjsg }
2027c349dbc7Sjsg 
2028c349dbc7Sjsg /**
2029fb4d8502Sjsg  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2030fb4d8502Sjsg  *
2031fb4d8502Sjsg  * @adev: amdgpu_device pointer
2032fb4d8502Sjsg  * @bo: amdgpu buffer object
2033fb4d8502Sjsg  * @evicted: is the BO evicted
2034fb4d8502Sjsg  *
2035fb4d8502Sjsg  * Mark @bo as invalid.
2036fb4d8502Sjsg  */
2037fb4d8502Sjsg void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2038fb4d8502Sjsg 			     struct amdgpu_bo *bo, bool evicted)
2039fb4d8502Sjsg {
2040fb4d8502Sjsg 	struct amdgpu_vm_bo_base *bo_base;
2041fb4d8502Sjsg 
2042fb4d8502Sjsg 	/* shadow bo doesn't have bo base, its validation needs its parent */
20435ca02815Sjsg 	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
2044fb4d8502Sjsg 		bo = bo->parent;
2045fb4d8502Sjsg 
2046c349dbc7Sjsg 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2047fb4d8502Sjsg 		struct amdgpu_vm *vm = bo_base->vm;
2048fb4d8502Sjsg 
20495ca02815Sjsg 		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
2050c349dbc7Sjsg 			amdgpu_vm_bo_evicted(bo_base);
2051c349dbc7Sjsg 			continue;
2052c349dbc7Sjsg 		}
2053c349dbc7Sjsg 
2054c349dbc7Sjsg 		if (bo_base->moved)
2055c349dbc7Sjsg 			continue;
2056fb4d8502Sjsg 		bo_base->moved = true;
2057c349dbc7Sjsg 
2058fb4d8502Sjsg 		if (bo->tbo.type == ttm_bo_type_kernel)
2059c349dbc7Sjsg 			amdgpu_vm_bo_relocated(bo_base);
20605ca02815Sjsg 		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2061c349dbc7Sjsg 			amdgpu_vm_bo_moved(bo_base);
2062fb4d8502Sjsg 		else
2063c349dbc7Sjsg 			amdgpu_vm_bo_invalidated(bo_base);
2064fb4d8502Sjsg 	}
2065fb4d8502Sjsg }
2066fb4d8502Sjsg 
2067fb4d8502Sjsg /**
2068fb4d8502Sjsg  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2069fb4d8502Sjsg  *
2070fb4d8502Sjsg  * @vm_size: VM size
2071fb4d8502Sjsg  *
2072fb4d8502Sjsg  * Returns:
2073fb4d8502Sjsg  * VM page table as power of two
2074fb4d8502Sjsg  */
2075fb4d8502Sjsg static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2076fb4d8502Sjsg {
2077fb4d8502Sjsg 	/* Total bits covered by PD + PTs */
2078fb4d8502Sjsg 	unsigned bits = ilog2(vm_size) + 18;
2079fb4d8502Sjsg 
2080fb4d8502Sjsg 	/* Make sure the PD is 4K in size up to 8GB address space.
2081fb4d8502Sjsg 	   Above that, split equally between PD and PTs */
2082fb4d8502Sjsg 	if (vm_size <= 8)
2083fb4d8502Sjsg 		return (bits - 9);
2084fb4d8502Sjsg 	else
2085fb4d8502Sjsg 		return ((bits + 3) / 2);
2086fb4d8502Sjsg }
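
/*
 * Worked examples (illustrative): one GB is 2^18 GPU pages of 4K, hence the
 * "+ 18" above.  For vm_size = 8, bits = ilog2(8) + 18 = 21; the PD keeps
 * 9 bits (512 entries, 4K) and the block size is 21 - 9 = 12.  For
 * vm_size = 256, bits = 8 + 18 = 26 and the split is (26 + 3) / 2 = 14 bits
 * for the PTs, leaving 12 bits for the PD.
 */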
2087fb4d8502Sjsg 
2088fb4d8502Sjsg /**
2089fb4d8502Sjsg  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2090fb4d8502Sjsg  *
2091fb4d8502Sjsg  * @adev: amdgpu_device pointer
2092fb4d8502Sjsg  * @min_vm_size: the minimum vm size in GB if it's set auto
2093fb4d8502Sjsg  * @fragment_size_default: Default PTE fragment size
2094fb4d8502Sjsg  * @max_level: max VMPT level
2095fb4d8502Sjsg  * @max_bits: max address space size in bits
2096fb4d8502Sjsg  *
2097fb4d8502Sjsg  */
2098fb4d8502Sjsg void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2099fb4d8502Sjsg 			   uint32_t fragment_size_default, unsigned max_level,
2100fb4d8502Sjsg 			   unsigned max_bits)
2101fb4d8502Sjsg {
2102fb4d8502Sjsg 	unsigned int max_size = 1 << (max_bits - 30);
2103fb4d8502Sjsg 	unsigned int vm_size;
2104fb4d8502Sjsg 	uint64_t tmp;
2105fb4d8502Sjsg 
2106fb4d8502Sjsg 	/* adjust vm size first */
2107fb4d8502Sjsg 	if (amdgpu_vm_size != -1) {
2108fb4d8502Sjsg 		vm_size = amdgpu_vm_size;
2109fb4d8502Sjsg 		if (vm_size > max_size) {
2110fb4d8502Sjsg 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2111fb4d8502Sjsg 				 amdgpu_vm_size, max_size);
2112fb4d8502Sjsg 			vm_size = max_size;
2113fb4d8502Sjsg 		}
2114fb4d8502Sjsg 	} else {
2115fb4d8502Sjsg #ifdef __linux__
2116fb4d8502Sjsg 		struct sysinfo si;
2117fb4d8502Sjsg #endif
2118fb4d8502Sjsg 		unsigned int phys_ram_gb;
2119fb4d8502Sjsg 
2120fb4d8502Sjsg 		/* Optimal VM size depends on the amount of physical
2121fb4d8502Sjsg 		 * RAM available. Underlying requirements and
2122fb4d8502Sjsg 		 * assumptions:
2123fb4d8502Sjsg 		 *
2124fb4d8502Sjsg 		 *  - Need to map system memory and VRAM from all GPUs
2125fb4d8502Sjsg 		 *     - VRAM from other GPUs not known here
2126fb4d8502Sjsg 		 *     - Assume VRAM <= system memory
2127fb4d8502Sjsg 		 *  - On GFX8 and older, VM space can be segmented for
2128fb4d8502Sjsg 		 *    different MTYPEs
2129fb4d8502Sjsg 		 *  - Need to allow room for fragmentation, guard pages etc.
2130fb4d8502Sjsg 		 *
2131fb4d8502Sjsg 		 * This adds up to a rough guess of system memory x3.
2132fb4d8502Sjsg 		 * Round up to power of two to maximize the available
2133fb4d8502Sjsg 		 * VM size with the given page table size.
2134fb4d8502Sjsg 		 */
2135fb4d8502Sjsg #ifdef __linux__
2136fb4d8502Sjsg 		si_meminfo(&si);
2137fb4d8502Sjsg 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2138fb4d8502Sjsg 			       (1 << 30) - 1) >> 30;
2139fb4d8502Sjsg #else
2140fb4d8502Sjsg 		phys_ram_gb = ((uint64_t)ptoa(physmem) +
2141fb4d8502Sjsg 			       (1 << 30) - 1) >> 30;
2142fb4d8502Sjsg #endif
2143fb4d8502Sjsg 		vm_size = roundup_pow_of_two(
2144fb4d8502Sjsg 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2145fb4d8502Sjsg 	}
2146fb4d8502Sjsg 
2147fb4d8502Sjsg 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2148fb4d8502Sjsg 
2149fb4d8502Sjsg 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2150fb4d8502Sjsg 	if (amdgpu_vm_block_size != -1)
2151fb4d8502Sjsg 		tmp >>= amdgpu_vm_block_size - 9;
2152fb4d8502Sjsg 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2153fb4d8502Sjsg 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2154fb4d8502Sjsg 	switch (adev->vm_manager.num_level) {
2155fb4d8502Sjsg 	case 3:
2156fb4d8502Sjsg 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2157fb4d8502Sjsg 		break;
2158fb4d8502Sjsg 	case 2:
2159fb4d8502Sjsg 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2160fb4d8502Sjsg 		break;
2161fb4d8502Sjsg 	case 1:
2162fb4d8502Sjsg 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2163fb4d8502Sjsg 		break;
2164fb4d8502Sjsg 	default:
2165fb4d8502Sjsg 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2166fb4d8502Sjsg 	}
2167fb4d8502Sjsg 	/* block size depends on vm size and hw setup */
2168fb4d8502Sjsg 	if (amdgpu_vm_block_size != -1)
2169fb4d8502Sjsg 		adev->vm_manager.block_size =
2170fb4d8502Sjsg 			min((unsigned)amdgpu_vm_block_size, max_bits
2171fb4d8502Sjsg 			    - AMDGPU_GPU_PAGE_SHIFT
2172fb4d8502Sjsg 			    - 9 * adev->vm_manager.num_level);
2173fb4d8502Sjsg 	else if (adev->vm_manager.num_level > 1)
2174fb4d8502Sjsg 		adev->vm_manager.block_size = 9;
2175fb4d8502Sjsg 	else
2176fb4d8502Sjsg 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2177fb4d8502Sjsg 
2178fb4d8502Sjsg 	if (amdgpu_vm_fragment_size == -1)
2179fb4d8502Sjsg 		adev->vm_manager.fragment_size = fragment_size_default;
2180fb4d8502Sjsg 	else
2181fb4d8502Sjsg 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2182fb4d8502Sjsg 
2183fb4d8502Sjsg 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2184fb4d8502Sjsg 		 vm_size, adev->vm_manager.num_level + 1,
2185fb4d8502Sjsg 		 adev->vm_manager.block_size,
2186fb4d8502Sjsg 		 adev->vm_manager.fragment_size);
2187fb4d8502Sjsg }
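
/*
 * Worked example (assumed values: 16 GB of system RAM, all module parameters
 * left at -1, max_level >= 2): vm_size = roundup_pow_of_two(16 * 3) = 64 GB,
 * so max_pfn = 64 << 18 = 2^24 pages.  Then fls64(2^24) - 1 = 24 and
 * DIV_ROUND_UP(24, 9) - 1 = 2, giving num_level = 2 (root_level =
 * AMDGPU_VM_PDB1) and, since num_level > 1, the default block_size of 9.
 */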
2188fb4d8502Sjsg 
2189fb4d8502Sjsg /**
2190c349dbc7Sjsg  * amdgpu_vm_wait_idle - wait for the VM to become idle
2191c349dbc7Sjsg  *
2192c349dbc7Sjsg  * @vm: VM object to wait for
2193c349dbc7Sjsg  * @timeout: timeout to wait for VM to become idle
2194c349dbc7Sjsg  */
2195c349dbc7Sjsg long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2196c349dbc7Sjsg {
21971bb76ff1Sjsg 	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
21981bb76ff1Sjsg 					DMA_RESV_USAGE_BOOKKEEP,
21995ca02815Sjsg 					true, timeout);
2200c349dbc7Sjsg 	if (timeout <= 0)
2201c349dbc7Sjsg 		return timeout;
2202c349dbc7Sjsg 
2203ad8b1aafSjsg 	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2204c349dbc7Sjsg }
2205c349dbc7Sjsg 
2206c349dbc7Sjsg /**
2207fb4d8502Sjsg  * amdgpu_vm_init - initialize a vm instance
2208fb4d8502Sjsg  *
2209fb4d8502Sjsg  * @adev: amdgpu_device pointer
2210fb4d8502Sjsg  * @vm: requested vm
2211f005ef32Sjsg  * @xcp_id: GPU partition selection id
2212fb4d8502Sjsg  *
2213fb4d8502Sjsg  * Init @vm fields.
2214fb4d8502Sjsg  *
2215fb4d8502Sjsg  * Returns:
2216fb4d8502Sjsg  * 0 for success, error for failure.
2217fb4d8502Sjsg  */
2218f005ef32Sjsg int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2219f005ef32Sjsg 		   int32_t xcp_id)
2220fb4d8502Sjsg {
22215ca02815Sjsg 	struct amdgpu_bo *root_bo;
22225ca02815Sjsg 	struct amdgpu_bo_vm *root;
2223fb4d8502Sjsg 	int r, i;
2224fb4d8502Sjsg 
2225fb4d8502Sjsg 	vm->va = RB_ROOT_CACHED;
2226fb4d8502Sjsg 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2227fb4d8502Sjsg 		vm->reserved_vmid[i] = NULL;
2228fb4d8502Sjsg 	INIT_LIST_HEAD(&vm->evicted);
2229fb4d8502Sjsg 	INIT_LIST_HEAD(&vm->relocated);
2230fb4d8502Sjsg 	INIT_LIST_HEAD(&vm->moved);
2231fb4d8502Sjsg 	INIT_LIST_HEAD(&vm->idle);
2232c349dbc7Sjsg 	INIT_LIST_HEAD(&vm->invalidated);
22331bb76ff1Sjsg 	mtx_init(&vm->status_lock, IPL_NONE);
2234fb4d8502Sjsg 	INIT_LIST_HEAD(&vm->freed);
22355ca02815Sjsg 	INIT_LIST_HEAD(&vm->done);
22361bb76ff1Sjsg 	INIT_LIST_HEAD(&vm->pt_freed);
22371bb76ff1Sjsg 	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2238f005ef32Sjsg #ifdef __linux__
2239f005ef32Sjsg 	INIT_KFIFO(vm->faults);
2240f005ef32Sjsg #else
2241f005ef32Sjsg 	SIMPLEQ_INIT(&vm->faults);
2242f005ef32Sjsg #endif
2243fb4d8502Sjsg 
2244f005ef32Sjsg 	r = amdgpu_vm_init_entities(adev, vm);
2245fb4d8502Sjsg 	if (r)
2246fb4d8502Sjsg 		return r;
2247fb4d8502Sjsg 
2248fb4d8502Sjsg 	vm->pte_support_ats = false;
2249c349dbc7Sjsg 	vm->is_compute_context = false;
2250fb4d8502Sjsg 
2251fb4d8502Sjsg 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2252fb4d8502Sjsg 				    AMDGPU_VM_USE_CPU_FOR_GFX);
22535ca02815Sjsg 
2254fb4d8502Sjsg 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2255fb4d8502Sjsg 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2256c349dbc7Sjsg 	WARN_ONCE((vm->use_cpu_for_update &&
2257c349dbc7Sjsg 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2258fb4d8502Sjsg 		  "CPU update of VM recommended only for large BAR system\n");
2259fb4d8502Sjsg 
2260fb4d8502Sjsg 	if (vm->use_cpu_for_update)
2261c349dbc7Sjsg 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2262c349dbc7Sjsg 	else
2263c349dbc7Sjsg 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
22640c513800Sjsg 
22650c513800Sjsg 	vm->last_update = dma_fence_get_stub();
2266ad8b1aafSjsg 	vm->last_unlocked = dma_fence_get_stub();
22671bb76ff1Sjsg 	vm->last_tlb_flush = dma_fence_get_stub();
226826d6e9f7Sjsg 	vm->generation = amdgpu_vm_generation(adev, NULL);
2269fb4d8502Sjsg 
2270c349dbc7Sjsg 	rw_init(&vm->eviction_lock, "avmev");
2271c349dbc7Sjsg 	vm->evicting = false;
2272c349dbc7Sjsg 
22735ca02815Sjsg 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2274f005ef32Sjsg 				false, &root, xcp_id);
2275fb4d8502Sjsg 	if (r)
2276c349dbc7Sjsg 		goto error_free_delayed;
2277f005ef32Sjsg 
2278f005ef32Sjsg 	root_bo = amdgpu_bo_ref(&root->bo);
22795ca02815Sjsg 	r = amdgpu_bo_reserve(root_bo, true);
2280f005ef32Sjsg 	if (r) {
2281f005ef32Sjsg 		amdgpu_bo_unref(&root->shadow);
2282f005ef32Sjsg 		amdgpu_bo_unref(&root_bo);
2283f005ef32Sjsg 		goto error_free_delayed;
2284f005ef32Sjsg 	}
2285f005ef32Sjsg 
2286f005ef32Sjsg 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2287f005ef32Sjsg 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2288fb4d8502Sjsg 	if (r)
2289fb4d8502Sjsg 		goto error_free_root;
2290fb4d8502Sjsg 
22911bb76ff1Sjsg 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2292c349dbc7Sjsg 	if (r)
2293f005ef32Sjsg 		goto error_free_root;
2294c349dbc7Sjsg 
22955ca02815Sjsg 	amdgpu_bo_unreserve(vm->root.bo);
2296f005ef32Sjsg 	amdgpu_bo_unref(&root_bo);
2297fb4d8502Sjsg 
2298fb4d8502Sjsg 	return 0;
2299fb4d8502Sjsg 
2300fb4d8502Sjsg error_free_root:
2301f005ef32Sjsg 	amdgpu_vm_pt_free_root(adev, vm);
2302f005ef32Sjsg 	amdgpu_bo_unreserve(vm->root.bo);
23035ca02815Sjsg 	amdgpu_bo_unref(&root_bo);
2304fb4d8502Sjsg 
2305c349dbc7Sjsg error_free_delayed:
23061bb76ff1Sjsg 	dma_fence_put(vm->last_tlb_flush);
2307ad8b1aafSjsg 	dma_fence_put(vm->last_unlocked);
2308f005ef32Sjsg 	amdgpu_vm_fini_entities(vm);
2309fb4d8502Sjsg 
2310fb4d8502Sjsg 	return r;
2311fb4d8502Sjsg }
2312fb4d8502Sjsg 
2313fb4d8502Sjsg /**
2314fb4d8502Sjsg  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2315fb4d8502Sjsg  *
2316fb4d8502Sjsg  * @adev: amdgpu_device pointer
2317fb4d8502Sjsg  * @vm: requested vm
2318fb4d8502Sjsg  *
2319fb4d8502Sjsg  * This only works on GFX VMs that have no BOs added and no
2320fb4d8502Sjsg  * page tables allocated yet.
2321fb4d8502Sjsg  *
2322fb4d8502Sjsg  * Changes the following VM parameters:
2323fb4d8502Sjsg  * - use_cpu_for_update
2324fb4d8502Sjsg  * - pte_supports_ats
2325fb4d8502Sjsg  *
2326fb4d8502Sjsg  * Reinitializes the page directory to reflect the changed ATS
2327fb4d8502Sjsg  * setting.
2328fb4d8502Sjsg  *
2329fb4d8502Sjsg  * Returns:
2330fb4d8502Sjsg  * 0 for success, -errno for errors.
2331fb4d8502Sjsg  */
23325ca02815Sjsg int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2333fb4d8502Sjsg {
23343ee1c80bSjsg 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2335fb4d8502Sjsg 	int r;
2336fb4d8502Sjsg 
23375ca02815Sjsg 	r = amdgpu_bo_reserve(vm->root.bo, true);
2338fb4d8502Sjsg 	if (r)
2339fb4d8502Sjsg 		return r;
2340fb4d8502Sjsg 
2341f005ef32Sjsg 	/* Check if PD needs to be reinitialized and do it before
2342f005ef32Sjsg 	 * changing any other state, in case it fails.
2343f005ef32Sjsg 	 */
2344f005ef32Sjsg 	if (pte_support_ats != vm->pte_support_ats) {
2345fb4d8502Sjsg 		/* Sanity checks */
23461bb76ff1Sjsg 		if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
23471bb76ff1Sjsg 			r = -EINVAL;
2348c349dbc7Sjsg 			goto unreserve_bo;
23491bb76ff1Sjsg 		}
2350c349dbc7Sjsg 
2351c349dbc7Sjsg 		vm->pte_support_ats = pte_support_ats;
23521bb76ff1Sjsg 		r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
23535ca02815Sjsg 				       false);
2354fb4d8502Sjsg 		if (r)
23555ca02815Sjsg 			goto unreserve_bo;
2356fb4d8502Sjsg 	}
2357fb4d8502Sjsg 
2358fb4d8502Sjsg 	/* Update VM state */
2359fb4d8502Sjsg 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2360fb4d8502Sjsg 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2361fb4d8502Sjsg 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2362fb4d8502Sjsg 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2363c349dbc7Sjsg 	WARN_ONCE((vm->use_cpu_for_update &&
2364c349dbc7Sjsg 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2365fb4d8502Sjsg 		  "CPU update of VM recommended only for large BAR system\n");
2366fb4d8502Sjsg 
23674e4b099eSjsg 	if (vm->use_cpu_for_update) {
23684e4b099eSjsg 		/* Sync with last SDMA update/clear before switching to CPU */
23695ca02815Sjsg 		r = amdgpu_bo_sync_wait(vm->root.bo,
23704e4b099eSjsg 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
23714e4b099eSjsg 		if (r)
23725ca02815Sjsg 			goto unreserve_bo;
23734e4b099eSjsg 
2374c349dbc7Sjsg 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2375f005ef32Sjsg 		r = amdgpu_vm_pt_map_tables(adev, vm);
2376f005ef32Sjsg 		if (r)
2377f005ef32Sjsg 			goto unreserve_bo;
2378f005ef32Sjsg 
23794e4b099eSjsg 	} else {
2380c349dbc7Sjsg 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
23814e4b099eSjsg 	}
23821bb76ff1Sjsg 
2383c349dbc7Sjsg 	dma_fence_put(vm->last_update);
23840c513800Sjsg 	vm->last_update = dma_fence_get_stub();
2385c349dbc7Sjsg 	vm->is_compute_context = true;
2386c349dbc7Sjsg 
2387fb4d8502Sjsg 	/* Free the shadow bo for compute VM */
23885ca02815Sjsg 	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2389c349dbc7Sjsg 
2392c349dbc7Sjsg unreserve_bo:
23935ca02815Sjsg 	amdgpu_bo_unreserve(vm->root.bo);
2394fb4d8502Sjsg 	return r;
2395fb4d8502Sjsg }
2396fb4d8502Sjsg 
2397fb4d8502Sjsg /**
2398c349dbc7Sjsg  * amdgpu_vm_release_compute - release a compute vm
2399c349dbc7Sjsg  * @adev: amdgpu_device pointer
2400c349dbc7Sjsg  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2401fb4d8502Sjsg  *
2402c349dbc7Sjsg  * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
2403c349dbc7Sjsg  * pasid from the vm. Compute should stop using the vm after this call.
2404fb4d8502Sjsg  */
2405c349dbc7Sjsg void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2406fb4d8502Sjsg {
24075ca02815Sjsg 	amdgpu_vm_set_pasid(adev, vm, 0);
2408c349dbc7Sjsg 	vm->is_compute_context = false;
2409fb4d8502Sjsg }
2410fb4d8502Sjsg 
2411fb4d8502Sjsg /**
2412fb4d8502Sjsg  * amdgpu_vm_fini - tear down a vm instance
2413fb4d8502Sjsg  *
2414fb4d8502Sjsg  * @adev: amdgpu_device pointer
2415fb4d8502Sjsg  * @vm: requested vm
2416fb4d8502Sjsg  *
2417fb4d8502Sjsg  * Tear down @vm.
2418fb4d8502Sjsg  * Unbind the VM and remove all bos from the vm bo list
2419fb4d8502Sjsg  */
2420fb4d8502Sjsg void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2421fb4d8502Sjsg {
2422fb4d8502Sjsg 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2423fb4d8502Sjsg 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2424fb4d8502Sjsg 	struct amdgpu_bo *root;
24251bb76ff1Sjsg 	unsigned long flags;
2426c349dbc7Sjsg 	int i;
2427fb4d8502Sjsg 
2428fb4d8502Sjsg 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2429fb4d8502Sjsg 
24301bb76ff1Sjsg 	flush_work(&vm->pt_free_work);
24311bb76ff1Sjsg 
24325ca02815Sjsg 	root = amdgpu_bo_ref(vm->root.bo);
2433c349dbc7Sjsg 	amdgpu_bo_reserve(root, true);
24345ca02815Sjsg 	amdgpu_vm_set_pasid(adev, vm, 0);
2435ad8b1aafSjsg 	dma_fence_wait(vm->last_unlocked, false);
2436ad8b1aafSjsg 	dma_fence_put(vm->last_unlocked);
24371bb76ff1Sjsg 	dma_fence_wait(vm->last_tlb_flush, false);
24381bb76ff1Sjsg 	/* Make sure that all fence callbacks have completed */
24391bb76ff1Sjsg 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
24401bb76ff1Sjsg 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
24411bb76ff1Sjsg 	dma_fence_put(vm->last_tlb_flush);
2442fb4d8502Sjsg 
2443fb4d8502Sjsg 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2444fb4d8502Sjsg 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2445fb4d8502Sjsg 			amdgpu_vm_prt_fini(adev, vm);
2446fb4d8502Sjsg 			prt_fini_needed = false;
2447fb4d8502Sjsg 		}
2448fb4d8502Sjsg 
2449fb4d8502Sjsg 		list_del(&mapping->list);
2450fb4d8502Sjsg 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2451fb4d8502Sjsg 	}
2452fb4d8502Sjsg 
24531bb76ff1Sjsg 	amdgpu_vm_pt_free_root(adev, vm);
2454fb4d8502Sjsg 	amdgpu_bo_unreserve(root);
2455fb4d8502Sjsg 	amdgpu_bo_unref(&root);
24565ca02815Sjsg 	WARN_ON(vm->root.bo);
2457c349dbc7Sjsg 
2458f005ef32Sjsg 	amdgpu_vm_fini_entities(vm);
2459c349dbc7Sjsg 
2460c349dbc7Sjsg 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2461c349dbc7Sjsg 		dev_err(adev->dev, "still active bo inside vm\n");
2462c349dbc7Sjsg 	}
2463c349dbc7Sjsg 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2464c349dbc7Sjsg 					     &vm->va.rb_root, rb) {
2465c349dbc7Sjsg 		/* Don't remove the mapping here, we don't want to trigger a
2466c349dbc7Sjsg 		 * rebalance and the tree is about to be destroyed anyway.
2467c349dbc7Sjsg 		 */
2468c349dbc7Sjsg 		list_del(&mapping->list);
2469c349dbc7Sjsg 		kfree(mapping);
2470c349dbc7Sjsg 	}
2471c349dbc7Sjsg 
2472fb4d8502Sjsg 	dma_fence_put(vm->last_update);
2473f005ef32Sjsg 
2474f005ef32Sjsg 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2475f005ef32Sjsg 		if (vm->reserved_vmid[i]) {
2476f005ef32Sjsg 			amdgpu_vmid_free_reserved(adev, i);
2477f005ef32Sjsg 			vm->reserved_vmid[i] = false;
2478f005ef32Sjsg 		}
2479f005ef32Sjsg 	}
2481fb4d8502Sjsg }
2482fb4d8502Sjsg 
2483fb4d8502Sjsg /**
2484fb4d8502Sjsg  * amdgpu_vm_manager_init - init the VM manager
2485fb4d8502Sjsg  *
2486fb4d8502Sjsg  * @adev: amdgpu_device pointer
2487fb4d8502Sjsg  *
2488fb4d8502Sjsg  * Initialize the VM manager structures
2489fb4d8502Sjsg  */
2490fb4d8502Sjsg void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2491fb4d8502Sjsg {
2492fb4d8502Sjsg 	unsigned i;
2493fb4d8502Sjsg 
2494ad8b1aafSjsg 	/* Concurrent flushes are only possible starting with Vega10 and
2495ad8b1aafSjsg 	 * are broken on Navi10 and Navi14.
2496ad8b1aafSjsg 	 */
2497ad8b1aafSjsg 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2498ad8b1aafSjsg 					      adev->asic_type == CHIP_NAVI10 ||
2499ad8b1aafSjsg 					      adev->asic_type == CHIP_NAVI14);
2500fb4d8502Sjsg 	amdgpu_vmid_mgr_init(adev);
2501fb4d8502Sjsg 
2502fb4d8502Sjsg 	adev->vm_manager.fence_context =
2503fb4d8502Sjsg 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2504fb4d8502Sjsg 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2505fb4d8502Sjsg 		adev->vm_manager.seqno[i] = 0;
2506fb4d8502Sjsg 
2507fb4d8502Sjsg 	mtx_init(&adev->vm_manager.prt_lock, IPL_TTY);
2508fb4d8502Sjsg 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2509fb4d8502Sjsg 
2510fb4d8502Sjsg 	/* If not overridden by the user, compute VM tables are updated by
2511fb4d8502Sjsg 	 * the CPU only on large BAR systems by default
2512fb4d8502Sjsg 	 */
251316fe02eaSjsg #ifdef CONFIG_X86_64
2514fb4d8502Sjsg 	if (amdgpu_vm_update_mode == -1) {
25151bb76ff1Sjsg 		/* For asic with VF MMIO access protection
25161bb76ff1Sjsg 		 * avoid using CPU for VM table updates
25171bb76ff1Sjsg 		 */
25181bb76ff1Sjsg 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
25191bb76ff1Sjsg 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2520fb4d8502Sjsg 			adev->vm_manager.vm_update_mode =
2521fb4d8502Sjsg 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2522fb4d8502Sjsg 		else
2523fb4d8502Sjsg 			adev->vm_manager.vm_update_mode = 0;
2524fb4d8502Sjsg 	} else
2525fb4d8502Sjsg 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2526fb4d8502Sjsg #else
2527fb4d8502Sjsg 	adev->vm_manager.vm_update_mode = 0;
2528fb4d8502Sjsg #endif
2529fb4d8502Sjsg 
25305ca02815Sjsg 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2531fb4d8502Sjsg }
2532fb4d8502Sjsg 
2533fb4d8502Sjsg /**
2534fb4d8502Sjsg  * amdgpu_vm_manager_fini - cleanup VM manager
2535fb4d8502Sjsg  *
2536fb4d8502Sjsg  * @adev: amdgpu_device pointer
2537fb4d8502Sjsg  *
2538fb4d8502Sjsg  * Cleanup the VM manager and free resources.
2539fb4d8502Sjsg  */
2540fb4d8502Sjsg void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2541fb4d8502Sjsg {
25425ca02815Sjsg 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
25435ca02815Sjsg 	xa_destroy(&adev->vm_manager.pasids);
2544fb4d8502Sjsg 
2545fb4d8502Sjsg 	amdgpu_vmid_mgr_fini(adev);
2546fb4d8502Sjsg }
2547fb4d8502Sjsg 
2548fb4d8502Sjsg /**
2549fb4d8502Sjsg  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2550fb4d8502Sjsg  *
2551fb4d8502Sjsg  * @dev: drm device pointer
2552fb4d8502Sjsg  * @data: drm_amdgpu_vm
2553fb4d8502Sjsg  * @filp: drm file pointer
2554fb4d8502Sjsg  *
2555fb4d8502Sjsg  * Returns:
2556fb4d8502Sjsg  * 0 for success, -errno for errors.
2557fb4d8502Sjsg  */
2558fb4d8502Sjsg int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2559fb4d8502Sjsg {
2560fb4d8502Sjsg 	union drm_amdgpu_vm *args = data;
2561ad8b1aafSjsg 	struct amdgpu_device *adev = drm_to_adev(dev);
2562fb4d8502Sjsg 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2563fb4d8502Sjsg 
25648cd973deSjsg 	/* No valid flags defined yet */
25658cd973deSjsg 	if (args->in.flags)
25668cd973deSjsg 		return -EINVAL;
25678cd973deSjsg 
2568fb4d8502Sjsg 	switch (args->in.op) {
2569fb4d8502Sjsg 	case AMDGPU_VM_OP_RESERVE_VMID:
2570c349dbc7Sjsg 		/* We only have requirement to reserve vmid from gfxhub */
2571f005ef32Sjsg 		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2572f005ef32Sjsg 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2573f005ef32Sjsg 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2574f005ef32Sjsg 		}
2575f005ef32Sjsg 
2576fb4d8502Sjsg 		break;
2577fb4d8502Sjsg 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2578f005ef32Sjsg 		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2579f005ef32Sjsg 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2580f005ef32Sjsg 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2581f005ef32Sjsg 		}
2582fb4d8502Sjsg 		break;
2583fb4d8502Sjsg 	default:
2584fb4d8502Sjsg 		return -EINVAL;
2585fb4d8502Sjsg 	}
2586fb4d8502Sjsg 
2587fb4d8502Sjsg 	return 0;
2588fb4d8502Sjsg }
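
/*
 * Hypothetical userspace sketch (libdrm, error handling omitted): reserving
 * a VMID only needs the op field, flags must stay zero:
 *
 *	union drm_amdgpu_vm args = {
 *		.in.op = AMDGPU_VM_OP_RESERVE_VMID,
 *		.in.flags = 0,
 *	};
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */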
2589fb4d8502Sjsg 
2590fb4d8502Sjsg /**
2591fb4d8502Sjsg  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2592fb4d8502Sjsg  *
2593c349dbc7Sjsg  * @adev: drm device pointer
2594fb4d8502Sjsg  * @pasid: PASID identifier for VM
2595fb4d8502Sjsg  * @task_info: task_info to fill.
2596fb4d8502Sjsg  */
2597ad8b1aafSjsg void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
2598fb4d8502Sjsg 			 struct amdgpu_task_info *task_info)
2599fb4d8502Sjsg {
2600fb4d8502Sjsg 	struct amdgpu_vm *vm;
2601fb4d8502Sjsg 	unsigned long flags;
2602fb4d8502Sjsg 
26035ca02815Sjsg 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2604fb4d8502Sjsg 
26055ca02815Sjsg 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2606fb4d8502Sjsg 	if (vm)
2607fb4d8502Sjsg 		*task_info = vm->task_info;
2608fb4d8502Sjsg 
26095ca02815Sjsg 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2610fb4d8502Sjsg }
2611fb4d8502Sjsg 
2612fb4d8502Sjsg /**
2613fb4d8502Sjsg  * amdgpu_vm_set_task_info - Sets VMs task info.
2614fb4d8502Sjsg  *
2615fb4d8502Sjsg  * @vm: vm for which to set the info
2616fb4d8502Sjsg  */
2617fb4d8502Sjsg void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2618fb4d8502Sjsg {
2619c349dbc7Sjsg 	if (vm->task_info.pid)
2620c349dbc7Sjsg 		return;
2621c349dbc7Sjsg 
2622fb4d8502Sjsg #ifdef __linux__
2623fb4d8502Sjsg 	vm->task_info.pid = current->pid;
2624fb4d8502Sjsg 	get_task_comm(vm->task_info.task_name, current);
2625fb4d8502Sjsg 
2626c349dbc7Sjsg 	if (current->group_leader->mm != current->mm)
2627c349dbc7Sjsg 		return;
2628c349dbc7Sjsg 
2629fb4d8502Sjsg 	vm->task_info.tgid = current->group_leader->pid;
2630fb4d8502Sjsg 	get_task_comm(vm->task_info.process_name, current->group_leader);
2631fb4d8502Sjsg #else
263290c94250Sjsg 	/* thread */
263390c94250Sjsg 	vm->task_info.pid = curproc->p_tid;
2634fb4d8502Sjsg 	strlcpy(vm->task_info.task_name, curproc->p_p->ps_comm,
2635fb4d8502Sjsg 	    sizeof(vm->task_info.task_name));
263690c94250Sjsg 
263790c94250Sjsg 	/* process */
263890c94250Sjsg 	vm->task_info.tgid = curproc->p_p->ps_pid;
263990c94250Sjsg 	strlcpy(vm->task_info.process_name, curproc->p_p->ps_comm,
264090c94250Sjsg 	    sizeof(vm->task_info.process_name));
2641fb4d8502Sjsg #endif
2642fb4d8502Sjsg }
2643c349dbc7Sjsg 
2644c349dbc7Sjsg /**
2645c349dbc7Sjsg  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2646c349dbc7Sjsg  * @adev: amdgpu device pointer
2647c349dbc7Sjsg  * @pasid: PASID of the VM
2648f005ef32Sjsg  * @vmid: VMID, only used for GFX 9.4.3.
2649f005ef32Sjsg  * @node_id: Node_id received in IH cookie. Only applicable for
2650f005ef32Sjsg  *           GFX 9.4.3.
2651c349dbc7Sjsg  * @addr: Address of the fault
26525ca02815Sjsg  * @write_fault: true for a write fault, false for a read fault
2653c349dbc7Sjsg  *
2654c349dbc7Sjsg  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2655c349dbc7Sjsg  * shouldn't be reported any more.
2656c349dbc7Sjsg  */
2657ad8b1aafSjsg bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2658f005ef32Sjsg 			    u32 vmid, u32 node_id, uint64_t addr,
2659f005ef32Sjsg 			    bool write_fault)
2660c349dbc7Sjsg {
26615ca02815Sjsg 	bool is_compute_context = false;
2662c349dbc7Sjsg 	struct amdgpu_bo *root;
26635ca02815Sjsg 	unsigned long irqflags;
2664c349dbc7Sjsg 	uint64_t value, flags;
2665c349dbc7Sjsg 	struct amdgpu_vm *vm;
26665ca02815Sjsg 	int r;
2667c349dbc7Sjsg 
26685ca02815Sjsg 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
26695ca02815Sjsg 	vm = xa_load(&adev->vm_manager.pasids, pasid);
26705ca02815Sjsg 	if (vm) {
26715ca02815Sjsg 		root = amdgpu_bo_ref(vm->root.bo);
26725ca02815Sjsg 		is_compute_context = vm->is_compute_context;
26735ca02815Sjsg 	} else {
2674c349dbc7Sjsg 		root = NULL;
26755ca02815Sjsg 	}
26765ca02815Sjsg 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2677c349dbc7Sjsg 
2678c349dbc7Sjsg 	if (!root)
2679c349dbc7Sjsg 		return false;
2680c349dbc7Sjsg 
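	/* The fault address arrives in bytes; the VM code operates on GPU
	 * page units.
	 */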
26815ca02815Sjsg 	addr /= AMDGPU_GPU_PAGE_SIZE;
26825ca02815Sjsg 
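	/* For compute contexts, give the SVM code a first chance to restore
	 * the pages; if that succeeds the fault is fully resolved.
	 */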
2683f005ef32Sjsg 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2684f005ef32Sjsg 	    node_id, addr, write_fault)) {
26855ca02815Sjsg 		amdgpu_bo_unref(&root);
26865ca02815Sjsg 		return true;
26875ca02815Sjsg 	}
26885ca02815Sjsg 
2689c349dbc7Sjsg 	r = amdgpu_bo_reserve(root, true);
2690c349dbc7Sjsg 	if (r)
2691c349dbc7Sjsg 		goto error_unref;
2692c349dbc7Sjsg 
2693c349dbc7Sjsg 	/* Double check that the VM still exists */
26945ca02815Sjsg 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
26955ca02815Sjsg 	vm = xa_load(&adev->vm_manager.pasids, pasid);
26965ca02815Sjsg 	if (vm && vm->root.bo != root)
2697c349dbc7Sjsg 		vm = NULL;
26985ca02815Sjsg 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2699c349dbc7Sjsg 	if (!vm)
2700c349dbc7Sjsg 		goto error_unlock;
2701c349dbc7Sjsg 
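	/* Otherwise fill in a kernel-controlled PTE for the faulting page.
	 * The base flags describe a valid, snooped system-memory mapping;
	 * they are overridden or extended below depending on the fault
	 * handling policy.
	 */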
2702c349dbc7Sjsg 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2703c349dbc7Sjsg 		AMDGPU_PTE_SYSTEM;
2704c349dbc7Sjsg 
27055ca02815Sjsg 	if (is_compute_context) {
2706c349dbc7Sjsg 		/* Intentionally setting invalid PTE flag
2707c349dbc7Sjsg 		 * combination to force a no-retry-fault
2708c349dbc7Sjsg 		 */
2709f005ef32Sjsg 		flags = AMDGPU_VM_NORETRY_FLAGS;
2710c349dbc7Sjsg 		value = 0;
2711c349dbc7Sjsg 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2712c349dbc7Sjsg 		/* Redirect the access to the dummy page */
2713c349dbc7Sjsg 		value = adev->dummy_page_addr;
2714c349dbc7Sjsg 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2715c349dbc7Sjsg 			AMDGPU_PTE_WRITEABLE;
2716c349dbc7Sjsg 
2717c349dbc7Sjsg 	} else {
2718c349dbc7Sjsg 		/* Let the hw retry silently on the PTE */
2719c349dbc7Sjsg 		value = 0;
2720c349dbc7Sjsg 	}
2721c349dbc7Sjsg 
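	/* Make sure there is a free fence slot on the root BO's reservation
	 * object before issuing the page-table update.
	 */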
27221bb76ff1Sjsg 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
27235ca02815Sjsg 	if (r) {
27245ca02815Sjsg 		pr_debug("failed to reserve fence slot (%d)\n", r);
27255ca02815Sjsg 		goto error_unlock;
27265ca02815Sjsg 	}
27275ca02815Sjsg 
27281bb76ff1Sjsg 	r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
27291bb76ff1Sjsg 				   addr, flags, value, 0, NULL, NULL, NULL);
2730c349dbc7Sjsg 	if (r)
2731c349dbc7Sjsg 		goto error_unlock;
2732c349dbc7Sjsg 
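	/* Also commit any pending page-directory changes so the new entry
	 * becomes visible to the hardware.
	 */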
2733c349dbc7Sjsg 	r = amdgpu_vm_update_pdes(adev, vm, true);
2734c349dbc7Sjsg 
2735c349dbc7Sjsg error_unlock:
2736c349dbc7Sjsg 	amdgpu_bo_unreserve(root);
2737c349dbc7Sjsg 	if (r < 0)
27385ca02815Sjsg 		DRM_ERROR("Can't handle page fault (%d)\n", r);
2739c349dbc7Sjsg 
2740c349dbc7Sjsg error_unref:
2741c349dbc7Sjsg 	amdgpu_bo_unref(&root);
2742c349dbc7Sjsg 
2743c349dbc7Sjsg 	return false;
2744fb4d8502Sjsg }
27455ca02815Sjsg 
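/*
 * Illustrative only (not part of this file): a GMC interrupt handler is
 * assumed to use this as a retry-fault fast path, e.g.
 *
 *	if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid,
 *				   node_id, addr, write_fault))
 *		return 1;	// fault handled, don't report it
 *
 * where "entry", "node_id", "addr" and "write_fault" are assumed to come
 * from decoding the IH ring entry.
 */
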
27465ca02815Sjsg #if defined(CONFIG_DEBUG_FS)
27475ca02815Sjsg /**
27485ca02815Sjsg  * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
27495ca02815Sjsg  *
27505ca02815Sjsg  * @vm: Requested VM for printing BO info
27515ca02815Sjsg  * @m: debugfs file
27525ca02815Sjsg  *
27535ca02815Sjsg  * Print BO information in debugfs file for the VM
27545ca02815Sjsg  */
27555ca02815Sjsg void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
27565ca02815Sjsg {
27575ca02815Sjsg 	struct amdgpu_bo_va *bo_va, *tmp;
27585ca02815Sjsg 	u64 total_idle = 0;
27595ca02815Sjsg 	u64 total_evicted = 0;
27605ca02815Sjsg 	u64 total_relocated = 0;
27615ca02815Sjsg 	u64 total_moved = 0;
27625ca02815Sjsg 	u64 total_invalidated = 0;
27635ca02815Sjsg 	u64 total_done = 0;
27645ca02815Sjsg 	unsigned int total_idle_objs = 0;
27655ca02815Sjsg 	unsigned int total_evicted_objs = 0;
27665ca02815Sjsg 	unsigned int total_relocated_objs = 0;
27675ca02815Sjsg 	unsigned int total_moved_objs = 0;
27685ca02815Sjsg 	unsigned int total_invalidated_objs = 0;
27695ca02815Sjsg 	unsigned int total_done_objs = 0;
27705ca02815Sjsg 	unsigned int id = 0;
27715ca02815Sjsg 
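	/* The status lock keeps BOs from migrating between the per-state
	 * lists while they are being walked and printed.
	 */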
27721bb76ff1Sjsg 	spin_lock(&vm->status_lock);
27735ca02815Sjsg 	seq_puts(m, "\tIdle BOs:\n");
27745ca02815Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
27755ca02815Sjsg 		if (!bo_va->base.bo)
27765ca02815Sjsg 			continue;
27775ca02815Sjsg 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
27785ca02815Sjsg 	}
27795ca02815Sjsg 	total_idle_objs = id;
27805ca02815Sjsg 	id = 0;
27815ca02815Sjsg 
27825ca02815Sjsg 	seq_puts(m, "\tEvicted BOs:\n");
27835ca02815Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
27845ca02815Sjsg 		if (!bo_va->base.bo)
27855ca02815Sjsg 			continue;
27865ca02815Sjsg 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
27875ca02815Sjsg 	}
27885ca02815Sjsg 	total_evicted_objs = id;
27895ca02815Sjsg 	id = 0;
27905ca02815Sjsg 
27915ca02815Sjsg 	seq_puts(m, "\tRelocated BOs:\n");
27925ca02815Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
27935ca02815Sjsg 		if (!bo_va->base.bo)
27945ca02815Sjsg 			continue;
27955ca02815Sjsg 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
27965ca02815Sjsg 	}
27975ca02815Sjsg 	total_relocated_objs = id;
27985ca02815Sjsg 	id = 0;
27995ca02815Sjsg 
28005ca02815Sjsg 	seq_puts(m, "\tMoved BOs:\n");
28015ca02815Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
28025ca02815Sjsg 		if (!bo_va->base.bo)
28035ca02815Sjsg 			continue;
28045ca02815Sjsg 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
28055ca02815Sjsg 	}
28065ca02815Sjsg 	total_moved_objs = id;
28075ca02815Sjsg 	id = 0;
28085ca02815Sjsg 
28095ca02815Sjsg 	seq_puts(m, "\tInvalidated BOs:\n");
28105ca02815Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
28115ca02815Sjsg 		if (!bo_va->base.bo)
28125ca02815Sjsg 			continue;
28135ca02815Sjsg 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
28145ca02815Sjsg 	}
28155ca02815Sjsg 	total_invalidated_objs = id;
28165ca02815Sjsg 	id = 0;
28175ca02815Sjsg 
28185ca02815Sjsg 	seq_puts(m, "\tDone BOs:\n");
28195ca02815Sjsg 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
28205ca02815Sjsg 		if (!bo_va->base.bo)
28215ca02815Sjsg 			continue;
28225ca02815Sjsg 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
28235ca02815Sjsg 	}
28241bb76ff1Sjsg 	spin_unlock(&vm->status_lock);
28255ca02815Sjsg 	total_done_objs = id;
28265ca02815Sjsg 
28275ca02815Sjsg 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
28285ca02815Sjsg 		   total_idle_objs);
28295ca02815Sjsg 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
28305ca02815Sjsg 		   total_evicted_objs);
28315ca02815Sjsg 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
28325ca02815Sjsg 		   total_relocated_objs);
28335ca02815Sjsg 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
28345ca02815Sjsg 		   total_moved_objs);
28355ca02815Sjsg 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
28365ca02815Sjsg 		   total_invalidated_objs);
28375ca02815Sjsg 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
28385ca02815Sjsg 		   total_done_objs);
28395ca02815Sjsg }
28405ca02815Sjsg #endif