/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
21*b843c749SSergey Zigachev * 22*b843c749SSergey Zigachev * Authors: Christian König 23*b843c749SSergey Zigachev */ 24*b843c749SSergey Zigachev #ifndef __AMDGPU_VM_H__ 25*b843c749SSergey Zigachev #define __AMDGPU_VM_H__ 26*b843c749SSergey Zigachev 27*b843c749SSergey Zigachev #include <linux/idr.h> 28*b843c749SSergey Zigachev #include <linux/kfifo.h> 29*b843c749SSergey Zigachev #include <linux/rbtree.h> 30*b843c749SSergey Zigachev #include <drm/gpu_scheduler.h> 31*b843c749SSergey Zigachev #include <drm/drm_file.h> 32*b843c749SSergey Zigachev 33*b843c749SSergey Zigachev #include "amdgpu_sync.h" 34*b843c749SSergey Zigachev #include "amdgpu_ring.h" 35*b843c749SSergey Zigachev #include "amdgpu_ids.h" 36*b843c749SSergey Zigachev 37*b843c749SSergey Zigachev struct amdgpu_bo_va; 38*b843c749SSergey Zigachev struct amdgpu_job; 39*b843c749SSergey Zigachev struct amdgpu_bo_list_entry; 40*b843c749SSergey Zigachev 41*b843c749SSergey Zigachev /* 42*b843c749SSergey Zigachev * GPUVM handling 43*b843c749SSergey Zigachev */ 44*b843c749SSergey Zigachev 45*b843c749SSergey Zigachev /* Maximum number of PTEs the hardware can write with one command */ 46*b843c749SSergey Zigachev #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF 47*b843c749SSergey Zigachev 48*b843c749SSergey Zigachev /* number of entries in page table */ 49*b843c749SSergey Zigachev #define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size) 50*b843c749SSergey Zigachev 51*b843c749SSergey Zigachev /* PTBs (Page Table Blocks) need to be aligned to 32K */ 52*b843c749SSergey Zigachev #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 53*b843c749SSergey Zigachev 54*b843c749SSergey Zigachev #define AMDGPU_PTE_VALID (1ULL << 0) 55*b843c749SSergey Zigachev #define AMDGPU_PTE_SYSTEM (1ULL << 1) 56*b843c749SSergey Zigachev #define AMDGPU_PTE_SNOOPED (1ULL << 2) 57*b843c749SSergey Zigachev 58*b843c749SSergey Zigachev /* VI only */ 59*b843c749SSergey Zigachev #define AMDGPU_PTE_EXECUTABLE (1ULL << 4) 60*b843c749SSergey Zigachev 
#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

/* Fragment size encoded in PTE bits 7..11; arguments parenthesized to
 * keep expansion safe for expression arguments (CERT PRE01-C).
 */
#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)(a) << 59)


/* For GFX9: memory type in PTE bits 57..58 */
#define AMDGPU_PTE_MTYPE(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

/* Default flags for an ATC (system memory) mapping: coherent,
 * executable and read/write enabled.
 */
#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
97*b843c749SSergey Zigachev 98*b843c749SSergey Zigachev /* max number of VMHUB */ 99*b843c749SSergey Zigachev #define AMDGPU_MAX_VMHUBS 2 100*b843c749SSergey Zigachev #define AMDGPU_GFXHUB 0 101*b843c749SSergey Zigachev #define AMDGPU_MMHUB 1 102*b843c749SSergey Zigachev 103*b843c749SSergey Zigachev /* hardcode that limit for now */ 104*b843c749SSergey Zigachev #define AMDGPU_VA_RESERVED_SIZE (1ULL << 20) 105*b843c749SSergey Zigachev 106*b843c749SSergey Zigachev /* VA hole for 48bit addresses on Vega10 */ 107*b843c749SSergey Zigachev #define AMDGPU_VA_HOLE_START 0x0000800000000000ULL 108*b843c749SSergey Zigachev #define AMDGPU_VA_HOLE_END 0xffff800000000000ULL 109*b843c749SSergey Zigachev 110*b843c749SSergey Zigachev /* 111*b843c749SSergey Zigachev * Hardware is programmed as if the hole doesn't exists with start and end 112*b843c749SSergey Zigachev * address values. 113*b843c749SSergey Zigachev * 114*b843c749SSergey Zigachev * This mask is used to remove the upper 16bits of the VA and so come up with 115*b843c749SSergey Zigachev * the linear addr value. 
116*b843c749SSergey Zigachev */ 117*b843c749SSergey Zigachev #define AMDGPU_VA_HOLE_MASK 0x0000ffffffffffffULL 118*b843c749SSergey Zigachev 119*b843c749SSergey Zigachev /* max vmids dedicated for process */ 120*b843c749SSergey Zigachev #define AMDGPU_VM_MAX_RESERVED_VMID 1 121*b843c749SSergey Zigachev 122*b843c749SSergey Zigachev #define AMDGPU_VM_CONTEXT_GFX 0 123*b843c749SSergey Zigachev #define AMDGPU_VM_CONTEXT_COMPUTE 1 124*b843c749SSergey Zigachev 125*b843c749SSergey Zigachev /* See vm_update_mode */ 126*b843c749SSergey Zigachev #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) 127*b843c749SSergey Zigachev #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1) 128*b843c749SSergey Zigachev 129*b843c749SSergey Zigachev /* VMPT level enumerate, and the hiberachy is: 130*b843c749SSergey Zigachev * PDB2->PDB1->PDB0->PTB 131*b843c749SSergey Zigachev */ 132*b843c749SSergey Zigachev enum amdgpu_vm_level { 133*b843c749SSergey Zigachev AMDGPU_VM_PDB2, 134*b843c749SSergey Zigachev AMDGPU_VM_PDB1, 135*b843c749SSergey Zigachev AMDGPU_VM_PDB0, 136*b843c749SSergey Zigachev AMDGPU_VM_PTB 137*b843c749SSergey Zigachev }; 138*b843c749SSergey Zigachev 139*b843c749SSergey Zigachev /* base structure for tracking BO usage in a VM */ 140*b843c749SSergey Zigachev struct amdgpu_vm_bo_base { 141*b843c749SSergey Zigachev /* constant after initialization */ 142*b843c749SSergey Zigachev struct amdgpu_vm *vm; 143*b843c749SSergey Zigachev struct amdgpu_bo *bo; 144*b843c749SSergey Zigachev 145*b843c749SSergey Zigachev /* protected by bo being reserved */ 146*b843c749SSergey Zigachev struct list_head bo_list; 147*b843c749SSergey Zigachev 148*b843c749SSergey Zigachev /* protected by spinlock */ 149*b843c749SSergey Zigachev struct list_head vm_status; 150*b843c749SSergey Zigachev 151*b843c749SSergey Zigachev /* protected by the BO being reserved */ 152*b843c749SSergey Zigachev bool moved; 153*b843c749SSergey Zigachev }; 154*b843c749SSergey Zigachev 155*b843c749SSergey Zigachev struct amdgpu_vm_pt { 
156*b843c749SSergey Zigachev struct amdgpu_vm_bo_base base; 157*b843c749SSergey Zigachev bool huge; 158*b843c749SSergey Zigachev 159*b843c749SSergey Zigachev /* array of page tables, one for each directory entry */ 160*b843c749SSergey Zigachev struct amdgpu_vm_pt *entries; 161*b843c749SSergey Zigachev }; 162*b843c749SSergey Zigachev 163*b843c749SSergey Zigachev #define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr)) 164*b843c749SSergey Zigachev #define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48) 165*b843c749SSergey Zigachev #define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL) 166*b843c749SSergey Zigachev 167*b843c749SSergey Zigachev 168*b843c749SSergey Zigachev struct amdgpu_task_info { 169*b843c749SSergey Zigachev char process_name[TASK_COMM_LEN]; 170*b843c749SSergey Zigachev char task_name[TASK_COMM_LEN]; 171*b843c749SSergey Zigachev pid_t pid; 172*b843c749SSergey Zigachev pid_t tgid; 173*b843c749SSergey Zigachev }; 174*b843c749SSergey Zigachev 175*b843c749SSergey Zigachev struct amdgpu_vm { 176*b843c749SSergey Zigachev /* tree of virtual addresses mapped */ 177*b843c749SSergey Zigachev struct rb_root_cached va; 178*b843c749SSergey Zigachev 179*b843c749SSergey Zigachev /* BOs who needs a validation */ 180*b843c749SSergey Zigachev struct list_head evicted; 181*b843c749SSergey Zigachev 182*b843c749SSergey Zigachev /* PT BOs which relocated and their parent need an update */ 183*b843c749SSergey Zigachev struct list_head relocated; 184*b843c749SSergey Zigachev 185*b843c749SSergey Zigachev /* BOs moved, but not yet updated in the PT */ 186*b843c749SSergey Zigachev struct list_head moved; 187*b843c749SSergey Zigachev spinlock_t moved_lock; 188*b843c749SSergey Zigachev 189*b843c749SSergey Zigachev /* All BOs of this VM not currently in the state machine */ 190*b843c749SSergey Zigachev struct list_head idle; 191*b843c749SSergey Zigachev 192*b843c749SSergey Zigachev /* BO mappings freed, but not yet updated in the PT */ 
193*b843c749SSergey Zigachev struct list_head freed; 194*b843c749SSergey Zigachev 195*b843c749SSergey Zigachev /* contains the page directory */ 196*b843c749SSergey Zigachev struct amdgpu_vm_pt root; 197*b843c749SSergey Zigachev struct dma_fence *last_update; 198*b843c749SSergey Zigachev 199*b843c749SSergey Zigachev /* Scheduler entity for page table updates */ 200*b843c749SSergey Zigachev struct drm_sched_entity entity; 201*b843c749SSergey Zigachev 202*b843c749SSergey Zigachev unsigned int pasid; 203*b843c749SSergey Zigachev /* dedicated to vm */ 204*b843c749SSergey Zigachev struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS]; 205*b843c749SSergey Zigachev 206*b843c749SSergey Zigachev /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ 207*b843c749SSergey Zigachev bool use_cpu_for_update; 208*b843c749SSergey Zigachev 209*b843c749SSergey Zigachev /* Flag to indicate ATS support from PTE for GFX9 */ 210*b843c749SSergey Zigachev bool pte_support_ats; 211*b843c749SSergey Zigachev 212*b843c749SSergey Zigachev /* Up to 128 pending retry page faults */ 213*b843c749SSergey Zigachev DECLARE_KFIFO(faults, u64, 128); 214*b843c749SSergey Zigachev 215*b843c749SSergey Zigachev /* Limit non-retry fault storms */ 216*b843c749SSergey Zigachev unsigned int fault_credit; 217*b843c749SSergey Zigachev 218*b843c749SSergey Zigachev /* Points to the KFD process VM info */ 219*b843c749SSergey Zigachev struct amdkfd_process_info *process_info; 220*b843c749SSergey Zigachev 221*b843c749SSergey Zigachev /* List node in amdkfd_process_info.vm_list_head */ 222*b843c749SSergey Zigachev struct list_head vm_list_node; 223*b843c749SSergey Zigachev 224*b843c749SSergey Zigachev /* Valid while the PD is reserved or fenced */ 225*b843c749SSergey Zigachev uint64_t pd_phys_addr; 226*b843c749SSergey Zigachev 227*b843c749SSergey Zigachev /* Some basic info about the task */ 228*b843c749SSergey Zigachev struct amdgpu_task_info task_info; 229*b843c749SSergey Zigachev }; 230*b843c749SSergey 
Zigachev 231*b843c749SSergey Zigachev struct amdgpu_vm_manager { 232*b843c749SSergey Zigachev /* Handling of VMIDs */ 233*b843c749SSergey Zigachev struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS]; 234*b843c749SSergey Zigachev 235*b843c749SSergey Zigachev /* Handling of VM fences */ 236*b843c749SSergey Zigachev u64 fence_context; 237*b843c749SSergey Zigachev unsigned seqno[AMDGPU_MAX_RINGS]; 238*b843c749SSergey Zigachev 239*b843c749SSergey Zigachev uint64_t max_pfn; 240*b843c749SSergey Zigachev uint32_t num_level; 241*b843c749SSergey Zigachev uint32_t block_size; 242*b843c749SSergey Zigachev uint32_t fragment_size; 243*b843c749SSergey Zigachev enum amdgpu_vm_level root_level; 244*b843c749SSergey Zigachev /* vram base address for page table entry */ 245*b843c749SSergey Zigachev u64 vram_base_offset; 246*b843c749SSergey Zigachev /* vm pte handling */ 247*b843c749SSergey Zigachev const struct amdgpu_vm_pte_funcs *vm_pte_funcs; 248*b843c749SSergey Zigachev struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; 249*b843c749SSergey Zigachev unsigned vm_pte_num_rings; 250*b843c749SSergey Zigachev atomic_t vm_pte_next_ring; 251*b843c749SSergey Zigachev 252*b843c749SSergey Zigachev /* partial resident texture handling */ 253*b843c749SSergey Zigachev spinlock_t prt_lock; 254*b843c749SSergey Zigachev atomic_t num_prt_users; 255*b843c749SSergey Zigachev 256*b843c749SSergey Zigachev /* controls how VM page tables are updated for Graphics and Compute. 
257*b843c749SSergey Zigachev * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU 258*b843c749SSergey Zigachev * BIT1[= 0] Compute updated by SDMA [= 1] by CPU 259*b843c749SSergey Zigachev */ 260*b843c749SSergey Zigachev int vm_update_mode; 261*b843c749SSergey Zigachev 262*b843c749SSergey Zigachev /* PASID to VM mapping, will be used in interrupt context to 263*b843c749SSergey Zigachev * look up VM of a page fault 264*b843c749SSergey Zigachev */ 265*b843c749SSergey Zigachev struct idr pasid_idr; 266*b843c749SSergey Zigachev spinlock_t pasid_lock; 267*b843c749SSergey Zigachev }; 268*b843c749SSergey Zigachev 269*b843c749SSergey Zigachev void amdgpu_vm_manager_init(struct amdgpu_device *adev); 270*b843c749SSergey Zigachev void amdgpu_vm_manager_fini(struct amdgpu_device *adev); 271*b843c749SSergey Zigachev int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, 272*b843c749SSergey Zigachev int vm_context, unsigned int pasid); 273*b843c749SSergey Zigachev int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); 274*b843c749SSergey Zigachev void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); 275*b843c749SSergey Zigachev bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev, 276*b843c749SSergey Zigachev unsigned int pasid); 277*b843c749SSergey Zigachev void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, 278*b843c749SSergey Zigachev struct list_head *validated, 279*b843c749SSergey Zigachev struct amdgpu_bo_list_entry *entry); 280*b843c749SSergey Zigachev bool amdgpu_vm_ready(struct amdgpu_vm *vm); 281*b843c749SSergey Zigachev int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, 282*b843c749SSergey Zigachev int (*callback)(void *p, struct amdgpu_bo *bo), 283*b843c749SSergey Zigachev void *param); 284*b843c749SSergey Zigachev int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, 285*b843c749SSergey Zigachev struct amdgpu_vm *vm, 286*b843c749SSergey Zigachev uint64_t saddr, uint64_t size); 
287*b843c749SSergey Zigachev int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); 288*b843c749SSergey Zigachev int amdgpu_vm_update_directories(struct amdgpu_device *adev, 289*b843c749SSergey Zigachev struct amdgpu_vm *vm); 290*b843c749SSergey Zigachev int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 291*b843c749SSergey Zigachev struct amdgpu_vm *vm, 292*b843c749SSergey Zigachev struct dma_fence **fence); 293*b843c749SSergey Zigachev int amdgpu_vm_handle_moved(struct amdgpu_device *adev, 294*b843c749SSergey Zigachev struct amdgpu_vm *vm); 295*b843c749SSergey Zigachev int amdgpu_vm_bo_update(struct amdgpu_device *adev, 296*b843c749SSergey Zigachev struct amdgpu_bo_va *bo_va, 297*b843c749SSergey Zigachev bool clear); 298*b843c749SSergey Zigachev void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 299*b843c749SSergey Zigachev struct amdgpu_bo *bo, bool evicted); 300*b843c749SSergey Zigachev struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, 301*b843c749SSergey Zigachev struct amdgpu_bo *bo); 302*b843c749SSergey Zigachev struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, 303*b843c749SSergey Zigachev struct amdgpu_vm *vm, 304*b843c749SSergey Zigachev struct amdgpu_bo *bo); 305*b843c749SSergey Zigachev int amdgpu_vm_bo_map(struct amdgpu_device *adev, 306*b843c749SSergey Zigachev struct amdgpu_bo_va *bo_va, 307*b843c749SSergey Zigachev uint64_t addr, uint64_t offset, 308*b843c749SSergey Zigachev uint64_t size, uint64_t flags); 309*b843c749SSergey Zigachev int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, 310*b843c749SSergey Zigachev struct amdgpu_bo_va *bo_va, 311*b843c749SSergey Zigachev uint64_t addr, uint64_t offset, 312*b843c749SSergey Zigachev uint64_t size, uint64_t flags); 313*b843c749SSergey Zigachev int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 314*b843c749SSergey Zigachev struct amdgpu_bo_va *bo_va, 315*b843c749SSergey Zigachev uint64_t addr); 316*b843c749SSergey Zigachev int 
amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, 317*b843c749SSergey Zigachev struct amdgpu_vm *vm, 318*b843c749SSergey Zigachev uint64_t saddr, uint64_t size); 319*b843c749SSergey Zigachev struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, 320*b843c749SSergey Zigachev uint64_t addr); 321*b843c749SSergey Zigachev void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); 322*b843c749SSergey Zigachev void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 323*b843c749SSergey Zigachev struct amdgpu_bo_va *bo_va); 324*b843c749SSergey Zigachev void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, 325*b843c749SSergey Zigachev uint32_t fragment_size_default, unsigned max_level, 326*b843c749SSergey Zigachev unsigned max_bits); 327*b843c749SSergey Zigachev int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 328*b843c749SSergey Zigachev bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, 329*b843c749SSergey Zigachev struct amdgpu_job *job); 330*b843c749SSergey Zigachev void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); 331*b843c749SSergey Zigachev 332*b843c749SSergey Zigachev void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, 333*b843c749SSergey Zigachev struct amdgpu_task_info *task_info); 334*b843c749SSergey Zigachev 335*b843c749SSergey Zigachev void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); 336*b843c749SSergey Zigachev 337*b843c749SSergey Zigachev #endif 338