/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
        unsigned long size;
        int byte_align;
        u32 domain;
        u32 preferred_domain;
        u64 flags;
        enum ttm_bo_type type;
        struct reservation_object *resv;
};

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
        struct amdgpu_bo_va *bo_va;
        struct list_head list;
        struct rb_node rb;
        uint64_t start;
        uint64_t last;
        uint64_t __subtree_last;
        uint64_t offset;
        uint64_t flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
        struct amdgpu_vm_bo_base base;

        /* protected by bo being reserved */
        unsigned ref_count;

        /* all other members protected by the VM PD being reserved */
        struct dma_fence *last_pt_update;

        /* mappings for this bo_va */
        struct list_head invalids;
        struct list_head valids;

        /* If the mappings are cleared or filled */
        bool cleared;
};

struct amdgpu_bo {
        /* Protected by tbo.reserved */
        u32 preferred_domains;
        u32 allowed_domains;
        struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
        struct ttm_placement placement;
        struct ttm_buffer_object tbo;
        struct ttm_bo_kmap_obj kmap;
        u64 flags;
        unsigned pin_count;
        u64 tiling_flags;
        u64 metadata_flags;
        void *metadata;
        u32 metadata_size;
        unsigned prime_shared_count;
        /* list of all virtual addresses to which this bo is associated */
        struct list_head va;
        /* Constant after initialization */
        struct amdgpu_device *adev;
        struct drm_gem_object gem_base;
        struct amdgpu_bo *parent;
        struct amdgpu_bo *shadow;

        struct ttm_bo_kmap_obj dma_buf_vmap;
        struct amdgpu_mn *mn;

        union {
                struct list_head mn_list;
                struct list_head shadow_list;
        };

        struct kgd_mem *kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
        return container_of(tbo, struct amdgpu_bo, tbo);
}
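
/*
 * Illustrative sketch (not part of the driver): a typical caller fills an
 * amdgpu_bo_param and hands it to amdgpu_bo_create(), declared later in this
 * header. The concrete values below (a 4 MiB, page-aligned VRAM allocation
 * with no special flags) are assumptions chosen only for the example.
 *
 *	struct amdgpu_bo_param bp = {
 *		.size = 4 << 20,
 *		.byte_align = PAGE_SIZE,
 *		.domain = AMDGPU_GEM_DOMAIN_VRAM,
 *		.flags = 0,
 *		.type = ttm_bo_type_device,
 *		.resv = NULL,
 *	};
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */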

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
        switch (mem_type) {
        case TTM_PL_VRAM:
                return AMDGPU_GEM_DOMAIN_VRAM;
        case TTM_PL_TT:
                return AMDGPU_GEM_DOMAIN_GTT;
        case TTM_PL_SYSTEM:
                return AMDGPU_GEM_DOMAIN_CPU;
        case AMDGPU_PL_GDS:
                return AMDGPU_GEM_DOMAIN_GDS;
        case AMDGPU_PL_GWS:
                return AMDGPU_GEM_DOMAIN_GWS;
        case AMDGPU_PL_OA:
                return AMDGPU_GEM_DOMAIN_OA;
        default:
                break;
        }
        return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r;

        r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(adev->dev, "%p reserve failed\n", bo);
                return r;
        }
        return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
        ttm_bo_unreserve(&bo->tbo);
}

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
        return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
        return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
        return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
        return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
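
/*
 * Illustrative sketch (not part of the driver): CPU access to a BO normally
 * brackets amdgpu_bo_kmap()/amdgpu_bo_kunmap() (declared later in this
 * header) with the reserve/unreserve helpers above. The helper name fill_bo()
 * is hypothetical and only stands in for the caller's own code.
 *
 *	static int fill_bo(struct amdgpu_bo *bo, u8 value)
 *	{
 *		void *ptr;
 *		int r;
 *
 *		r = amdgpu_bo_reserve(bo, false);
 *		if (r)
 *			return r;
 *		r = amdgpu_bo_kmap(bo, &ptr);
 *		if (!r) {
 *			memset(ptr, value, amdgpu_bo_size(bo));
 *			amdgpu_bo_kunmap(bo);
 *		}
 *		amdgpu_bo_unreserve(bo);
 *		return r;
 *	}
 */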

/**
 * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
 * is accessible to the GPU.
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
        switch (bo->tbo.mem.mem_type) {
        case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
        case TTM_PL_VRAM: return true;
        default: return false;
        }
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
        struct drm_mm_node *node = bo->tbo.mem.mm_node;
        unsigned long pages_left;

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
                return false;

        for (pages_left = bo->tbo.mem.num_pages; pages_left;
             pages_left -= node->size, node++)
                if (node->start < fpfn)
                        return true;

        return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
        return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
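
/*
 * Illustrative sketch (not part of the driver): a caller that needs CPU
 * access may check whether a VRAM BO is at least partly in CPU-visible VRAM
 * and otherwise ask for a GTT placement first. The control flow and the
 * caller-provided struct ttm_operation_ctx "ctx" are assumptions for
 * illustration, not the driver's actual fault path.
 *
 *	if (bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 *	    !amdgpu_bo_in_cpu_visible_vram(bo)) {
 *		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 *		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	}
 */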

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
                     struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
                              u32 domain, struct amdgpu_bo **bo_ptr,
                              u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
#ifdef notyet
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                         struct vm_area_struct *vma);
#endif
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
                               struct amdgpu_ring *ring,
                               struct amdgpu_bo *bo,
                               struct reservation_object *resv,
                               struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  struct amdgpu_bo *bo,
                                  struct reservation_object *resv,
                                  struct dma_fence **fence,
                                  bool direct);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
                                            uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
        return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
        return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
                       struct amdgpu_sa_bo **sa_bo,
                       struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m);
#endif

#endif
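
/*
 * Illustrative sketch (not part of the driver): the sub-allocator declared
 * above is typically used for many small, short-lived GPU allocations carved
 * out of one backing BO. The manager size, alignment, domain and the fence
 * used on free are assumptions chosen only for the example.
 *
 *	struct amdgpu_sa_manager mgr;
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_manager_init(adev, &mgr, 256 * 1024, 256,
 *				      AMDGPU_GEM_DOMAIN_GTT);
 *	...
 *	r = amdgpu_sa_bo_new(&mgr, &sa_bo, 4096, 256);
 *	if (!r) {
 *		uint64_t gpu_addr = amdgpu_sa_bo_gpu_addr(sa_bo);
 *		void *cpu_ptr = amdgpu_sa_bo_cpu_addr(sa_bo);
 *		...
 *		amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *	}
 */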