/*	$NetBSD: amdgpu_object.h,v 1.3 2018/08/27 14:04:50 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <linux/device.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
        switch (mem_type) {
        case TTM_PL_VRAM:
                return AMDGPU_GEM_DOMAIN_VRAM;
        case TTM_PL_TT:
                return AMDGPU_GEM_DOMAIN_GTT;
        case TTM_PL_SYSTEM:
                return AMDGPU_GEM_DOMAIN_CPU;
        case AMDGPU_PL_GDS:
                return AMDGPU_GEM_DOMAIN_GDS;
        case AMDGPU_PL_GWS:
                return AMDGPU_GEM_DOMAIN_GWS;
        case AMDGPU_PL_OA:
                return AMDGPU_GEM_DOMAIN_OA;
        default:
                break;
        }
        return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(bo->adev->dev, "%p reserve failed\n", bo);
                return r;
        }
        return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
        ttm_bo_unreserve(&bo->tbo);
}
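
/*
 * Illustrative sketch only (not part of the driver API, never compiled):
 * the expected calling pattern for the reservation helpers above.  The
 * reservation must be held while the BO's placement or mappings are
 * touched, and an interruptible reserve may fail with -ERESTARTSYS, which
 * the caller is expected to propagate.  The surrounding function and the
 * "operate on the BO" step are hypothetical.
 */
#if 0
static int example_with_reserved_bo(struct amdgpu_bo *bo)
{
        int r;

        r = amdgpu_bo_reserve(bo, false);       /* interruptible reserve */
        if (unlikely(r != 0))
                return r;                       /* may be -ERESTARTSYS */

        /* ... operate on the reserved BO here ... */

        amdgpu_bo_unreserve(bo);
        return 0;
}
#endif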

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
        return bo->tbo.offset;
}

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
        return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
        return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
        return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
        return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

int amdgpu_bo_create(struct amdgpu_device *adev,
                     unsigned long size, int byte_align,
                     bool kernel, u32 domain, u64 flags,
                     struct sg_table *sg,
                     struct reservation_object *resv,
                     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                unsigned long size, int byte_align,
                                bool kernel, u32 domain, u64 flags,
                                struct sg_table *sg,
                                struct ttm_placement *placement,
                                struct reservation_object *resv,
                                struct amdgpu_bo **bo_ptr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset,
                             u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
void amdgpu_bo_force_delete(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
#ifndef __NetBSD__
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                         struct vm_area_struct *vma);
#endif
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
                            uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
                     bool shared);
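
/*
 * Illustrative sketch only (not part of the driver API, never compiled):
 * one way the create/pin/kmap declarations above are commonly combined to
 * obtain a kernel-owned, CPU-visible buffer.  The function name, the GTT
 * domain choice and the error-handling layout are assumptions made for the
 * example; pinning and mapping are done under the BO reservation.
 */
#if 0
static int example_create_kernel_bo(struct amdgpu_device *adev,
                                    unsigned long size,
                                    struct amdgpu_bo **bo,
                                    u64 *gpu_addr, void **cpu_addr)
{
        int r;

        /* Create a BO owned by the kernel in the GTT domain. */
        r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
        if (r)
                return r;

        r = amdgpu_bo_reserve(*bo, false);
        if (r)
                goto error_free;

        /* Pin so the GPU address stays stable, then map for CPU access. */
        r = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, gpu_addr);
        if (r)
                goto error_unreserve;

        r = amdgpu_bo_kmap(*bo, cpu_addr);
        if (r)
                goto error_unpin;

        amdgpu_bo_unreserve(*bo);
        return 0;

error_unpin:
        amdgpu_bo_unpin(*bo);
error_unreserve:
        amdgpu_bo_unreserve(*bo);
error_free:
        amdgpu_bo_unref(bo);
        return r;
}
#endif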

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
        return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
        return (char *)sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager);
int
amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
                             struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
                       struct amdgpu_sa_bo **sa_bo,
                       struct fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m);
#endif


#endif