/*	$NetBSD: amdgpu_ttm.h,v 1.5 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__

#include "amdgpu.h"
#include <drm/gpu_scheduler.h>

#define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)

#define AMDGPU_PL_FLAG_GDS	(TTM_PL_FLAG_PRIV << 0)
#define AMDGPU_PL_FLAG_GWS	(TTM_PL_FLAG_PRIV << 1)
#define AMDGPU_PL_FLAG_OA	(TTM_PL_FLAG_PRIV << 2)

#define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2

#define AMDGPU_POISON	0xd0bed0be

#ifdef __NetBSD__
# define	__amdgpu_aperture_iomem
# define	__iomem		__amdgpu_aperture_iomem
#endif

struct amdgpu_mman {
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;
#ifdef __NetBSD__
	bus_space_handle_t		aper_base_handle;
	void				*aper_base_kaddr;
#else
	void __iomem			*aper_base_kaddr;
#endif

#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_entries[8];
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs	*buffer_funcs;
	struct amdgpu_ring			*buffer_funcs_ring;
	bool					buffer_funcs_enabled;

	struct mutex				gtt_window_lock;
	/* Scheduler entity for buffer moves */
	struct drm_sched_entity			entity;
};

#ifdef __NetBSD__
# undef	__iomem
#endif

struct amdgpu_copy_mem {
	struct ttm_buffer_object	*bo;
	struct ttm_mem_reg		*mem;
	unsigned long			offset;
};

extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;

bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);

u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_late_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
					bool enable);

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct dma_resv *resv,
			       struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **fence);

#ifdef __NetBSD__
int amdgpu_mmap_object(struct drm_device *, off_t, size_t, vm_prot_t,
    struct uvm_object **, voff_t *, struct file *);
#else
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
#endif
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
					       struct page **pages)
{
	return -EPERM;
}
static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
	return false;
}
#endif

void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
#ifdef __NetBSD__
struct vmspace *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
#else
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
#endif
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);

#endif
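
/*
 * Minimal usage sketch of the copy API declared above, assuming a caller
 * that already holds a GPU source/destination address pair (src_gpu_addr,
 * dst_gpu_addr), a byte count (nbytes), and the buffer's reservation
 * object (resv); those names are illustrative, not part of this header.
 * The copy is scheduled on the SDMA buffer-funcs ring and the caller
 * waits on the returned fence:
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring,
 *	    src_gpu_addr, dst_gpu_addr, nbytes, resv, &fence,
 *	    false, false);
 *	if (r == 0 && fence != NULL) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */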