/netbsd-src/sys/external/bsd/drm2/dist/drm/qxl/
qxl_object.c
     35  static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)    in qxl_ttm_bo_destroy() [argument]
     40  bo = to_qxl_bo(tbo);    in qxl_ttm_bo_destroy()
     41  qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;    in qxl_ttm_bo_destroy()
     48  drm_gem_object_release(&bo->tbo.base);    in qxl_ttm_bo_destroy()
     67  if (qbo->tbo.base.size <= PAGE_SIZE)    in qxl_ttm_placement_from_domain()
    121  r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);    in qxl_bo_create()
    126  bo->tbo.base.funcs = &qxl_object_funcs;    in qxl_bo_create()
    137  r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,    in qxl_bo_create()
    162  r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);    in qxl_bo_kmap()
    179  if (bo->tbo.mem.mem_type == TTM_PL_VRAM)    in qxl_bo_kmap_atomic_page()
    [all …]
qxl_object.h
     36  r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);    in qxl_bo_reserve()
     39  struct drm_device *ddev = bo->tbo.base.dev;    in qxl_bo_reserve()
     50  ttm_bo_unreserve(&bo->tbo);    in qxl_bo_unreserve()
     55  return bo->tbo.offset;    in qxl_bo_gpu_offset()
     60  return bo->tbo.num_pages << PAGE_SHIFT;    in qxl_bo_size()
     65  return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);    in qxl_bo_mmap_offset()
     73  r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);    in qxl_bo_wait()
     76  struct drm_device *ddev = bo->tbo.base.dev;    in qxl_bo_wait()
     84  *mem_type = bo->tbo.mem.mem_type;    in qxl_bo_wait()
     86  r = ttm_bo_wait(&bo->tbo, true, no_wait);    in qxl_bo_wait()
    [all …]
qxl_gem.c
     40  struct ttm_buffer_object *tbo;    in qxl_gem_object_free() [local]
     46  tbo = &qobj->tbo;    in qxl_gem_object_free()
     47  ttm_bo_put(tbo);    in qxl_gem_object_free()
     71  *obj = &qbo->tbo.base;    in qxl_gem_object_create()
qxl_drv.h
     77  struct ttm_buffer_object tbo;    [member]
     99  #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base)
    100  #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
    313  (bo->tbo.mem.mem_type == TTM_PL_VRAM)    in qxl_bo_physical_address()
    316  WARN_ON_ONCE((bo->tbo.offset & slot->gpu_offset) != slot->gpu_offset);    in qxl_bo_physical_address()
    319  return slot->high_bits | (bo->tbo.offset - slot->gpu_offset + offset);    in qxl_bo_physical_address()
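The qxl_drv.h hits above show the pattern that every other `tbo` reference in this listing relies on: the driver BO embeds a `struct ttm_buffer_object` as a member (line 77), and the `to_qxl_bo()` / `gem_to_qxl_bo()` macros (lines 99-100) recover the wrapper from a pointer to the embedded TTM or GEM object with `container_of()`. The following is a minimal, self-contained sketch of that recovery; the `demo_*` types and the local `demo_container_of` macro are illustrative stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for struct drm_gem_object and struct ttm_buffer_object. */
struct demo_gem_object { unsigned long size; };
struct demo_ttm_buffer_object { struct demo_gem_object base; unsigned long num_pages; };

/* Stand-in for struct qxl_bo: the TTM object is embedded, not pointed to. */
struct demo_qxl_bo {
	struct demo_ttm_buffer_object tbo;	/* recoverable via container_of */
	int pin_count;
};

/* Same idea as the kernel's container_of(): back up from a member to its parent. */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirrors to_qxl_bo(): TTM hands us &bo->tbo, we recover the qxl_bo. */
static struct demo_qxl_bo *demo_to_qxl_bo(struct demo_ttm_buffer_object *tobj)
{
	return demo_container_of(tobj, struct demo_qxl_bo, tbo);
}

/* Mirrors gem_to_qxl_bo(): GEM hands us &bo->tbo.base, same trick one level deeper. */
static struct demo_qxl_bo *demo_gem_to_qxl_bo(struct demo_gem_object *gobj)
{
	return demo_container_of(gobj, struct demo_qxl_bo, tbo.base);
}

int main(void)
{
	struct demo_qxl_bo bo = { .tbo = { .base = { .size = 4096 }, .num_pages = 1 } };

	/* Both conversions land back on the original wrapper object. */
	printf("%d %d\n",
	    demo_to_qxl_bo(&bo.tbo) == &bo,
	    demo_gem_to_qxl_bo(&bo.tbo.base) == &bo);
	return 0;
}

The same trick appears further down as `ttm_to_amdgpu_bo()` in amdgpu_object.h and as the `container_of()` call in radeon_object.c.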
qxl_release.c
    219  if (entry->tv.bo == &bo->tbo)    in qxl_release_list_add()
    228  entry->tv.bo = &bo->tbo;    in qxl_release_list_add()
    241  ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);    in qxl_release_validate_bo()
    246  ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);    in qxl_release_validate_bo()
    251  ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);    in qxl_release_validate_bo()
/netbsd-src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_object.c
     71  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_bo_subtract_pin_size()
     73  if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {    in amdgpu_bo_subtract_pin_size()
     77  } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {    in amdgpu_bo_subtract_pin_size()
     82  static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)    in amdgpu_bo_destroy() [argument]
     84  struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);    in amdgpu_bo_destroy()
     85  struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);    in amdgpu_bo_destroy()
     92  if (bo->tbo.base.import_attach)    in amdgpu_bo_destroy()
     93  drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);    in amdgpu_bo_destroy()
     94  drm_gem_object_release(&bo->tbo.base);    in amdgpu_bo_destroy()
    134  struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);    in amdgpu_bo_placement_from_domain()
    [all …]
amdgpu_object.h
     91  struct ttm_buffer_object tbo;    [member]
    119  static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)    in ttm_to_amdgpu_bo() [argument]
    121  return container_of(tbo, struct amdgpu_bo, tbo);    in ttm_to_amdgpu_bo()
    162  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_bo_reserve()
    165  r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);    in amdgpu_bo_reserve()
    176  ttm_bo_unreserve(&bo->tbo);    in amdgpu_bo_unreserve()
    181  return bo->tbo.num_pages << PAGE_SHIFT;    in amdgpu_bo_size()
    186  return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;    in amdgpu_bo_ngpu_pages()
    191  return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;    in amdgpu_bo_gpu_page_alignment()
    202  return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);    in amdgpu_bo_mmap_offset()
    [all …]
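Besides the `container_of()` helper (`ttm_to_amdgpu_bo()`, lines 119-121), amdgpu_object.h wraps the embedded `tbo` in small inline accessors whose arithmetic is visible in the hits: the byte size is `num_pages << PAGE_SHIFT`, and the GPU-page count and alignment just divide that by the GPU page size. A self-contained sketch of the arithmetic, with illustrative constants standing in for the kernel's PAGE_SHIFT and AMDGPU_GPU_PAGE_SIZE:

#include <stdio.h>

/* Illustrative values: 4 KiB CPU pages and 4 KiB GPU pages; the real
 * constants come from the kernel configuration, not from here. */
#define DEMO_PAGE_SHIFT		12UL
#define DEMO_GPU_PAGE_SIZE	4096UL

struct demo_bo {
	unsigned long num_pages;	/* mirrors bo->tbo.num_pages */
	unsigned long page_alignment;	/* mirrors bo->tbo.mem.page_alignment */
};

/* Mirrors amdgpu_bo_size(): CPU pages backing the BO, in bytes. */
static unsigned long demo_bo_size(const struct demo_bo *bo)
{
	return bo->num_pages << DEMO_PAGE_SHIFT;
}

/* Mirrors amdgpu_bo_ngpu_pages(): the same byte size expressed in GPU pages. */
static unsigned long demo_bo_ngpu_pages(const struct demo_bo *bo)
{
	return (bo->num_pages << DEMO_PAGE_SHIFT) / DEMO_GPU_PAGE_SIZE;
}

/* Mirrors amdgpu_bo_gpu_page_alignment(): alignment converted the same way. */
static unsigned long demo_bo_gpu_page_alignment(const struct demo_bo *bo)
{
	return (bo->page_alignment << DEMO_PAGE_SHIFT) / DEMO_GPU_PAGE_SIZE;
}

int main(void)
{
	struct demo_bo bo = { .num_pages = 16, .page_alignment = 1 };

	printf("size=%lu bytes, gpu_pages=%lu, gpu_align=%lu\n",
	    demo_bo_size(&bo), demo_bo_ngpu_pages(&bo),
	    demo_bo_gpu_page_alignment(&bo));
	return 0;
}

radeon_object.h (further down in this listing) defines the same helpers against RADEON_GPU_PAGE_SIZE.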
amdgpu_dma_buf.c
     61  ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,    in amdgpu_gem_prime_vmap()
    108  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_gem_prime_mmap()
    126  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||    in amdgpu_gem_prime_mmap()
    199  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_dma_buf_attach()
    221  r = __dma_resv_make_exclusive(bo->tbo.base.resv);    in amdgpu_dma_buf_attach()
    243  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_dma_buf_detach()
    280  sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);    in amdgpu_dma_buf_map()
    335  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_dma_buf_begin_cpu_access()
    352  ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);    in amdgpu_dma_buf_begin_cpu_access()
    388  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||    in amdgpu_gem_prime_export()
    [all …]
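Line 280 builds the dma-buf scatter-gather table straight from the TTM page array with `drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages)`. The sketch below illustrates only the underlying idea, coalescing runs of contiguous pages into segments so the table stays short; it is not the DRM helper's actual implementation, and the `demo_*` types are invented for the example.

#include <stdio.h>
#include <stddef.h>

/* A fake "page" identified only by its page-frame number. */
struct demo_page { unsigned long pfn; };

/* One scatter-gather segment: a run of physically contiguous pages. */
struct demo_sg_segment { unsigned long start_pfn; unsigned long npages; };

/* Coalesce adjacent PFNs into segments; returns the number of segments written. */
static size_t demo_pages_to_sg(const struct demo_page *pages, size_t npages,
    struct demo_sg_segment *out)
{
	size_t nsegs = 0;

	for (size_t i = 0; i < npages; i++) {
		if (nsegs > 0 &&
		    out[nsegs - 1].start_pfn + out[nsegs - 1].npages == pages[i].pfn) {
			out[nsegs - 1].npages++;		/* extend the current run */
		} else {
			out[nsegs].start_pfn = pages[i].pfn;	/* start a new run */
			out[nsegs].npages = 1;
			nsegs++;
		}
	}
	return nsegs;
}

int main(void)
{
	/* Two contiguous runs: {100,101,102} and {200,201}. */
	struct demo_page pages[] = { {100}, {101}, {102}, {200}, {201} };
	struct demo_sg_segment segs[5];
	size_t n = demo_pages_to_sg(pages, 5, segs);

	for (size_t i = 0; i < n; i++)
		printf("seg %zu: pfn %lu, %lu page(s)\n", i, segs[i].start_pfn, segs[i].npages);
	return 0;
}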
amdgpu_gem.c
     95  *obj = &bo->tbo.base;    in amdgpu_gem_object_create()
    132  struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);    in amdgpu_gem_object_open()
    143  mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);    in amdgpu_gem_object_open()
    152  abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)    in amdgpu_gem_object_open()
    173  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_gem_object_close()
    187  tv.bo = &bo->tbo;    in amdgpu_gem_object_close()
    270  resv = vm->root.base.bo->tbo.base.resv;    in amdgpu_gem_create_ioctl()
    336  r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);    in amdgpu_gem_userptr_ioctl()
    347  r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);    in amdgpu_gem_userptr_ioctl()
    356  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);    in amdgpu_gem_userptr_ioctl()
    [all …]
amdgpu_amdkfd_gpuvm.c
    206  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_amdkfd_unreserve_memory_limit()
    231  struct dma_resv *resv = bo->tbo.base.resv;    in amdgpu_amdkfd_remove_eviction_fence()
    290  if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),    in amdgpu_amdkfd_bo_validate()
    296  ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);    in amdgpu_amdkfd_bo_validate()
    323  struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);    in vm_validate_pt_pd_bos()
    359  struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);    in vm_update_pds()
    371  struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);    in get_pte_flags()
    423  unsigned long bo_size = bo->tbo.mem.size;    in add_bo_to_vm()
    494  entry->bo = &bo->tbo;    in add_kgd_mem_to_kfd_bo_list()
    535  ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);    in init_user_pages()
    [all …]
amdgpu_gtt_mgr.c
     40  struct ttm_buffer_object *tbo;    [member]
    187  struct ttm_buffer_object *tbo,    in amdgpu_gtt_mgr_alloc() [argument]
    238  struct ttm_buffer_object *tbo,    in amdgpu_gtt_mgr_new() [argument]
    247  if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&    in amdgpu_gtt_mgr_new()
    263  node->tbo = tbo;    in amdgpu_gtt_mgr_new()
    267  r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);    in amdgpu_gtt_mgr_new()
    339  r = amdgpu_ttm_recover_gart(node->tbo);    in amdgpu_gtt_mgr_recover()
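The GTT-manager hits show why each allocation node carries a `tbo` back-pointer: it is recorded at allocation time (line 263, `node->tbo = tbo;`) so that `amdgpu_gtt_mgr_recover()` can later walk its nodes and re-bind each mapping through the owning object (line 339, `amdgpu_ttm_recover_gart(node->tbo)`). A minimal sketch of that back-pointer-and-walk structure, using hypothetical `demo_*` types rather than the real manager and TTM structures:

#include <stdio.h>

struct demo_bo { const char *name; int bound; };

/* Mirrors the manager's node: the allocation remembers which BO owns it. */
struct demo_gtt_node {
	struct demo_bo *tbo;		/* back-pointer set at allocation time */
	struct demo_gtt_node *next;
};

/* Stand-in for amdgpu_ttm_recover_gart(): re-establish the mapping for one BO. */
static int demo_recover_gart(struct demo_bo *bo)
{
	bo->bound = 1;
	printf("re-bound %s\n", bo->name);
	return 0;
}

/* Mirrors amdgpu_gtt_mgr_recover(): walk every node and recover via its back-pointer. */
static int demo_gtt_mgr_recover(struct demo_gtt_node *head)
{
	for (struct demo_gtt_node *node = head; node != NULL; node = node->next) {
		int r = demo_recover_gart(node->tbo);
		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	struct demo_bo a = { "bo-a", 0 }, b = { "bo-b", 0 };
	struct demo_gtt_node nb = { &b, NULL }, na = { &a, &nb };

	return demo_gtt_mgr_recover(&na);
}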
amdgpu_vm.c
    239  if (bo->tbo.type == ttm_bo_type_kernel)    in amdgpu_vm_bo_evicted()
    338  if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)    in amdgpu_vm_bo_base_init()
    342  if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)    in amdgpu_vm_bo_base_init()
    348  amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))    in amdgpu_vm_bo_base_init()
    596  entry->tv.bo = &vm->root.base.bo->tbo;    in amdgpu_vm_get_pd_bo()
    628  if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)    in amdgpu_vm_del_from_lru_notify()
    663  ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);    in amdgpu_vm_move_to_lru_tail()
    665  ttm_bo_move_to_lru_tail(&bo->shadow->tbo,    in amdgpu_vm_move_to_lru_tail()
    702  if (bo->tbo.type != ttm_bo_type_kernel) {    in amdgpu_vm_validate_pt_bos()
    792  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);    in amdgpu_vm_clear_bo()
    [all …]
amdgpu_gmc.c
     53  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_gmc_get_pde_for_bo()
     56  switch (bo->tbo.mem.mem_type) {    in amdgpu_gmc_get_pde_for_bo()
     58  ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);    in amdgpu_gmc_get_pde_for_bo()
     72  *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);    in amdgpu_gmc_get_pde_for_bo()
     82  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_gmc_pd_addr()
amdgpu_mn.c
     73  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_mn_invalidate_gfx()
     83  r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,    in amdgpu_mn_invalidate_gfx()
    110  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_mn_invalidate_hsa()
amdgpu_cs.c
     60  p->uf_entry.tv.bo = &bo->tbo;    in amdgpu_cs_user_fence_chunk()
     72  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {    in amdgpu_cs_user_fence_chunk()
    407  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);    in amdgpu_cs_bo_validate()
    411  .resv = bo->tbo.base.resv,    in amdgpu_cs_bo_validate()
    443  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);    in amdgpu_cs_bo_validate()
    488  usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);    in amdgpu_cs_list_validate()
    496  if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&    in amdgpu_cs_list_validate()
    500  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);    in amdgpu_cs_list_validate()
    504  amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,    in amdgpu_cs_list_validate()
    570  e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,    in amdgpu_cs_parser_bos()
    [all …]
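The command-submission hits repeat the flow seen throughout this listing: choose a placement for the BO, then call `ttm_bo_validate(&bo->tbo, &bo->placement, &ctx)` on the embedded TTM object, with userptr-backed BOs detected through the `tbo.ttm` handle and treated specially. A rough stub of that ordering follows; the `demo_*` names are hypothetical, and the real path also involves the TTM operation context and placement lists visible above.

#include <stdio.h>

enum demo_domain { DEMO_DOMAIN_VRAM, DEMO_DOMAIN_GTT };

struct demo_bo {
	enum demo_domain placement;	/* mirrors bo->placement */
	int is_userptr;			/* mirrors amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) != NULL */
	int valid;
};

/* Stand-in for ttm_bo_validate(): move/bind the BO to its chosen placement. */
static int demo_bo_validate(struct demo_bo *bo)
{
	bo->valid = 1;
	return 0;
}

/* Mirrors the shape of amdgpu_cs_bo_validate(): pick a placement, then validate. */
static int demo_cs_bo_validate(struct demo_bo *bo, enum demo_domain domain)
{
	if (bo->is_userptr) {
		/* Userptr BOs get their backing pages from the MM side instead. */
		printf("skipping userptr BO\n");
		return 0;
	}
	bo->placement = domain;
	return demo_bo_validate(bo);
}

int main(void)
{
	struct demo_bo bo = { DEMO_DOMAIN_GTT, 0, 0 };
	int r = demo_cs_bo_validate(&bo, DEMO_DOMAIN_VRAM);

	printf("r=%d valid=%d placement=%d\n", r, bo.valid, (int)bo.placement);
	return r;
}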
amdgpu_ttm.c
    242  return drm_vma_node_verify_access(&abo->tbo.base.vma_node,    in amdgpu_verify_access()
    245  return drm_vma_node_verify_access(&abo->tbo.base.vma_node,    in amdgpu_verify_access()
    829  struct ttm_tt *ttm = bo->tbo.ttm;    in amdgpu_ttm_tt_get_user_pages()
   1067  struct ttm_buffer_object *tbo,    in amdgpu_ttm_gart_bind() [argument]
   1070  struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);    in amdgpu_ttm_gart_bind()
   1071  struct ttm_tt *ttm = tbo->ttm;    in amdgpu_ttm_gart_bind()
   1220  int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)    in amdgpu_ttm_recover_gart() [argument]
   1222  struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);    in amdgpu_ttm_recover_gart()
   1226  if (!tbo->ttm)    in amdgpu_ttm_recover_gart()
   1229  flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);    in amdgpu_ttm_recover_gart()
    [all …]
amdgpu_vm_sdma.c
     45  r = amdgpu_ttm_alloc_gart(&table->tbo);    in amdgpu_vm_sdma_map_table()
     50  r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);    in amdgpu_vm_sdma_map_table()
     87  return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,    in amdgpu_vm_sdma_prepare()
/netbsd-src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_object.c
     64  u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;    in radeon_update_memory_usage()
     82  static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)    in radeon_ttm_bo_destroy() [argument]
     86  bo = container_of(tbo, struct radeon_bo, tbo);    in radeon_ttm_bo_destroy()
     88  radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);    in radeon_ttm_bo_destroy()
     95  if (bo->tbo.base.import_attach)    in radeon_ttm_bo_destroy()
     96  drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);    in radeon_ttm_bo_destroy()
     97  drm_gem_object_release(&bo->tbo.base);    in radeon_ttm_bo_destroy()
    219  drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);    in radeon_bo_create()
    269  r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,    in radeon_bo_create()
    294  r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);    in radeon_bo_kmap()
    [all …]
radeon_object.h
     70  r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);    in radeon_bo_reserve()
     81  ttm_bo_unreserve(&bo->tbo);    in radeon_bo_unreserve()
     95  return bo->tbo.offset;    in radeon_bo_gpu_offset()
    100  return bo->tbo.num_pages << PAGE_SHIFT;    in radeon_bo_size()
    105  return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;    in radeon_bo_ngpu_pages()
    110  return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;    in radeon_bo_gpu_page_alignment()
    121  return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);    in radeon_bo_mmap_offset()
radeon_prime.c
     42  int npages = bo->tbo.num_pages;    in radeon_gem_prime_get_sg_table()
     44  return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);    in radeon_gem_prime_get_sg_table()
     52  ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,    in radeon_gem_prime_vmap()
     88  return &bo->tbo.base;    in radeon_gem_prime_import_sg_table()
    129  if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))    in radeon_gem_prime_export()
radeon_mn.c
     61  if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)    in radeon_mn_invalidate()
     73  r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,    in radeon_mn_invalidate()
     79  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);    in radeon_mn_invalidate()
radeon_gem.c
     94  *obj = &robj->tbo.base;    in radeon_gem_object_create()
    127  r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);    in radeon_gem_set_domain()
    343  r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);    in radeon_gem_userptr_ioctl()
    370  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);    in radeon_gem_userptr_ioctl()
    444  if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {    in radeon_mode_dumb_mmap()
    476  r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);    in radeon_gem_busy_ioctl()
    482  cur_placement = READ_ONCE(robj->tbo.mem.mem_type);    in radeon_gem_busy_ioctl()
    505  ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);    in radeon_gem_wait_idle_ioctl()
    512  cur_placement = READ_ONCE(robj->tbo.mem.mem_type);    in radeon_gem_wait_idle_ioctl()
    583  tv.bo = &bo_va->bo->tbo;    in radeon_gem_va_update_vm()
    [all …]
radeon_cs.c
    168  if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {    in radeon_cs_parser_relocs()
    191  p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;    in radeon_cs_parser_relocs()
    275  resv = reloc->robj->tbo.base.resv;    in radeon_cs_sync_rings()
    419  return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;    in cmp_size_smaller_first()
    461  drm_gem_object_put_unlocked(&bo->tbo.base);    in radeon_cs_parser_fini()
    533  &rdev->ring_tmp_bo.bo->tbo.mem);    in radeon_bo_vm_update_pte()
    547  r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);    in radeon_bo_vm_update_pte()
/netbsd-src/sys/external/bsd/drm2/radeon/
radeondrmkmsfb.c
    174  const unsigned num_pages __diagused = rbo->tbo.num_pages;    in radeonfb_drmfb_mmapfb()
    177  KASSERT(rbo->tbo.mem.bus.is_iomem);    in radeonfb_drmfb_mmapfb()
    179  if (ISSET(rbo->tbo.mem.placement, TTM_PL_FLAG_WC))    in radeonfb_drmfb_mmapfb()
    182  return bus_space_mmap(rbo->tbo.bdev->memt,    in radeonfb_drmfb_mmapfb()
    183  rbo->tbo.mem.bus.base, rbo->tbo.mem.bus.offset + offset,    in radeonfb_drmfb_mmapfb()
/netbsd-src/sys/external/bsd/drm2/amdgpu/
amdgpufb.c
    169  const unsigned num_pages __diagused = rbo->tbo.num_pages;    in amdgpufb_drmfb_mmapfb()
    174  KASSERT(rbo->tbo.mem.bus.is_iomem);    in amdgpufb_drmfb_mmapfb()
    176  if (ISSET(rbo->tbo.mem.placement, TTM_PL_FLAG_WC))    in amdgpufb_drmfb_mmapfb()
    179  return bus_space_mmap(rbo->tbo.bdev->memt, rbo->tbo.mem.bus.base,    in amdgpufb_drmfb_mmapfb()
    180  rbo->tbo.mem.bus.offset + offset, prot, flags);    in amdgpufb_drmfb_mmapfb()
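Both NetBSD framebuffer attachments map the scanout BO the same way: assert that the object sits in I/O memory (`tbo.mem.bus.is_iomem`), check the TTM placement for `TTM_PL_FLAG_WC` (presumably to request a prefetchable, write-combined mapping; the flag assignment itself falls between the quoted lines), and hand `bus.base + bus.offset + offset` to `bus_space_mmap()`. The stub below mirrors only that decision logic with stand-in types and constants; it is not the NetBSD bus_space API.

#include <stdio.h>
#include <assert.h>

#define DEMO_PL_FLAG_WC		0x1	/* stand-in for TTM_PL_FLAG_WC */
#define DEMO_MAP_PREFETCHABLE	0x1	/* stand-in for a prefetchable-mapping flag */

struct demo_bo {
	int is_iomem;			/* mirrors rbo->tbo.mem.bus.is_iomem */
	unsigned placement;		/* mirrors rbo->tbo.mem.placement */
	unsigned long bus_base;		/* mirrors rbo->tbo.mem.bus.base */
	unsigned long bus_offset;	/* mirrors rbo->tbo.mem.bus.offset */
};

/* Stand-in for bus_space_mmap(): just report what would be mapped and how. */
static unsigned long demo_bus_space_mmap(unsigned long base, unsigned long off, int flags)
{
	printf("map %#lx flags %#x\n", base + off, (unsigned)flags);
	return base + off;
}

/* Mirrors the shape of amdgpufb_drmfb_mmapfb() / radeonfb_drmfb_mmapfb(). */
static unsigned long demo_drmfb_mmapfb(const struct demo_bo *bo, unsigned long offset)
{
	int flags = 0;

	assert(bo->is_iomem);			/* scanout BO must be in I/O (VRAM) space */
	if (bo->placement & DEMO_PL_FLAG_WC)
		flags |= DEMO_MAP_PREFETCHABLE;	/* write-combining -> prefetchable map */

	return demo_bus_space_mmap(bo->bus_base, bo->bus_offset + offset, flags);
}

int main(void)
{
	struct demo_bo bo = { 1, DEMO_PL_FLAG_WC, 0x40000000UL, 0 };

	(void)demo_drmfb_mmapfb(&bo, 4096);
	return 0;
}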