Lines Matching refs:ttm
46 #include <drm/ttm/ttm_bo.h>
47 #include <drm/ttm/ttm_placement.h>
48 #include <drm/ttm/ttm_range_manager.h>
49 #include <drm/ttm/ttm_tt.h>
69 struct ttm_tt *ttm,
72 struct ttm_tt *ttm);
249 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
258 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
479 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
488 bo->ttm == NULL)) {
507 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
655 struct ttm_tt ttm;
665 #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
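The macro at line 665 is the standard embedded-base upcast: struct amdgpu_ttm_tt embeds a struct ttm_tt member named ttm, and ttm_to_amdgpu_ttm_tt() uses container_of() to get from the base pointer back to the wrapper. A minimal userspace sketch of that pattern; the stand-in struct and macro names below are invented for illustration, only container_of itself mirrors the kernel macro:

#include <stddef.h>
#include <stdio.h>

/* same arithmetic as the kernel's container_of(): subtract the member offset */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_tt {		/* stands in for struct ttm_tt */
	unsigned long num_pages;
};

struct driver_tt {		/* stands in for struct amdgpu_ttm_tt */
	struct base_tt ttm;	/* base object embedded as a member */
	unsigned long userptr;
};

#define to_driver_tt(p) container_of(p, struct driver_tt, ttm)

int main(void)
{
	struct driver_tt gtt = { .ttm = { .num_pages = 4 }, .userptr = 0x1000 };
	struct base_tt *base = &gtt.ttm;

	/* recover the driver-private wrapper from the base pointer */
	struct driver_tt *back = to_driver_tt(base);

	printf("userptr=%#lx num_pages=%lu\n", back->userptr, back->ttm.num_pages);
	return 0;
}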
678 struct ttm_tt *ttm = bo->tbo.ttm;
679 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
710 readonly = amdgpu_ttm_tt_is_readonly(ttm);
711 r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
725 void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
728 struct amdgpu_ttm_tt *gtt = (void *)ttm;
736 * Check if the pages backing this ttm range have been invalidated
740 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
743 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
749 gtt->userptr, ttm->num_pages);
764 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct vm_page **pages)
768 for (i = 0; i < ttm->num_pages; ++i)
769 ttm->pages[i] = pages ? pages[i] : NULL;
778 struct ttm_tt *ttm)
784 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
791 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
792 (u64)ttm->num_pages << PAGE_SHIFT,
798 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
803 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
804 ttm->num_pages);
809 sg_free_table(ttm->sg);
811 kfree(ttm->sg);
812 ttm->sg = NULL;
821 struct ttm_tt *ttm)
826 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
832 if (!ttm->sg || !ttm->sg->sgl)
836 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
837 sg_free_table(ttm->sg);
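Lines 778-837 are the userptr pin/unpin pair: the pages are gathered into an sg_table, DMA-mapped, flattened into gtt->ttm.dma_address (one entry per page) by drm_prime_sg_to_dma_addr_array(), and on unpin the table is unmapped and freed. A hedged userspace sketch of just the flattening step; the chunk type, helper name and addresses are invented for illustration and are not the DRM helper itself:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct dma_chunk {		/* stands in for one DMA entry of an sg_table */
	uint64_t addr;
	uint64_t len;		/* assumed to be a multiple of PAGE_SIZE */
};

/* expand (address, length) chunks into one address per page */
static size_t expand_to_pages(const struct dma_chunk *sg, size_t nents,
			      uint64_t *out, size_t max_pages)
{
	size_t n = 0;

	for (size_t i = 0; i < nents; i++)
		for (uint64_t off = 0; off < sg[i].len && n < max_pages;
		     off += PAGE_SIZE)
			out[n++] = sg[i].addr + off;
	return n;
}

int main(void)
{
	struct dma_chunk sg[] = {
		{ .addr = 0x100000, .len = 2 * PAGE_SIZE },
		{ .addr = 0x400000, .len = 1 * PAGE_SIZE },
	};
	uint64_t pages[8];
	size_t n = expand_to_pages(sg, 2, pages, 8);

	for (size_t i = 0; i < n; i++)
		printf("page %zu -> %#llx\n", i, (unsigned long long)pages[i]);
	return 0;
}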
849 struct ttm_tt *ttm, uint64_t flags)
851 struct amdgpu_ttm_tt *gtt = (void *)ttm;
852 uint64_t total_pages = ttm->num_pages;
866 1, &gtt->ttm.dma_address[page_idx], flags);
874 &gtt->ttm.dma_address[page_idx + 1],
884 struct ttm_tt *ttm = tbo->ttm;
885 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
891 amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
893 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
894 gtt->ttm.dma_address, flags);
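Lines 849-894 do the actual GART binding: amdgpu_gart_bind() is handed a start offset, a page count, the per-page DMA addresses and the PTE flags. Conceptually, each page gets one page-table entry combining its DMA address with the flags. A hedged sketch of that idea; the table, flag bits and helper below are invented, the real call programs hardware page tables:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PTE_VALID  (1ULL << 0)	/* invented flag bits, not AMDGPU_PTE_* */
#define PTE_WRITE  (1ULL << 1)

static uint64_t gart[64];	/* stands in for the GART page table */

/* write one PTE per page starting at the slot selected by the GTT offset */
static void gart_bind(uint64_t offset, unsigned int num_pages,
		      const uint64_t *dma_address, uint64_t flags)
{
	uint64_t slot = offset >> PAGE_SHIFT;

	for (unsigned int i = 0; i < num_pages; i++)
		gart[slot + i] = dma_address[i] | flags;
}

int main(void)
{
	uint64_t dma_address[] = { 0x100000, 0x101000, 0x400000 };

	gart_bind(2UL << PAGE_SHIFT, 3, dma_address, PTE_VALID | PTE_WRITE);
	for (int i = 0; i < 6; i++)
		printf("pte[%d] = %#llx\n", i, (unsigned long long)gart[i]);
	return 0;
}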
906 struct ttm_tt *ttm,
910 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
921 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
926 } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
927 if (!ttm->sg) {
941 ttm->sg = sgt;
944 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
945 ttm->num_pages);
948 if (!ttm->num_pages) {
950 ttm->num_pages, bo_mem, ttm);
960 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
964 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
965 gtt->ttm.dma_address, flags);
982 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1013 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
1036 if (!tbo->ttm)
1039 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
1050 struct ttm_tt *ttm)
1053 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1057 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1058 } else if (ttm->sg && gtt->gobj->import_attach) {
1063 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1067 ttm->sg = NULL;
1077 amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1082 struct ttm_tt *ttm)
1084 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1091 ttm_tt_fini(&gtt->ttm);
1127 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1131 return &gtt->ttm;
1141 struct ttm_tt *ttm,
1145 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1152 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1153 if (!ttm->sg)
1158 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1165 ret = ttm_pool_alloc(pool, ttm, ctx);
1170 for (i = 0; i < ttm->num_pages; ++i)
1171 ttm->pages[i]->mapping = bdev->dev_mapping;
1184 struct ttm_tt *ttm)
1186 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1192 amdgpu_ttm_backend_unbind(bdev, ttm);
1195 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1196 kfree(ttm->sg);
1197 ttm->sg = NULL;
1201 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1204 for (i = 0; i < ttm->num_pages; ++i) {
1205 page = ttm->pages[i];
1218 return ttm_pool_free(pool, ttm);
1233 if (!tbo->ttm)
1236 gtt = (void *)tbo->ttm;
1258 if (!bo->ttm) {
1260 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1261 if (bo->ttm == NULL)
1266 bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1268 gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1285 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1287 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1308 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1311 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1320 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
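Lines 1308-1320 (amdgpu_ttm_tt_affect_userptr) decide whether an invalidated CPU address range touches the userptr mapping, i.e. whether [start, end] intersects [userptr, userptr + num_pages * PAGE_SIZE). A hedged sketch of that interval test with invented names:

#include <stdbool.h>
#include <stdio.h>

/* true unless the mapping lies entirely outside the invalidated range */
static bool affects_userptr(unsigned long userptr, unsigned long size,
			    unsigned long start, unsigned long end)
{
	if (userptr > end || userptr + size <= start)
		return false;
	return true;
}

int main(void)
{
	/* mapping covers [0x1000, 0x5000) */
	printf("%d\n", affects_userptr(0x1000, 0x4000, 0x4000, 0x6000)); /* 1: overlaps */
	printf("%d\n", affects_userptr(0x1000, 0x4000, 0x5000, 0x6000)); /* 0: disjoint */
	return 0;
}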
1332 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1334 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1345 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1347 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1358 * @ttm: The ttm_tt object to compute the flags for
1363 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1375 if (ttm->caching == ttm_cached)
1390 * @ttm: The ttm_tt object to compute the flags for
1395 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1398 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1403 if (!amdgpu_ttm_tt_is_readonly(ttm))
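Lines 1363-1403 compute page-table flags in two layers: amdgpu_ttm_tt_pde_flags() derives base bits from the placement and caching mode, and amdgpu_ttm_tt_pte_flags() then adds read access plus write access unless the ttm_tt is a read-only userptr. A hedged sketch of that composition; the flag values and placement enum below are invented, not the real AMDGPU_PTE_* bits:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define PTE_VALID     (1ULL << 0)	/* invented flag bits */
#define PTE_SYSTEM    (1ULL << 1)
#define PTE_SNOOPED   (1ULL << 2)
#define PTE_READABLE  (1ULL << 5)
#define PTE_WRITEABLE (1ULL << 6)

enum placement { PL_SYSTEM, PL_TT, PL_VRAM };

static uint64_t pte_flags(enum placement pl, bool cached, bool readonly)
{
	uint64_t flags = 0;

	if (pl != PL_SYSTEM)
		flags |= PTE_VALID;
	if (pl == PL_TT) {
		flags |= PTE_SYSTEM;
		if (cached)		/* cached system pages need snooping */
			flags |= PTE_SNOOPED;
	}
	flags |= PTE_READABLE;
	if (!readonly)			/* read-only userptr stays write-protected */
		flags |= PTE_WRITEABLE;
	return flags;
}

int main(void)
{
	printf("GTT cached rw:    %#llx\n",
	       (unsigned long long)pte_flags(PL_TT, true, false));
	printf("readonly userptr: %#llx\n",
	       (unsigned long long)pte_flags(PL_TT, true, true));
	return 0;
}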
1860 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1897 DRM_ERROR("failed to init ttm pools(%d).\n", r);
2118 DRM_INFO("amdgpu: ttm finalized\n");