// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */

/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from the managers to
 * struct sg_table, basically providing the mapping from i915 GEM regions
 * to TTM memory types and resource managers.
 */

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
        struct drm_device *drm = &dev_priv->drm;

#ifdef notyet
        return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
                               drm->dev, drm->anon_inode->i_mapping,
                               drm->vma_offset_manager, false, false);
#else
        return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
                               drm->dev, /*drm->anon_inode->i_mapping*/NULL,
                               drm->vma_offset_manager, false, false);
#endif
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
        ttm_device_fini(&dev_priv->bdev);
}

/*
 * Map the i915 memory regions to TTM memory types. We use the
 * driver-private types for now, reserving TTM_PL_VRAM for stolen
 * memory and TTM_PL_TT for GGTT use, should we decide to implement it.
 */
int intel_region_to_ttm_type(const struct intel_memory_region *mem)
{
        int type;

        GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
                   mem->type != INTEL_MEMORY_MOCK &&
                   mem->type != INTEL_MEMORY_SYSTEM);

        if (mem->type == INTEL_MEMORY_SYSTEM)
                return TTM_PL_SYSTEM;

        type = mem->instance + TTM_PL_PRIV;
        GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

        return type;
}
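
/*
 * Example (an illustrative sketch only, not driver code): on a device
 * with two LMEM instances, the mapping implemented above works out to:
 *
 *      INTEL_MEMORY_SYSTEM             -> TTM_PL_SYSTEM
 *      INTEL_MEMORY_LOCAL, instance 0  -> TTM_PL_PRIV
 *      INTEL_MEMORY_LOCAL, instance 1  -> TTM_PL_PRIV + 1
 *
 * so a hypothetical selftest holding a system memory region smem could
 * assert:
 *
 *      GEM_BUG_ON(intel_region_to_ttm_type(smem) != TTM_PL_SYSTEM);
 */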

/**
 * intel_region_ttm_init - Initialize a memory region for TTM.
 * @mem: The region to initialize.
 *
 * This function initializes a suitable TTM resource manager for the
 * region, and if it's a LMEM region type, attaches it to the TTM
 * device. MOCK regions are NOT attached to the TTM device, since we don't
 * have one for the mock selftests.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_init(struct intel_memory_region *mem)
{
        struct ttm_device *bdev = &mem->i915->bdev;
        int mem_type = intel_region_to_ttm_type(mem);
        int ret;

        ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
                                      resource_size(&mem->region),
                                      resource_size(&mem->io),
                                      mem->min_page_size, PAGE_SIZE);
        if (ret)
                return ret;

        mem->region_private = ttm_manager_type(bdev, mem_type);

        return 0;
}

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 */
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
        struct ttm_resource_manager *man = mem->region_private;
        int ret = -EBUSY;
        int count;

        /*
         * Put the region's move fences. This releases requests that
         * may hold on to contexts and vms that may hold on to buffer
         * objects placed in this region.
         */
        if (man)
                ttm_resource_manager_cleanup(man);

        /* Flush objects from region. */
        for (count = 0; count < 10; ++count) {
                i915_gem_flush_free_objects(mem->i915);

                mutex_lock(&mem->objects.lock);
                if (list_empty(&mem->objects.list))
                        ret = 0;
                mutex_unlock(&mem->objects.lock);
                if (!ret)
                        break;

                drm_msleep(20);
                drain_workqueue(mem->i915->bdev.wq);
        }

        /* If we leaked objects, don't free the region; that would cause a use-after-free. */
        if (ret || !man)
                return ret;

        ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
                                      intel_region_to_ttm_type(mem));
        GEM_WARN_ON(ret);
        mem->region_private = NULL;

        return ret;
}
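
/*
 * Example (a minimal, hypothetical sketch; error handling abbreviated):
 * region setup and teardown pair the two functions above, bearing in
 * mind that intel_region_ttm_fini() can return -EBUSY if objects were
 * leaked into the region:
 *
 *      int err = intel_region_ttm_init(mem);
 *
 *      if (err)
 *              return err;
 *
 *      ... create and destroy objects placed in the region ...
 *
 *      err = intel_region_ttm_fini(mem);
 *      GEM_WARN_ON(err);
 */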

/**
 * intel_region_ttm_resource_to_rsgt -
 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
 * @mem: The memory region.
 * @res: The resource manager resource obtained from the TTM resource manager.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * The gem backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A malloced, refcounted sg_table on success, an error pointer
 * on failure.
 */
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
                                  struct ttm_resource *res,
                                  u32 page_alignment)
{
        if (mem->is_range_manager) {
                struct ttm_range_mgr_node *range_node =
                        to_ttm_range_mgr_node(res);

                return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
                                              mem->region.start,
                                              page_alignment);
        } else {
                return i915_rsgt_from_buddy_resource(res, mem->region.start,
                                                     page_alignment);
        }
}
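
/*
 * Example (an illustrative sketch; the surrounding backend code is
 * hypothetical, and i915_refct_sgt_put() is assumed from
 * i915_scatterlist.h): a gem backend holding a struct ttm_resource for
 * an object placed in @mem could translate it like so:
 *
 *      struct i915_refct_sgt *rsgt;
 *
 *      rsgt = intel_region_ttm_resource_to_rsgt(mem, res, PAGE_SIZE);
 *      if (IS_ERR(rsgt))
 *              return PTR_ERR(rsgt);
 *
 *      ... operate on rsgt->table ...
 *
 *      i915_refct_sgt_put(rsgt);
 */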

#ifdef CONFIG_DRM_I915_SELFTEST
/**
 * intel_region_ttm_resource_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @offset: BO offset
 * @size: The requested size in bytes
 * @flags: Allocation flags
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use if you are not completely sure that's the
 * case. The returned opaque node can be converted to an sg_table using
 * intel_region_ttm_resource_to_rsgt(), and can be freed using
 * intel_region_ttm_resource_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
                                resource_size_t offset,
                                resource_size_t size,
                                unsigned int flags)
{
        struct ttm_resource_manager *man = mem->region_private;
        struct ttm_place place = {};
        struct ttm_buffer_object mock_bo = {};
        struct ttm_resource *res;
        int ret;

        if (flags & I915_BO_ALLOC_CONTIGUOUS)
                place.flags |= TTM_PL_FLAG_CONTIGUOUS;
        if (offset != I915_BO_INVALID_OFFSET) {
                if (WARN_ON(overflows_type(offset >> PAGE_SHIFT, place.fpfn))) {
                        ret = -E2BIG;
                        goto out;
                }
                place.fpfn = offset >> PAGE_SHIFT;
                if (WARN_ON(overflows_type(place.fpfn + (size >> PAGE_SHIFT), place.lpfn))) {
                        ret = -E2BIG;
                        goto out;
                }
                place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
        } else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
                if (flags & I915_BO_ALLOC_GPU_ONLY) {
                        place.flags |= TTM_PL_FLAG_TOPDOWN;
                } else {
                        place.fpfn = 0;
                        if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
                                ret = -E2BIG;
                                goto out;
                        }
                        place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
                }
        }

        mock_bo.base.size = size;
        mock_bo.bdev = &mem->i915->bdev;

        ret = man->func->alloc(man, &mock_bo, &place, &res);

out:
        if (ret == -ENOSPC)
                ret = -ENXIO;
        if (!ret)
                res->bo = NULL; /* Rather blow up than risk a use-after-free */
        return ret ? ERR_PTR(ret) : res;
}

#endif

/**
 * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
 * @mem: The region the resource was allocated from.
 * @res: The opaque resource representing an allocation.
 */
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
                                    struct ttm_resource *res)
{
        struct ttm_resource_manager *man = mem->region_private;
        struct ttm_buffer_object mock_bo = {};

        mock_bo.base.size = res->size;
        mock_bo.bdev = &mem->i915->bdev;
        res->bo = &mock_bo;

        man->func->free(man, res);
}
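
/*
 * Example (a selftest-style sketch; everything other than the exported
 * helpers above is assumed): allocating and freeing a 64K chunk from a
 * standalone manager, bypassing TTM eviction:
 *
 *      struct ttm_resource *res;
 *
 *      res = intel_region_ttm_resource_alloc(mem, I915_BO_INVALID_OFFSET,
 *                                            SZ_64K, 0);
 *      if (IS_ERR(res))
 *              return PTR_ERR(res);
 *
 *      ... exercise the allocation, e.g. via
 *      intel_region_ttm_resource_to_rsgt() ...
 *
 *      intel_region_ttm_resource_free(mem, res);
 */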