1fb4d8502Sjsg /* 2fb4d8502Sjsg * Copyright 2008 Advanced Micro Devices, Inc. 3fb4d8502Sjsg * Copyright 2008 Red Hat Inc. 4fb4d8502Sjsg * Copyright 2009 Jerome Glisse. 5fb4d8502Sjsg * 6fb4d8502Sjsg * Permission is hereby granted, free of charge, to any person obtaining a 7fb4d8502Sjsg * copy of this software and associated documentation files (the "Software"), 8fb4d8502Sjsg * to deal in the Software without restriction, including without limitation 9fb4d8502Sjsg * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10fb4d8502Sjsg * and/or sell copies of the Software, and to permit persons to whom the 11fb4d8502Sjsg * Software is furnished to do so, subject to the following conditions: 12fb4d8502Sjsg * 13fb4d8502Sjsg * The above copyright notice and this permission notice shall be included in 14fb4d8502Sjsg * all copies or substantial portions of the Software. 15fb4d8502Sjsg * 16fb4d8502Sjsg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17fb4d8502Sjsg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18fb4d8502Sjsg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19fb4d8502Sjsg * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20fb4d8502Sjsg * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21fb4d8502Sjsg * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22fb4d8502Sjsg * OTHER DEALINGS IN THE SOFTWARE. 
23fb4d8502Sjsg * 24fb4d8502Sjsg * Authors: Dave Airlie 25fb4d8502Sjsg * Alex Deucher 26fb4d8502Sjsg * Jerome Glisse 27fb4d8502Sjsg */ 28c349dbc7Sjsg 29c349dbc7Sjsg #include <linux/pci.h> 30c349dbc7Sjsg #include <linux/vmalloc.h> 31c349dbc7Sjsg 32fb4d8502Sjsg #include <drm/amdgpu_drm.h> 33fb4d8502Sjsg #ifdef CONFIG_X86 34fb4d8502Sjsg #include <asm/set_memory.h> 35fb4d8502Sjsg #endif 36fb4d8502Sjsg #include "amdgpu.h" 37*f556b3f0Sjsg #include "amdgpu_reset.h" 381bb76ff1Sjsg #include <drm/drm_drv.h> 39f005ef32Sjsg #include <drm/ttm/ttm_tt.h> 40fb4d8502Sjsg 41fb4d8502Sjsg /* 42fb4d8502Sjsg * GART 43fb4d8502Sjsg * The GART (Graphics Aperture Remapping Table) is an aperture 44fb4d8502Sjsg * in the GPU's address space. System pages can be mapped into 45fb4d8502Sjsg * the aperture and look like contiguous pages from the GPU's 46fb4d8502Sjsg * perspective. A page table maps the pages in the aperture 47fb4d8502Sjsg * to the actual backing pages in system memory. 48fb4d8502Sjsg * 49fb4d8502Sjsg * Radeon GPUs support both an internal GART, as described above, 50fb4d8502Sjsg * and AGP. AGP works similarly, but the GART table is configured 51fb4d8502Sjsg * and maintained by the northbridge rather than the driver. 52fb4d8502Sjsg * Radeon hw has a separate AGP aperture that is programmed to 53fb4d8502Sjsg * point to the AGP aperture provided by the northbridge and the 54fb4d8502Sjsg * requests are passed through to the northbridge aperture. 55fb4d8502Sjsg * Both AGP and internal GART can be used at the same time, however 56fb4d8502Sjsg * that is not currently supported by the driver. 57fb4d8502Sjsg * 58fb4d8502Sjsg * This file handles the common internal GART management. 59fb4d8502Sjsg */ 60fb4d8502Sjsg 61fb4d8502Sjsg /* 62fb4d8502Sjsg * Common GART table functions. 
 */

/**
 * amdgpu_gart_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
	/* Reuse TTM's global dummy read page as the shared filler page. */
	struct vm_page *dummy_page = ttm_glob.dummy_read_page;

	/* Already DMA-mapped — nothing to do. */
	if (adev->dummy_page_addr)
		return 0;
	adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
					     PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		/* Leave the address zeroed so a retry is possible. */
		adev->dummy_page_addr = 0;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
	/* Nothing mapped — nothing to unmap. */
	if (!adev->dummy_page_addr)
		return;
	dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
		       DMA_BIDIRECTIONAL);
	adev->dummy_page_addr = 0;
}

/**
 * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate system memory for GART page table for ASICs that don't have
 * dedicated VRAM.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
{
	/* Not implemented in this port yet; the upstream code is kept
	 * below under "notyet" for reference. */
	STUB();
	return -ENOSYS;
#ifdef notyet
	unsigned int order = get_order(adev->gart.table_size);
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
	struct amdgpu_bo *bo = NULL;
	struct sg_table *sg = NULL;
	struct amdgpu_bo_param bp;
	dma_addr_t dma_addr;
	struct vm_page *p;
	int ret;

	/* Table already allocated. */
	if (adev->gart.bo != NULL)
		return 0;

	p = alloc_pages(gfp_flags, order);
	if (!p)
		return -ENOMEM;

	/* If the hardware does not support UTCL2 snooping of the CPU caches
	 * then set_memory_wc() could be used as a workaround to mark the pages
	 * as write combine memory.
	 */
	dma_addr = dma_map_page(&adev->pdev->dev, p, 0, adev->gart.table_size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&adev->pdev->dev, dma_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the GART BO page\n");
		__free_pages(p, order);
		p = NULL;
		return -EFAULT;
	}

	dev_info(adev->dev, "%s dma_addr:%pad\n", __func__, &dma_addr);
	/* Create SG table wrapping the single DMA-mapped allocation. */
	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto error;
	}
	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto error;

	sg_dma_address(sg->sgl) = dma_addr;
	sg->sgl->length = adev->gart.table_size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = adev->gart.table_size;
#endif
	/* Create SG BO backed by the table above. */
	memset(&bp, 0, sizeof(bp));
	bp.size = adev->gart.table_size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.type = ttm_bo_type_sg;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
	bp.flags = 0;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to reserve bo for GART system bo\n", ret);
		goto error;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	WARN(ret, "Pinning the GART table failed");
	if (ret)
		goto error_resv;

	adev->gart.bo = bo;
	adev->gart.ptr = page_to_virt(p);
	/* Make GART table accessible in VMID0 */
	ret = amdgpu_ttm_alloc_gart(&adev->gart.bo->tbo);
	if (ret)
		amdgpu_gart_table_ram_free(adev);
	amdgpu_bo_unreserve(bo);

	return 0;

error_resv:
	amdgpu_bo_unreserve(bo);
error:
	amdgpu_bo_unref(&bo);
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	__free_pages(p, order);
	return ret;
#endif
}

/**
 * amdgpu_gart_table_ram_free - free gart page table system ram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the system memory used for the GART page table on ASICs that don't
 * have dedicated VRAM.
 */
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
{
	unsigned int order = get_order(adev->gart.table_size);
	struct sg_table *sg = adev->gart.bo->tbo.sg;
	struct vm_page *p;
	int ret;

	/* Unpin the table BO if we can reserve it, then drop our reference. */
	ret = amdgpu_bo_reserve(adev->gart.bo, false);
	if (!ret) {
		amdgpu_bo_unpin(adev->gart.bo);
		amdgpu_bo_unreserve(adev->gart.bo);
	}
	amdgpu_bo_unref(&adev->gart.bo);
	sg_free_table(sg);
	kfree(sg);
	/* Recover the page backing the CPU mapping and release it. */
	p = virt_to_page(adev->gart.ptr);
	__free_pages(p, order);

	adev->gart.ptr = NULL;
}

/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
	/* Table already allocated. */
	if (adev->gart.bo != NULL)
		return 0;

	return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
				       NULL, (void *)&adev->gart.ptr);
}

/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).
 * These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
}

/*
 * Common gart functions.
 */
/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;
	int idx;

	/* No CPU mapping of the table — nothing to rewrite. */
	if (!adev->gart.ptr)
		return;

	/* Skip the rewrite if the device is already unplugged. */
	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	/* Convert byte offset to GPU page index, then to CPU page index. */
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++) {
		/* Point every entry at the shared dummy page. */
		page_base = adev->dummy_page_addr;
		if (!adev->gart.ptr)
			continue;

		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
					       t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	/* Make the PTE writes visible before flushing HDP and the TLBs. */
	mb();
	amdgpu_device_flush_hdp(adev, NULL);
	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);

	drm_dev_exit(idx);
}

/**
 * amdgpu_gart_map - map dma_addresses into GART entries
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 * @dst: CPU address of the gart table
 *
 * Map the dma_addresses into GART entries (all asics).
 * Does not flush the TLB; callers are responsible for that.
 */
void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
		     int pages, dma_addr_t *dma_addr, uint64_t flags,
		     void *dst)
{
	uint64_t page_base;
	unsigned i, j, t;
	int idx;

	/* Bail out if the device has been unplugged. */
	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	/* Convert byte offset to GPU page index. */
	t = offset / AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < pages; i++) {
		page_base = dma_addr[i];
		/* One CPU page may span several GPU pages. */
		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	drm_dev_exit(idx);
}

/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 */
void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		      int pages, dma_addr_t *dma_addr,
		      uint64_t flags)
{
	/* Nothing to do without a CPU mapping of the GART table. */
	if (!adev->gart.ptr)
		return;

	amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr);
}

/**
 * amdgpu_gart_invalidate_tlb - invalidate gart TLB
 *
 * @adev: amdgpu device driver pointer
 *
 * Invalidate gart TLB which can be used as a way to flush gart changes
 *
 */
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
{
	int i;

	/* No GART table mapped — nothing to flush. */
	if (!adev->gart.ptr)
		return;

	/* Order PTE writes before the HDP/TLB flushes. */
	mb();
	/* NOTE(review): trylock presumably skips the HDP flush while the
	 * reset domain is held for a GPU reset — confirm intent. */
	if (down_read_trylock(&adev->reset_domain->sem)) {
		amdgpu_device_flush_hdp(adev, NULL);
		up_read(&adev->reset_domain->sem);
	}
	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}

/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	/* Already initialized (dummy page mapped). */
	if (adev->dummy_page_addr)
		return 0;

	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = amdgpu_gart_dummy_page_init(adev);
	if (r)
		return r;
	/* Compute table size in both CPU-page and GPU-page units. */
	adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
	adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

	return 0;
}