/*	$NetBSD: radeon_gart.c,v 1.11 2020/01/20 23:22:09 jmcneill Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_gart.c,v 1.11 2020/01/20 23:22:09 jmcneill Exp $");

#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */

/*
 * Common GART table functions.
 */
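
/*
 * Illustrative sketch (editor's note, not part of the driver): the
 * table is indexed by GPU page number within the aperture, so a GPU
 * address addr inside the aperture resolves to its backing entry
 * roughly as
 *
 *	unsigned i = (addr - rdev->mc.gtt_start) / RADEON_GPU_PAGE_SIZE;
 *	uint64_t entry = rdev->gart.pages_entry[i];
 *
 * RADEON_GPU_PAGE_SIZE (4KB) may be smaller than the CPU's PAGE_SIZE,
 * in which case one CPU page backs several consecutive GART entries.
 */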

/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
#ifdef __NetBSD__
	int rsegs;
	int error;

	error = bus_dmamem_alloc(rdev->ddev->dmat, rdev->gart.table_size,
	    PAGE_SIZE, 0, &rdev->gart.rg_table_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (error)
		goto fail0;
	KASSERT(rsegs == 1);
	error = bus_dmamap_create(rdev->ddev->dmat, rdev->gart.table_size, 1,
	    rdev->gart.table_size, 0, BUS_DMA_WAITOK,
	    &rdev->gart.rg_table_map);
	if (error)
		goto fail1;
	error = bus_dmamem_map(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1,
	    rdev->gart.table_size, &rdev->gart.ptr,
	    BUS_DMA_WAITOK|BUS_DMA_NOCACHE);
	if (error)
		goto fail2;
	error = bus_dmamap_load(rdev->ddev->dmat, rdev->gart.rg_table_map,
	    rdev->gart.ptr, rdev->gart.table_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto fail3;

	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);

	/* Success!  */
	rdev->gart.table_addr = rdev->gart.rg_table_map->dm_segs[0].ds_addr;
	return 0;

fail4: __unused
	bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail3:	bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
	    rdev->gart.table_size);
fail2:	bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail1:	bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
fail0:	KASSERT(error);
	/* XXX errno NetBSD->Linux */
	return -error;
#else
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
	    &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
		    rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
#endif
}

/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
	if (rdev->gart.ptr == NULL) {
		return;
	}
#ifdef __NetBSD__
	bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
	bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
	    rdev->gart.table_size);
	bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
	bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
	rdev->gart.ptr = NULL;	/* guard against double free on a later call */
	rdev->gart.table_addr = 0;
#else
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.ptr,
		    rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
	    (void *)rdev->gart.ptr,
	    rdev->gart.table_addr);
	rdev->gart.ptr = NULL;
	rdev->gart.table_addr = 0;
#endif
}
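
/*
 * Illustrative usage (editor's note; assumed caller, e.g. asic init
 * code, not from this file): on asics with a system-memory table,
 * allocation and teardown would pair roughly as
 *
 *	r = radeon_gart_init(rdev);
 *	if (r == 0)
 *		r = radeon_gart_table_ram_alloc(rdev);
 *	...
 *	radeon_gart_table_ram_free(rdev);
 *	radeon_gart_fini(rdev);
 */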

/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
		    PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
		    0, NULL, NULL, &rdev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
	    RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;

	if (!r) {
		int i;

		/* We might have dropped some GART table updates while it wasn't
		 * mapped, restore all entries
		 */
		for (i = 0; i < rdev->gart.num_gpu_pages; i++)
			radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
		mb();
		radeon_gart_tlb_flush(rdev);
	}

	return r;
}

/**
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}
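
/*
 * Illustrative pairing (editor's note; assumed callers, e.g. asic
 * startup/suspend paths, not from this file): the table is pinned
 * before the GART is enabled and unpinned once it is disabled, so the
 * memory manager cannot move it while the hardware is using it:
 *
 *	r = radeon_gart_table_vram_pin(rdev);	(startup/resume)
 *	...
 *	radeon_gart_table_vram_unpin(rdev);	(suspend)
 *	radeon_gart_table_vram_free(rdev);	(final teardown)
 */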

/**
 * radeon_gart_table_vram_free - free gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		return;
	}
	radeon_bo_unref(&rdev->gart.robj);
}

#ifdef __NetBSD__
static void
radeon_gart_pre_update(struct radeon_device *rdev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	if (rdev->gart.rg_table_map != NULL) {
		const unsigned entsize =
		    rdev->gart.table_size / rdev->gart.num_gpu_pages;

		bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_POSTWRITE);
	}
}

static void
radeon_gart_post_update(struct radeon_device *rdev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	if (rdev->gart.rg_table_map != NULL) {
		const unsigned entsize =
		    rdev->gart.table_size / rdev->gart.num_gpu_pages;

		bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_PREWRITE);
	}
	if (rdev->gart.ptr != NULL) {
		membar_sync();	/* XXX overkill */
		radeon_gart_tlb_flush(rdev);
	}
}
#endif

/*
 * Common gart functions.
 */
#ifdef __NetBSD__
void
radeon_gart_unbind(struct radeon_device *rdev, unsigned gpu_start,
    unsigned npages)
{
	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
	const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
	unsigned pgno, gpu_pgno;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages <= rdev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= rdev->gart.num_gpu_pages);

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}

	radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		if (rdev->gart.pages[pgstart + pgno] == NULL)
			continue;
		rdev->gart.pages[pgstart + pgno] = NULL;
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			const unsigned t = gpu_pgstart + gpu_per_cpu*pgno +
			    gpu_pgno;
			rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
			if (rdev->gart.ptr == NULL)
				continue;
			radeon_gart_set_page(rdev, t, rdev->dummy_page.entry);
		}
	}
	radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);
}
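
/*
 * Worked example (editor's note, illustrative): with PAGE_SIZE = 8192
 * and RADEON_GPU_PAGE_SIZE = 4096, gpu_per_cpu = 2, so unbinding
 * npages = 3 CPU pages rewrites gpu_npages = 6 consecutive GART
 * entries, each redirected to the dummy page.
 */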
#else
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
    int pages)
{
	unsigned t;
	unsigned p;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t,
					    rdev->dummy_page.entry);
				}
			}
		}
	}
	if (rdev->gart.ptr) {
		mb();
		radeon_gart_tlb_flush(rdev);
	}
}
#endif

#ifdef __NetBSD__
int
radeon_gart_bind(struct radeon_device *rdev, unsigned gpu_start,
    unsigned npages, struct page **pages, bus_dmamap_t dmamap, uint32_t flags)
{
	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
	const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
	unsigned pgno, gpu_pgno;
	uint64_t page_entry;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages == dmamap->dm_nsegs);
	KASSERT(npages <= rdev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= rdev->gart.num_gpu_pages);

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		const bus_addr_t addr = dmamap->dm_segs[pgno].ds_addr;

		KASSERT(dmamap->dm_segs[pgno].ds_len == PAGE_SIZE);
		rdev->gart.pages[pgstart + pgno] = pages[pgno];
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			const unsigned i = gpu_pgstart + gpu_per_cpu*pgno +
			    gpu_pgno;
			page_entry = radeon_gart_get_page_entry(
			    addr + gpu_pgno*RADEON_GPU_PAGE_SIZE, flags);
			rdev->gart.pages_entry[i] = page_entry;
			if (rdev->gart.ptr == NULL)
				continue;
			radeon_gart_set_page(rdev, i, page_entry);
		}
	}
	radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);

	return 0;
}
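
/*
 * Illustrative call (editor's note; assumed caller, e.g. a TTM GTT
 * backend, not from this file): binding npages CPU pages at byte
 * offset gpu_start into the aperture would look roughly like
 *
 *	r = radeon_gart_bind(rdev, gpu_start, npages, pages, dmamap,
 *	    RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
 *	    RADEON_GART_PAGE_WRITE);
 *
 * with the RADEON_GART_PAGE_* flags consumed by
 * radeon_gart_get_page_entry().
 */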
#else
/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: RADEON_GART_PAGE_* flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
    int pages, struct page **pagelist, dma_addr_t *dma_addr,
    uint32_t flags)
{
	unsigned t;
	unsigned p;
	uint64_t page_base, page_entry;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages[p] = pagelist[i];
		page_base = dma_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			page_entry = radeon_gart_get_page_entry(page_base, flags);
			rdev->gart.pages_entry[t] = page_entry;
			if (rdev->gart.ptr) {
				radeon_gart_set_page(rdev, t, page_entry);
			}
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	if (rdev->gart.ptr) {
		mb();
		radeon_gart_tlb_flush(rdev);
	}
	return 0;
}
#endif

/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
	    rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
	    rdev->gart.num_gpu_pages);
	if (rdev->gart.pages_entry == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
		rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
	return 0;
}

/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void radeon_gart_fini(struct radeon_device *rdev)
{
	if (rdev->gart.ready) {
		/* unbind pages */
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	}
	rdev->gart.ready = false;
	vfree(rdev->gart.pages);
	vfree(rdev->gart.pages_entry);
	rdev->gart.pages = NULL;
	rdev->gart.pages_entry = NULL;

	radeon_dummy_page_fini(rdev);
}
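
/*
 * Worked sizing example (editor's note, illustrative): with
 * mc.gtt_size = 256MB and PAGE_SIZE = RADEON_GPU_PAGE_SIZE = 4096,
 * radeon_gart_init() computes num_cpu_pages = num_gpu_pages = 65536;
 * an asic whose GART entries are 4 bytes each would then set
 * table_size = 256KB.
 */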