/*	$NetBSD: ttm_tt.c,v 1.12 2020/02/14 14:34:59 maya Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_tt.c,v 1.12 2020/02/14 14:34:59 maya Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/bus_dma_hacks.h>

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
#ifdef __NetBSD__               /* cpu/dma addrs handled by bus_dma */
        ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
            sizeof(*ttm->ttm.pages));
#else
        ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
                                          sizeof(*ttm->ttm.pages) +
                                          sizeof(*ttm->dma_address) +
                                          sizeof(*ttm->cpu_address));
        ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
        ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
#endif
}
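/*
 * The non-NetBSD branch above packs three parallel arrays into a single
 * allocation: num_pages page pointers, then num_pages CPU addresses,
 * then num_pages DMA addresses, carved out with pointer arithmetic.  A
 * minimal sketch of the same trick, with illustrative names that are
 * not part of TTM:
 *
 *	struct packed {
 *		struct page **pages;
 *		void **cpu;
 *		dma_addr_t *dma;
 *	};
 *
 *	static int pack_alloc(struct packed *p, size_t n)
 *	{
 *		p->pages = drm_calloc_large(n, sizeof(*p->pages) +
 *		    sizeof(*p->cpu) + sizeof(*p->dma));
 *		if (p->pages == NULL)
 *			return -ENOMEM;
 *		p->cpu = (void *)(p->pages + n);	next n slots
 *		p->dma = (void *)(p->cpu + n);		last n slots
 *		return 0;
 *	}
 *
 * One allocation, one drm_free_large(p->pages), and no partial-failure
 * unwinding; the trade-off is that the arrays cannot be resized
 * independently.
 */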
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
#ifdef __NetBSD__
        return 0;
#else
        int ret = 0;

        if (PageHighMem(p))
                return 0;

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */

                ret = set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                ret = set_memory_wc((unsigned long) page_address(p), 1);
        else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);

        return ret;
#endif
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (unlikely(ttm == NULL))
                return;

        if (ttm->state == tt_bound) {
                ttm_tt_unbind(ttm);
        }

        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);

#ifndef __NetBSD__
        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        ttm->swap_storage = NULL;
#endif
        ttm->func->destroy(ttm);
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags,
                struct page *dummy_read_page)
{
        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
#ifdef __NetBSD__
        WARN(size == 0, "zero-size allocation in %s, please file a NetBSD PR",
            __func__);          /* paranoia -- can't prove in five minutes */
        size = MAX(size, 1);
        ttm->swap_storage = uao_create(roundup2(size, PAGE_SIZE), 0);
        uao_set_pgfl(ttm->swap_storage, bus_dmamem_pgfl(bdev->dmat));
#else
        ttm->swap_storage = NULL;
#endif
        TAILQ_INIT(&ttm->pglist);

        ttm_tt_alloc_page_directory(ttm);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
#ifdef __NetBSD__
        uao_detach(ttm->swap_storage);
        ttm->swap_storage = NULL;
#endif
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
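/*
 * A driver backend typically embeds struct ttm_tt, initializes it with
 * ttm_tt_init() from its ttm_tt_create hook, and tears it down with
 * ttm_tt_fini() from its ttm_backend_func::destroy hook.  A hypothetical
 * sketch -- the "mydrv" names and mydrv_backend_func are illustrative,
 * not part of TTM:
 *
 *	struct mydrv_ttm_tt {
 *		struct ttm_tt ttm;
 *	};
 *
 *	static void mydrv_ttm_destroy(struct ttm_tt *ttm)
 *	{
 *		struct mydrv_ttm_tt *tt =
 *		    container_of(ttm, struct mydrv_ttm_tt, ttm);
 *
 *		ttm_tt_fini(ttm);
 *		kfree(tt);
 *	}
 *
 *	static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_bo_device *bdev,
 *	    unsigned long size, uint32_t page_flags,
 *	    struct page *dummy_read_page)
 *	{
 *		struct mydrv_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (tt == NULL)
 *			return NULL;
 *		tt->ttm.func = &mydrv_backend_func;
 *		if (ttm_tt_init(&tt->ttm, bdev, size, page_flags,
 *			dummy_read_page)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */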
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags,
                struct page *dummy_read_page)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
#ifdef __NetBSD__
        WARN(size == 0, "zero-size allocation in %s, please file a NetBSD PR",
            __func__);          /* paranoia -- can't prove in five minutes */
        size = MAX(size, 1);
        ttm->swap_storage = uao_create(roundup2(size, PAGE_SIZE), 0);
        uao_set_pgfl(ttm->swap_storage, bus_dmamem_pgfl(bdev->dmat));
#else
        ttm->swap_storage = NULL;
#endif
        TAILQ_INIT(&ttm->pglist);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        ttm_dma_tt_alloc_page_directory(ttm_dma);
#ifdef __NetBSD__
    {
        int error;

        if (ttm->num_pages > (SIZE_MAX /
                MIN(sizeof(ttm_dma->dma_segs[0]), PAGE_SIZE))) {
                error = ENOMEM;
                goto fail0;
        }
        ttm_dma->dma_segs = kmem_alloc((ttm->num_pages *
                sizeof(ttm_dma->dma_segs[0])), KM_SLEEP);
        error = bus_dmamap_create(ttm->bdev->dmat,
            (ttm->num_pages * PAGE_SIZE), ttm->num_pages, PAGE_SIZE, 0,
            BUS_DMA_WAITOK, &ttm_dma->dma_address);
        if (error)
                goto fail1;

        return 0;

fail2: __unused
        bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
fail1:  kmem_free(ttm_dma->dma_segs, (ttm->num_pages *
                sizeof(ttm_dma->dma_segs[0])));
fail0:  KASSERT(error);
        drm_free_large(ttm->pages);
        uao_detach(ttm->swap_storage);
        /* XXX errno NetBSD->Linux */
        return -error;
    }
#else
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
#endif
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

#ifdef __NetBSD__
        uao_detach(ttm->swap_storage);
        ttm->swap_storage = NULL;
#endif
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
#ifdef __NetBSD__
        bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
        kmem_free(ttm_dma->dma_segs, (ttm->num_pages *
                sizeof(ttm_dma->dma_segs[0])));
#else
        ttm_dma->cpu_address = NULL;
        ttm_dma->dma_address = NULL;
#endif
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret __diagused;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
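/*
 * ttm_tt_bind() drives the ttm_tt state machine forward, and
 * ttm_tt_unbind()/ttm_tt_unpopulate() drive it back:
 *
 *	tt_unpopulated --driver->ttm_tt_populate--> tt_unbound
 *	tt_unbound     --func->bind---------------> tt_bound
 *	tt_bound       --func->unbind-------------> tt_unbound
 *
 * A caller holding a validated ttm_mem_reg uses it like this (a sketch;
 * note that a failed bind leaves the pages populated but unbound):
 *
 *	int err = ttm_tt_bind(bo->ttm, &bo->mem);
 *	if (err)
 *		return err;
 */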
#ifdef __NetBSD__
/*
 * ttm_tt_wire(ttm)
 *
 *	Wire the uvm pages of ttm and fill the ttm page array.  ttm
 *	must be unpopulated, and must be marked swapped.  This does not
 *	change either state -- the caller is expected to include it
 *	among other operations for such a state transition.
 */
int
ttm_tt_wire(struct ttm_tt *ttm)
{
        struct uvm_object *uobj = ttm->swap_storage;
        struct vm_page *page;
        unsigned i;
        int error;

        KASSERTMSG((ttm->state == tt_unpopulated),
            "ttm_tt %p must be unpopulated for wiring, but state=%d",
            ttm, (int)ttm->state);
        KASSERT(ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
        KASSERT(uobj != NULL);

        error = uvm_obj_wirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT),
            &ttm->pglist);
        if (error)
                /* XXX errno NetBSD->Linux */
                return -error;

        i = 0;
        TAILQ_FOREACH(page, &ttm->pglist, pageq.queue) {
                KASSERT(i < ttm->num_pages);
                KASSERT(ttm->pages[i] == NULL);
                ttm->pages[i] = container_of(page, struct page, p_vmp);
                i++;
        }
        KASSERT(i == ttm->num_pages);

        /* Success!  */
        return 0;
}

/*
 * ttm_tt_unwire(ttm)
 *
 *	Nullify the ttm page array and unwire the uvm pages of ttm.
 *	ttm must be unbound and must not be marked swapped.  This does
 *	not change either state -- the caller is expected to include it
 *	among other operations for such a state transition.
 */
void
ttm_tt_unwire(struct ttm_tt *ttm)
{
        struct uvm_object *uobj = ttm->swap_storage;
        unsigned i;

        KASSERTMSG((ttm->state == tt_unbound),
            "ttm_tt %p must be unbound for unwiring, but state=%d",
            ttm, (int)ttm->state);
        KASSERT(!ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
        KASSERT(uobj != NULL);

        uvm_obj_unwirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT));
        for (i = 0; i < ttm->num_pages; i++)
                ttm->pages[i] = NULL;
}
#endif

#ifndef __NetBSD__
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = file_inode(swap_storage)->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = shmem_read_mapping_page(swap_space, i);
                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;

                copy_highpage(to_page, from_page);
                page_cache_release(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        return ret;
}
#endif
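/*
 * On NetBSD, ttm_tt_wire()/ttm_tt_unwire() are the two halves of the
 * populate/swapout transitions: the backing uvm aobj always owns the
 * pages, so "swapin" is just wiring them and filling ttm->pages.  A
 * hypothetical populate hook built on ttm_tt_wire() -- a sketch only;
 * the real NetBSD path also loads the bus_dma map, see ttm_bus_dma.c:
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		int ret;
 *
 *		if (ttm->state != tt_unpopulated)
 *			return 0;
 *		KASSERT(ISSET(ttm->page_flags, TTM_PAGE_FLAG_SWAPPED));
 *		ret = ttm_tt_wire(ttm);
 *		if (ret)
 *			return ret;
 *		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
 *		ttm->state = tt_unbound;
 *		return 0;
 *	}
 */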
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
#ifdef __NetBSD__

        KASSERTMSG((ttm->state == tt_unpopulated || ttm->state == tt_unbound),
            "ttm_tt %p must be unpopulated or unbound for swapout,"
            " but state=%d",
            ttm, (int)ttm->state);
        KASSERTMSG((ttm->caching_state == tt_cached),
            "ttm_tt %p must be cached for swapout, but caching_state=%d",
            ttm, (int)ttm->caching_state);
        KASSERT(persistent_swap_storage == NULL);

        ttm->bdev->driver->ttm_tt_swapout(ttm);
        return 0;
#else
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (IS_ERR(swap_storage)) {
                        pr_err("Failed allocating swap storage\n");
                        return PTR_ERR(swap_storage);
                }
        } else
                swap_storage = persistent_swap_storage;

        swap_space = file_inode(swap_storage)->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                to_page = shmem_read_mapping_page(swap_space, i);
                if (IS_ERR(to_page)) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                copy_highpage(to_page, from_page);
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                page_cache_release(to_page);
        }

        ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;
out_err:
        if (!persistent_swap_storage)
                fput(swap_storage);

        return ret;
#endif
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
#ifndef __NetBSD__
        pgoff_t i;
        struct page **page = ttm->pages;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i) {
                (*page)->mapping = NULL;
                (*page++)->index = 0;
        }
#endif
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        if (ttm->state == tt_unpopulated)
                return;

        ttm_tt_clear_mapping(ttm);
        ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
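/*
 * ttm_tt_swapout() is normally reached from the buffer-object swapout
 * path once the ttm is unbound and idle.  A minimal caller sketch,
 * assuming a bo whose ttm is already idle (illustrative only, not the
 * authoritative call site):
 *
 *	int ret;
 *
 *	ttm_tt_unbind(bo->ttm);
 *	ret = ttm_tt_swapout(bo->ttm, NULL);
 *	if (ret)
 *		return ret;
 *
 * On Linux the pages are copied into a freshly created shmem object and
 * then freed via ttm_tt_unpopulate(); on NetBSD the pages already live
 * in the uao, so the driver's ttm_tt_swapout hook only has to unwire
 * them.
 */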