/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
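/*
 * For reference, a minimal sketch (not part of this file) of the
 * driver-side hook invoked through bdev->driver->ttm_tt_create above.
 * The "foo" driver, its TT wrapper struct and foo_backend_func are
 * hypothetical; a real driver embeds a struct ttm_dma_tt (or a plain
 * struct ttm_tt) in its own type and calls one of the ttm_*_tt_init()
 * helpers defined later in this file.
 */
#if 0
static struct ttm_backend_func foo_backend_func = {
	/* .bind, .unbind and .destroy would go here */
};

struct foo_ttm_tt {
	struct ttm_dma_tt dma_ttm;	/* must come first: TTM sees &dma_ttm.ttm */
};

static struct ttm_tt *
foo_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
	struct foo_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (tt == NULL)
		return NULL;

	tt->dma_ttm.ttm.func = &foo_backend_func;
	if (ttm_dma_tt_init(&tt->dma_ttm, bo, page_flags)) {
		kfree(tt);
		return NULL;
	}
	return &tt->dma_ttm.ttm;
}
#endif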
/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}
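/*
 * Note on the layout used by ttm_dma_tt_alloc_page_directory() above: the
 * page pointers and the DMA addresses live in one allocation, with
 * dma_address aliasing the tail of the pages array:
 *
 *	pages -> [ num_pages page pointers ][ num_pages DMA addresses ]
 *	                                      ^-- dma_address
 *
 * ttm_dma_tt_fini() below relies on this: it frees ttm->pages when both
 * arrays were allocated together, and frees dma_address alone only in the
 * ttm_sg_tt_alloc_page_directory() case, where ttm->pages is never set.
 */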
static int ttm_tt_set_page_caching(struct vm_page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct vm_page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back the pages already converted to the old caching state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		uao_detach(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int flags = BUS_DMA_WAITOK;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	ttm_dma->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	ttm_dma->dmat = bo->bdev->dmat;

	if ((page_flags & TTM_PAGE_FLAG_DMA32) == 0)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm_dma->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm_dma->map)) {
		km_free(ttm_dma->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		ttm_tt_destroy(ttm);
		pr_err("Failed creating dma map\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int flags = BUS_DMA_WAITOK;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	ttm_dma->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	ttm_dma->dmat = bo->bdev->dmat;

	if ((page_flags & TTM_PAGE_FLAG_DMA32) == 0)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm_dma->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm_dma->map)) {
		km_free(ttm_dma->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		ttm_tt_destroy(ttm);
		pr_err("Failed creating dma map\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;

	bus_dmamap_destroy(ttm_dma->dmat, ttm_dma->map);
	km_free(ttm_dma->segs, round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
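/*
 * A hedged usage sketch for ttm_tt_bind(). Callers (normally the TTM move
 * paths, not drivers directly) supply the destination memory region and an
 * operation context; foo_bind_tt and the context values are illustrative
 * only.
 */
#if 0
static int foo_bind_tt(struct ttm_buffer_object *bo,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};

	/* Populates the TT if needed, then calls ttm->func->bind(). */
	return ttm_tt_bind(bo->ttm, new_mem, &ctx);
}
#endif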
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	from_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		uvm_pagecopy(from_page, to_page);
		from_page = TAILQ_NEXT(from_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		uao_detach(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct uvm_object *persistent_swap_storage)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = uao_create(ttm->num_pages << PAGE_SHIFT, 0);
#ifdef notyet
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
#endif
	} else {
		swap_storage = persistent_swap_storage;
	}

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	to_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		uvm_pagecopy(from_page, to_page);
#ifdef notyet
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
#endif
		to_page = TAILQ_NEXT(to_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		uao_detach(swap_storage);

	return ret;
}

static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
#ifdef __linux__
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
#endif
}

int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}
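/*
 * Taken together, the helpers in this file move a ttm_tt through the
 * following states (a sketch; the populate helpers such as
 * ttm_pool_populate() are defined elsewhere and set tt_unbound, while the
 * unpopulate paths return the TT to tt_unpopulated):
 *
 *	tt_unpopulated --ttm_tt_populate()---> tt_unbound
 *	tt_unbound     --ttm_tt_bind()-------> tt_bound
 *	tt_bound       --ttm_tt_unbind()-----> tt_unbound
 *	tt_unbound     --ttm_tt_unpopulate()-> tt_unpopulated
 *
 * ttm_tt_swapout() additionally requires tt_unbound or tt_unpopulated with
 * cached pages, and finishes by unpopulating through the driver hook.
 */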
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	int i;
	struct vm_page *page;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (unlikely(page == NULL))
			continue;
		pmap_page_protect(page, PROT_NONE);
	}
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
		ttm_pool_unpopulate(ttm);
}