/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

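/*
 * Illustrative sketch only, not part of this file: a minimal driver
 * ->ttm_tt_create() hook of the kind dispatched above.  The "mydrv"
 * names are hypothetical; ttm_dma_tt_init() is defined below.
 *
 *	static struct ttm_tt *
 *	mydrv_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 *	{
 *		struct ttm_dma_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (tt == NULL)
 *			return NULL;
 *		if (ttm_dma_tt_init(tt, bo, page_flags)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */
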
/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
			GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

/*
 * Allocates the page and DMA-address arrays of a ttm_dma_tt in a single
 * allocation: dma_address[] starts immediately after pages[num_pages].
 */
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

/*
 * SG-backed ttms take their pages from the sg table, so only the
 * DMA-address array is allocated here.
 */
static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

static int ttm_tt_set_page_caching(struct vm_page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct vm_page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (!ttm_tt_is_populated(ttm)) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

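/*
 * Illustrative sketch only: a caller moving a BO to write-combined
 * memory would typically pass its placement flags straight through,
 * e.g.
 *
 *	ret = ttm_tt_set_placement_caching(bo->ttm,
 *	    TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM);
 *
 * Only the caching bits (TTM_PL_FLAG_WC / TTM_PL_FLAG_UNCACHED) are
 * inspected above; all other bits in the mask are ignored.
 */
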
void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unpopulate(bdev, ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		uao_detach(ttm->swap_storage);

	ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);

void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	bdev->driver->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm_tt_set_unpopulated(ttm);
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int flags = BUS_DMA_WAITOK;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	ttm_dma->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	ttm_dma->dmat = bo->bdev->dmat;

	if ((page_flags & TTM_PAGE_FLAG_DMA32) == 0)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm_dma->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm_dma->map)) {
		km_free(ttm_dma->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		kvfree(ttm->pages);
		ttm->pages = NULL;
		ttm_dma->dma_address = NULL;
		pr_err("Failed creating DMA map\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int flags = BUS_DMA_WAITOK;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	ttm_dma->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	ttm_dma->dmat = bo->bdev->dmat;

	if ((page_flags & TTM_PAGE_FLAG_DMA32) == 0)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm_dma->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm_dma->map)) {
		km_free(ttm_dma->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		/* In the SG case dma_address was allocated on its own;
		 * otherwise it lives inside the pages[] allocation. */
		if (ttm->pages)
			kvfree(ttm->pages);
		else
			kvfree(ttm_dma->dma_address);
		ttm->pages = NULL;
		ttm_dma->dma_address = NULL;
		pr_err("Failed creating DMA map\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;

	bus_dmamap_destroy(ttm_dma->dmat, ttm_dma->map);
	km_free(ttm_dma->segs, round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

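/*
 * Illustrative sketch only (hypothetical "mydrv" names): the matching
 * driver ->ttm_tt_destroy() hook typically tears down the common state
 * before freeing its wrapper:
 *
 *	static void
 *	mydrv_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *tt =
 *		    container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		ttm_tt_destroy_common(bdev, ttm);
 *		ttm_dma_tt_fini(tt);
 *		kfree(tt);
 *	}
 */
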
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	TAILQ_INIT(&plist);
	if (uvm_obj_wire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	from_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			/* Don't leak the wired swap pages on error. */
			uvm_obj_unwire(swap_storage, 0,
			    ttm->num_pages << PAGE_SHIFT);
			goto out_err;
		}

		uvm_pagecopy(from_page, to_page);
		from_page = TAILQ_NEXT(from_page, pageq);
	}

	uvm_obj_unwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		uao_detach(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

int ttm_tt_swapout(struct ttm_bo_device *bdev,
		   struct ttm_tt *ttm, struct uvm_object *persistent_swap_storage)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = uao_create(ttm->num_pages << PAGE_SHIFT, 0);
#ifdef notyet
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
#endif
	} else {
		swap_storage = persistent_swap_storage;
	}

	TAILQ_INIT(&plist);
	if (uvm_obj_wire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	to_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL)) {
			/* Keep the two page lists in step across holes. */
			to_page = TAILQ_NEXT(to_page, pageq);
			continue;
		}

		uvm_pagecopy(from_page, to_page);
#ifdef notyet
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
#endif
		to_page = TAILQ_NEXT(to_page, pageq);
	}

	uvm_obj_unwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		uao_detach(swap_storage);

	return ret;
}

static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
#ifdef __linux__
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
#endif
}

int ttm_tt_populate(struct ttm_bo_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (bdev->driver->ttm_tt_populate)
		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(bdev, ttm);
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);

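/*
 * Illustrative sketch only: a caller populating a tt before binding it,
 * assuming the BO reservation is already held:
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *
 *	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
 */
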
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	int i;
	struct vm_page *page;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (unlikely(page == NULL))
			continue;
		pmap_page_protect(page, PROT_NONE);
	}
}

void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->driver->ttm_tt_unpopulate)
		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_unpopulate(ttm);
}
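
/*
 * Note on teardown order: ttm_tt_destroy() above dispatches to the
 * driver hook, which is expected to call ttm_tt_destroy_common(); that
 * in turn unpopulates the tt via this function and then drops any
 * non-persistent swap storage before the driver frees its wrapper.
 */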