xref: /openbsd-src/sys/dev/pci/drm/ttm/ttm_tt.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
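/*
 * On Linux these limits can be overridden at load time, e.g. with
 * "ttm.pages_limit=<n>" on the kernel command line or later through
 * /sys/module/ttm/parameters/pages_limit.  In this OpenBSD port the
 * module_param machinery is a stub, so the defaults assigned in
 * ttm_tt_mgr_init() below always apply.
 */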

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
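
/*
 * Usage sketch, not part of this file: bdev->funcs->ttm_tt_create is a
 * driver hook and is typically a thin wrapper around ttm_tt_init().  The
 * "mydrv_" names below are hypothetical.
 *
 *	static struct ttm_tt *
 *	mydrv_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 *	{
 *		struct ttm_tt *tt;
 *
 *		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *		if (tt == NULL)
 *			return NULL;
 *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return tt;
 *	}
 */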

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
			GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	ttm->orders = kvmalloc_array(ttm->num_pages,
				      sizeof(unsigned long),
				      GFP_KERNEL | __GFP_ZERO);
	if (!ttm->orders) {
		/* Don't leak the page array if the order array fails. */
		kvfree(ttm->pages);
		ttm->pages = NULL;
		return -ENOMEM;
	}
	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	/* One allocation holds both the page and the DMA address arrays. */
	ttm->pages = kvmalloc_array(ttm->num_pages,
				    sizeof(*ttm->pages) +
				    sizeof(*ttm->dma_address),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);

	ttm->orders = kvmalloc_array(ttm->num_pages,
				      sizeof(unsigned long),
				      GFP_KERNEL | __GFP_ZERO);
	if (!ttm->orders) {
		/* Don't leak the combined array if the order array fails. */
		kvfree(ttm->pages);
		ttm->pages = NULL;
		ttm->dma_address = NULL;
		return -ENOMEM;
	}
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unpopulate(bdev, ttm);

	if (ttm->swap_storage)
		uao_detach(ttm->swap_storage);

	ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching)
{
	ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	kvfree(ttm->orders);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
	ttm->orders = NULL;

	bus_dmamap_destroy(ttm->dmat, ttm->map);
	km_free(ttm->segs, round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;
	int flags = BUS_DMA_WAITOK;

	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	ttm->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	ttm->dmat = bo->bdev->dmat;

	if (bo->bdev->pool.use_dma32 == false)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm->map)) {
		km_free(ttm->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		if (ttm->pages) {
			kvfree(ttm->pages);
			kvfree(ttm->orders);
		} else
			kvfree(ttm->dma_address);
		ttm->pages = NULL;
		ttm->orders = NULL;
		ttm->dma_address = NULL;
		pr_err("Failed creating DMA map\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
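
/*
 * Usage sketch, hypothetical driver code: drivers that need DMA addresses
 * next to the page array embed the struct ttm_tt and pair ttm_sg_tt_init()
 * with ttm_tt_fini() in their destroy hook.
 *
 *	struct mydrv_ttm_tt {
 *		struct ttm_tt ttm;
 *		... driver-private state ...
 *	};
 *
 *	static struct ttm_tt *
 *	mydrv_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 *	{
 *		struct mydrv_ttm_tt *gtt;
 *
 *		gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);
 *		if (gtt == NULL)
 *			return NULL;
 *		if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, ttm_cached)) {
 *			kfree(gtt);
 *			return NULL;
 *		}
 *		return &gtt->ttm;
 *	}
 *
 *	static void
 *	mydrv_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 *	{
 *		struct mydrv_ttm_tt *gtt =
 *		    container_of(ttm, typeof(*gtt), ttm);
 *
 *		ttm_tt_destroy_common(bdev, ttm);
 *		ttm_tt_fini(&gtt->ttm);
 *		kfree(gtt);
 *	}
 */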

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	TAILQ_INIT(&plist);
	if (uvm_obj_wire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT,
	    &plist))
		return -ENOMEM;

	from_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		uvm_pagecopy(from_page, to_page);
		from_page = TAILQ_NEXT(from_page, pageq);
	}

	uvm_obj_unwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	uao_detach(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	/* Drop the wiring taken above before propagating the error. */
	uvm_obj_unwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);
	return ret;
}

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to backing storage (a shmem file on Linux, a uvm
 * anonymous object in this port), return number of pages swapped out or
 * negative error code.  Not yet implemented here: the function is stubbed
 * and always fails with -ENOSYS.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	int i, ret;

	swap_storage = uao_create(size, 0);
#ifdef notyet
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}
#endif

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_storage, i,
						      gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	uao_detach(swap_storage);

	return ret;
#endif
}

static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	/* Linux only: OpenBSD's vm_page has no address_space back pointer. */
#ifdef __linux__
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
#endif
}

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm_tt_add_mapping(bdev, ttm);
	ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
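
/*
 * Usage sketch: TTM calls ttm_tt_populate() itself before CPU faults or
 * GPU binds touch the pages; a direct caller would look roughly like the
 * following (illustrative only).
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int ret;
 *
 *	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
 *	if (ret)
 *		return ret;
 *	... bo->ttm->pages[] is now backed by real pages ...
 */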

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	int i;
	struct vm_page *page;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (unlikely(page == NULL))
			continue;
		pmap_page_protect(page, PROT_NONE);
	}
}

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif


/*
 * ttm_tt_mgr_init - register with the MM shrinker
 *
 * Register with the MM shrinker for swapping out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}
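
/*
 * Illustrative caller: the defaults passed in by ttm_global_init() are
 * derived from system memory, roughly half of RAM for pages_limit and at
 * most 2 GiB for dma32_pages_limit, along the lines of
 *
 *	num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));
 *	ttm_tt_mgr_init(num_pages / 2, num_dma32);
 *
 * The exact computation lives in ttm_device.c and may differ by version.
 */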

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct dma_buf_map *dmap,
				       pgoff_t i, bus_space_tag_t bst)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

#ifdef __linux__
	dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
							 iter_tt->prot));
#else
	dma_buf_map_set_vaddr(dmap, kmap_atomic_prot(iter_tt->tt->pages[i],
						     iter_tt->prot));
#endif
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct dma_buf_map *map,
					 bus_space_tag_t bst)
{
#ifdef __linux__
	kunmap_local(map->vaddr);
#else
	kunmap_atomic(map->vaddr);
#endif
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
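
/*
 * Usage sketch, hypothetical: the iterator is consumed by the generic move
 * helpers (e.g. ttm_move_memcpy()); a manual page-by-page walk would look
 * roughly like this, assuming a valid bus_space_tag_t "bst":
 *
 *	struct ttm_kmap_iter_tt iter_tt;
 *	struct ttm_kmap_iter *iter;
 *	struct dma_buf_map map;
 *	pgoff_t i;
 *
 *	iter = ttm_kmap_iter_tt_init(&iter_tt, tt);
 *	for (i = 0; i < tt->num_pages; ++i) {
 *		iter->ops->map_local(iter, &map, i, bst);
 *		... access map.vaddr for one page ...
 *		if (iter->ops->unmap_local)
 *			iter->ops->unmap_local(iter, &map, bst);
 *	}
 */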
538