/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>
/**
 * Allocates a ttm structure for the given BO and stores it in bo->ttm.
 * Returns 0 on success, -EINVAL for an unrecognized BO type and -ENOMEM
 * if the driver fails to allocate the structure.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
			GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

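/**
 * Allocates storage for both the page pointers and the DMA addresses
 * backing the ttm; both arrays live in a single zeroed allocation,
 * with dma_address pointing just past the page array.
 */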
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->ttm.pages) +
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

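/**
 * Allocates storage for the DMA addresses only; an SG-backed ttm
 * takes its pages from the scatter-gather table instead.
 */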
static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

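/*
 * Change the caching attribute of a single page.  Highmem pages are
 * left alone since they are not part of the kernel's linear map.
 */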
static int ttm_tt_set_page_caching(struct vm_page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct vm_page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back the pages already converted to the new state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

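/**
 * Translate TTM placement flags into the corresponding caching state
 * and apply it to the ttm.
 */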
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

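/**
 * Unbind and unpopulate a ttm, release any non-persistent swap
 * storage still backing it and hand the structure to the driver's
 * destroy hook.
 */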
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		uao_detach(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

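/*
 * Initialize the fields a freshly created ttm inherits from its
 * buffer object.
 */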
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

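/**
 * Initialize a plain (non-DMA) ttm and allocate its page directory.
 */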
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

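/**
 * Free the page directory allocated by ttm_tt_init().
 */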
void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

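/**
 * Initialize a ttm together with its DMA bookkeeping: the combined
 * page/DMA-address directory, a bus_dma segment array and a DMA map
 * spanning the whole object.
 */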
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int flags = BUS_DMA_WAITOK;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	ttm_dma->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	ttm_dma->dmat = bo->bdev->dmat;

	if ((page_flags & TTM_PAGE_FLAG_DMA32) == 0)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm_dma->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm_dma->map)) {
		km_free(ttm_dma->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		kvfree(ttm->pages);
		ttm->pages = NULL;
		ttm_dma->dma_address = NULL;
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

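/**
 * Like ttm_dma_tt_init(), but for ttms that may be backed by a
 * scatter-gather table, in which case only DMA addresses are
 * allocated.
 */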
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int flags = BUS_DMA_WAITOK;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	ttm_dma->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	ttm_dma->dmat = bo->bdev->dmat;

	if ((page_flags & TTM_PAGE_FLAG_DMA32) == 0)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm_dma->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm_dma->map)) {
		km_free(ttm_dma->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		if (ttm->pages)
			kvfree(ttm->pages);
		else
			kvfree(ttm_dma->dma_address);
		ttm->pages = NULL;
		ttm_dma->dma_address = NULL;
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

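/**
 * Release everything ttm_dma_tt_init() or ttm_sg_tt_init() set up:
 * the page/DMA-address arrays, the DMA map and the segment array.
 */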
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;

	bus_dmamap_destroy(ttm_dma->dmat, ttm_dma->map);
	km_free(ttm_dma->segs, round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

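/*
 * Unbind a bound ttm through the driver's unbind hook.
 */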
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

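/**
 * Populate the ttm if necessary and bind it to the given memory
 * region through the driver's bind hook.
 */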
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

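/**
 * Copy the contents of a swapped-out ttm back into its pages: the
 * swap object is wired, copied page by page, unwired and, unless it
 * is persistent, detached.
 */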
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	from_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		uvm_pagecopy(from_page, to_page);
		from_page = TAILQ_NEXT(from_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		uao_detach(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

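/**
 * Copy the pages of an unbound, cached ttm out to swap storage
 * (either the caller-supplied persistent object or a freshly created
 * anonymous one) and unpopulate the ttm.
 */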
int ttm_tt_swapout(struct ttm_tt *ttm, struct uvm_object *persistent_swap_storage)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = uao_create(ttm->num_pages << PAGE_SHIFT, 0);
#ifdef notyet
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
#endif
	} else {
		swap_storage = persistent_swap_storage;
	}

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	to_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		uvm_pagecopy(from_page, to_page);
#ifdef notyet
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
#endif
		to_page = TAILQ_NEXT(to_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		uao_detach(swap_storage);

	return ret;
}

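/*
 * On Linux this points every page's mapping at the device's address
 * space; there is no equivalent step on OpenBSD, hence the #ifdef.
 */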
static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
#ifdef __linux__
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
#endif
}

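/**
 * Allocate backing pages for the ttm, preferring the driver's own
 * populate hook over the generic pool allocator.
 */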
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}

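/*
 * Revoke all mappings of the ttm's pages before they are released.
 */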
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	int i;
	struct vm_page *page;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (unlikely(page == NULL))
			continue;
		pmap_page_protect(page, PROT_NONE);
	}
}

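/**
 * Release the backing pages of a populated ttm, using the driver's
 * unpopulate hook when one is provided.
 */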
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
		ttm_pool_unpopulate(ttm);
}