/* xref: /openbsd-src/sys/dev/pci/drm/ttm/ttm_bo_util.c (revision 4b70baf6e17fc8b27fc1f7fa7929335753fa94c3) */
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

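/*
 * A "ghost" buffer object: a transient wrapper that takes over the old
 * backing storage of a buffer while an accelerated move is in flight.
 * It holds a reference to the original BO (see ttm_buffer_object_transfer()
 * below) and is destroyed once the copy has completed.
 */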
struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

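/*
 * Move a buffer between placements by (re)binding its TTM page array.
 * If the old placement is not system memory the BO is first waited on,
 * unbound and its old node freed; the pages are then given the new caching
 * attributes and, for non-system destinations, bound to the new memory
 * region. On success the new placement is copied into bo->mem.
 */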
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

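/*
 * Serialize accesses to a memory type's I/O reserve state. When the manager
 * advertises io_reserve_fastpath no locking is needed and these helpers are
 * effectively no-ops.
 */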
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

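/*
 * Reclaim an I/O reservation by unmapping the least recently used BO on the
 * manager's io_reserve_lru list. Returns -EAGAIN when there is nothing to
 * evict, which ends the retry loop in ttm_mem_io_reserve().
 */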
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

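/*
 * Wrappers around the driver's io_mem_reserve()/io_mem_free() hooks. Without
 * the fastpath, reservations are reference counted via bus.io_reserved_count;
 * if the driver returns -EAGAIN, mapped BOs are evicted from the LRU and the
 * reservation is retried.
 */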
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

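/*
 * VM-mapping variants: in addition to the plain reserve/free they track the
 * reservation with bus.io_reserved_vm and keep the BO on the manager's
 * io_reserve_lru so that ttm_mem_io_evict() can reclaim the mapping
 * (typically used on the CPU fault / unmap_virtual path).
 */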
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

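/*
 * Map/unmap a whole memory region for CPU access. On this OpenBSD port the
 * kernel mapping is established with bus_space_map(9) (prefetchable for
 * write-combined placements) rather than the Linux ioremap() family.
 * System-memory regions (bus.is_iomem == false) come back with *virtual
 * left NULL.
 */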
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;
	int flags;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			flags = BUS_SPACE_MAP_PREFETCHABLE;
		else
			flags = 0;

		if (bus_space_map(bdev->memt, mem->bus.base + mem->bus.offset,
		    mem->bus.size, BUS_SPACE_MAP_LINEAR | flags,
		    &mem->bus.bsh)) {
			printf("%s bus_space_map failed\n", __func__);
			return -ENOMEM;
		}

		addr = bus_space_vaddr(bdev->memt, mem->bus.bsh);

		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		bus_space_unmap(bdev->memt, mem->bus.bsh, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

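/*
 * Copy one page between two iomapped regions, 32 bits at a time, using the
 * ioread32()/iowrite32() accessors so the transfer is safe for device
 * memory mappings.
 */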
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr, PAGE_SIZE)
#endif

/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct vm_page *page, pgprot_t prot)
{
#if defined(__amd64__) || defined(__i386__)
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		return kmap_atomic(page);
	else
#endif
		return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
#if defined(__amd64__) || defined(__i386__)
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		kunmap_atomic(addr);
	else
#endif
		__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);

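/*
 * Per-page copy helpers for ttm_bo_move_memcpy(): ttm_copy_io_ttm_page()
 * copies from an iomapped source into a TTM page, ttm_copy_ttm_io_page()
 * copies from a TTM page into an iomapped destination. The TTM page is
 * temporarily mapped with the caller-supplied protection via
 * ttm_kmap_atomic_prot().
 */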
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct vm_page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = ttm_kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, prot);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct vm_page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = ttm_kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);

	return 0;
}

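/*
 * Fallback move path: copy the buffer contents with the CPU, page by page.
 * Old and new regions are iomapped when they are I/O memory, freshly
 * allocated destinations with no backing data are simply cleared, and
 * overlapping moves within the same memory type are copied backwards.
 * On success the old node is released and, for fixed memory types, the TTM
 * is destroyed.
 */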
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

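/*
 * Destructor for the ghost object created by ttm_buffer_object_transfer():
 * drop the reference it holds on the original BO and free the wrapper.
 */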
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;
	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&bo->bdev->glob->bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
	rw_init(&fbo->base.wu_mutex, "ttmwm");
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.vma_node);
	atomic_set(&fbo->base.cpu_writers, 0);

	kref_init(&fbo->base.list_kref);
	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	fbo->base.resv = &fbo->base.ttm_resv;
	reservation_object_init(fbo->base.resv);
	ret = reservation_object_trylock(fbo->base.resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}

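/*
 * ttm_io_prot() translates TTM caching flags into page protection bits.
 * The first definition below is the Linux per-architecture version and is
 * compiled only under __linux__; the OpenBSD port uses the simpler variant
 * that follows, which only distinguishes cached, write-combined and
 * uncached mappings.
 */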
#ifdef __linux__
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
#endif

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);

	return tmp;
}

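/*
 * Map a sub-range of an I/O-memory BO for a kmap object. Pre-mapped
 * regions (bus.addr set) are reused directly; otherwise the range is
 * mapped through bus_space_map(9), prefetchable for write-combined
 * placements.
 */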
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	int flags;
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			flags = BUS_SPACE_MAP_PREFETCHABLE;
		else
			flags = 0;

		if (bus_space_map(bo->bdev->memt,
		    mem->bus.base + bo->mem.bus.offset + offset,
		    size, BUS_SPACE_MAP_LINEAR | flags,
		    &bo->mem.bus.bsh)) {
			printf("%s bus_space_map failed\n", __func__);
			map->virtual = 0;
		} else
			map->virtual = bus_space_vaddr(bo->bdev->memt,
			    bo->mem.bus.bsh);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

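/*
 * Map pages of a system-memory BO. The TTM is populated first; a single
 * cached page is then mapped with kmap(), while anything else (multiple
 * pages, or a non-cached placement) goes through vmap() with the protection
 * derived from the placement flags.
 */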
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

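/*
 * Public kmap entry point: validate the requested page range, reserve the
 * I/O region, then dispatch to ttm_bo_kmap_ttm() for system memory or
 * ttm_bo_ioremap() for I/O memory. The result is recorded in @map and must
 * be released with ttm_bo_kunmap().
 */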
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

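/*
 * Tear down a mapping created by ttm_bo_kmap(), using the unmap primitive
 * that matches how it was established (bus_space_unmap, vunmap or kunmap),
 * then drop the I/O reservation taken at map time.
 */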
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		bus_space_unmap(bo->bdev->memt, bo->mem.bus.bsh,
		    bo->mem.bus.size);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual, bo->mem.bus.size);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->virtual);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

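/*
 * Finish an accelerated (GPU-copy) move whose completion is signalled by
 * @fence. For evictions the BO is simply waited on and its old node freed;
 * otherwise the old storage is handed to a ghost object so it can be
 * released asynchronously once the fence signals.
 */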
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_put(ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

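/*
 * Like ttm_bo_move_accel_cleanup(), but also pipelines evictions: when the
 * source is a fixed memory type there is no TTM to unbind, so the eviction
 * fence is merely recorded on the source manager and the old node is freed
 * immediately. Waiting for the move remains only as a last resort.
 */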
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_put(ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

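/*
 * Strip a buffer object of its backing storage without waiting: the current
 * contents are transferred to a ghost object that inherits the BO's fences,
 * and the BO itself is reset to an empty system-memory placement.
 */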
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = reservation_object_copy_fences(ghost->resv, bo->resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	ttm_bo_unreserve(ghost);
	ttm_bo_put(ghost);

	return 0;
}