/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

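/*
 * Release the memory manager node backing the bo's current placement.
 */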
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

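/*
 * Move a buffer whose pages are managed by its ttm_tt: unbind from a
 * non-system placement and release the old node, adjust the caching
 * state, then bind the pages to the new placement (unless it is
 * TTM_PL_SYSTEM, where no binding is needed).
 */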
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

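/*
 * Take the per-memory-type io_reserve mutex, unless this memory type
 * uses the lockless fast path. In the interruptible case the lock is
 * acquired with LK_SLEEPFAIL, and a failed acquisition is reported to
 * the caller as -EINTR.
 */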
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (lockmgr(&man->io_reserve_mutex,
			    LK_EXCLUSIVE | LK_SLEEPFAIL))
			return (-EINTR);
		else
			return (0);
	}

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

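/*
 * Free up an io region by evicting the least recently used buffer
 * object on the io_reserve LRU, unmapping any CPU mappings it holds.
 * Returns -EAGAIN when there is nothing left to evict.
 */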
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

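/*
 * Reference-counted front end to the driver's io_mem_reserve() hook.
 * Without the fast path, only the first reservation calls into the
 * driver; if that returns -EAGAIN, LRU entries are evicted and the
 * reservation is retried.
 */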
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

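/*
 * Drop one io reservation reference; once the count reaches zero the
 * region is handed back to the driver via its io_mem_free() hook.
 */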
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

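/*
 * Pin an io reservation on behalf of a CPU (VM) mapping of the bo and
 * track the bo on the manager's io_reserve LRU so the reservation can
 * be evicted when the io space runs out. ttm_mem_io_free_vm() below
 * undoes this.
 */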
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

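/*
 * Map a memory region into kernel virtual space. Premapped io regions
 * are returned through bus.addr as-is; other io regions are mapped
 * write-combining or uncacheable according to the placement flags.
 * Non-io memory simply yields a NULL mapping and success.
 */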
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

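/*
 * The three helpers below copy one page at a time for
 * ttm_bo_move_memcpy(): io -> io, io -> ttm and ttm -> io. On x86 the
 * ttm page is mapped with kmap_atomic_prot(); elsewhere vmap() is used
 * whenever the requested protection differs from PAGE_KERNEL, and
 * kmap() otherwise.
 */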
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

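/*
 * Fallback bo move done with the CPU: map both placements and copy page
 * by page. The copy runs backwards for overlapping moves within the
 * same memory type, and a destination in io memory is simply cleared
 * when the source holds no data. When the new placement is in fixed
 * (non-system) memory, the ttm_tt is destroyed after the copy.
 */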
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* A failure here means the old copy is kept as-is */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	cpu_mfence();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	/*
	 * Mirror ref from kref_init() for list_kref.
	 */
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

	*new_obj = fbo;
	return 0;
}

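/*
 * Compute the page protection to use for mapping bo memory, derived
 * from the caching flags of its placement and the architecture's
 * notion of write-combined and uncached mappings.
 */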
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

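/*
 * kmap helper for io memory: reuse the premapped bus address when one
 * exists, otherwise ioremap the region write-combined or uncached to
 * match the placement.
 */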
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

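/*
 * kmap helper for ttm pages: a single cached page can be mapped with
 * kmap(); everything else goes through vmap() to obtain a contiguous
 * mapping with the desired page protection.
 */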
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

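/*
 * Map part of a buffer object into kernel address space, picking
 * ttm_bo_ioremap() or ttm_bo_kmap_ttm() depending on whether the
 * current placement is io memory.
 *
 * Minimal usage sketch (assumes the bo is reserved; error handling
 * trimmed):
 *
 *	struct ttm_bo_kmap_obj map;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		memset(map.virtual, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */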
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

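/*
 * Finish an accelerated (GPU) move fenced by @sync_obj. On eviction the
 * move is waited for synchronously and the old placement is torn down.
 * Otherwise the old placement is handed to a ghost buffer object that
 * is released once the fence signals, which lets ordinary moves be
 * pipelined.
 */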
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
728