/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

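/*
 * ttm_bo_free_old_node - release the memory node backing @bo's current
 * placement by handing bo->mem back to its memory type manager.
 */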
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

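/**
 * ttm_bo_move_ttm - move a buffer object by rebinding its TTM.
 *
 * @bo: the buffer object to move.
 * @evict: unused here; kept for the common move-function signature.
 * @no_wait_gpu: unused here; kept for the common move-function signature.
 * @new_mem: the destination memory region.
 *
 * Unbinds the TTM from the old aperture placement (if any), switches the
 * page caching attributes to match @new_mem, and binds the TTM to the new
 * placement unless it is system memory.
 */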
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

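/*
 * ttm_mem_io_lock/ttm_mem_io_unlock - serialize io_mem_reserve/io_mem_free
 * calls for a memory type manager. Managers that set io_reserve_fastpath
 * need no locking, so both are no-ops in that case.
 */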
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

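/*
 * ttm_mem_io_evict - unmap the buffer object at the head of the manager's
 * io_reserve_lru so that its I/O region can be reused. Returns -EAGAIN if
 * there is nothing to evict.
 */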
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

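/*
 * ttm_mem_io_reserve - reserve an I/O region for @mem through the driver's
 * io_mem_reserve hook, evicting mappings from the io_reserve_lru and
 * retrying as long as the hook returns -EAGAIN.
 */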
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

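/*
 * ttm_mem_io_free - drop one reference to @mem's I/O reservation and call
 * the driver's io_mem_free hook when the last reference goes away.
 */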
static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

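/*
 * ttm_mem_io_reserve_vm - reserve @bo's I/O region for use by CPU mappings
 * and put the object on the manager's io_reserve_lru when that list is in
 * use. The reservation is dropped again by ttm_mem_io_free_vm().
 */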
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

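/*
 * ttm_mem_io_free_vm - undo ttm_mem_io_reserve_vm(): take @bo off the
 * io_reserve_lru and release its I/O reservation.
 */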
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

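/*
 * ttm_mem_reg_ioremap - reserve @mem's I/O region and map it into kernel
 * virtual address space, write-combined when the placement asks for it.
 * *virtual is left NULL when @mem is not I/O memory.
 */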
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
					  mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

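/*
 * ttm_mem_reg_iounmap - tear down a mapping set up by ttm_mem_reg_ioremap()
 * and release the I/O reservation.
 */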
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

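/*
 * ttm_copy_io_page - copy one page between two I/O mappings, 32 bits at a
 * time through ioread32()/iowrite32().
 */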
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

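/*
 * ttm_copy_io_ttm_page - copy one page from an I/O mapping into a TTM page,
 * mapping the destination page with the protection bits in @prot.
 */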
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

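/*
 * ttm_copy_ttm_io_page - copy one page from a TTM page into an I/O mapping,
 * mapping the source page with the protection bits in @prot.
 */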
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

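/**
 * ttm_bo_move_memcpy - fallback move implemented as a CPU copy.
 *
 * @bo: the buffer object to move.
 * @evict: unused here; kept for the common move-function signature.
 * @no_wait_gpu: unused here; kept for the common move-function signature.
 * @new_mem: the destination memory region.
 *
 * Maps both the old and the new placement and copies the contents page by
 * page, in reverse order when the source and destination ranges may
 * overlap. Used when no accelerated copy path is available for the move.
 */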
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

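	/*
	 * A system-to-system move needs no copying, and there is nothing
	 * to copy when the source has neither an I/O mapping nor backing
	 * pages.
	 */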
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/* TTM may be NULL for a move between two I/O regions. */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/* if we fail here don't nuke the mm node
			 * as the bo still owns it */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* failing here means keep the old copy as-is */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}

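/**
 * ttm_io_prot - derive the page protection for a buffer-object mapping
 * from its placement caching flags.
 *
 * @caching_flags: TTM_PL_FLAG_* caching flags of the placement.
 * @tmp: the base page protection to adjust.
 *
 * Returns @tmp made write-combined or uncached as the architecture
 * requires for non-cached placements.
 */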
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

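/*
 * ttm_bo_ioremap - set up a kernel mapping for the given byte range of an
 * I/O-memory buffer object, reusing the pre-mapped bus address when there
 * is one.
 */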
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base +
						  bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base +
						       bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

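/*
 * ttm_bo_kmap_ttm - map pages of a system-memory buffer object, with
 * kmap() for the single-page cached case and vmap() otherwise.
 */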
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

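/**
 * ttm_bo_kmap - map part of a buffer object into kernel address space.
 *
 * @bo: the buffer object to map.
 * @start_page: first page of the range to map.
 * @num_pages: number of pages to map.
 * @map: the kmap object filled in by this call and later passed to
 * ttm_bo_kunmap().
 *
 * Reserves the object's I/O region if needed, then maps either through
 * ioremap (I/O memory) or through the TTM's pages (system memory).
 */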
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

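/**
 * ttm_bo_kunmap - tear down a mapping set up by ttm_bo_kmap().
 *
 * @map: the kmap object describing the mapping.
 *
 * Unmaps with the method matching how the mapping was made and drops the
 * I/O reservation taken in ttm_bo_kmap().
 */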
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

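/**
 * ttm_bo_move_accel_cleanup - finish an accelerated (GPU) move.
 *
 * @bo: the buffer object that was moved.
 * @sync_obj: the sync object the move is fenced with.
 * @evict: whether this move is an eviction.
 * @no_wait_gpu: unused here; kept for the common move-function signature.
 * @new_mem: the destination memory region.
 *
 * For evictions, waits for the move to finish and frees the old node.
 * For ordinary moves, transfers the old placement to a ghost object that
 * is released once the GPU is done with it, so the move can be pipelined.
 */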
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		/* ttm_buffer_object_transfer accesses bo->sync_obj */
		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
695