/*	$OpenBSD: ttm_bo_util.c,v 1.15 2015/09/27 11:09:26 jsg Exp $	*/
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>

int	 ttm_mem_reg_ioremap(struct ttm_bo_device *, struct ttm_mem_reg *,
	     void **);
void	 ttm_mem_reg_iounmap(struct ttm_bo_device *, struct ttm_mem_reg *,
	     void *);

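/*
 * ttm_bo_free_old_node
 *
 * Hand the memory node backing the buffer's current placement back to
 * the memory manager that allocated it.
 */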
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

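/*
 * ttm_bo_move_ttm
 *
 * Move a buffer by rebinding its TTM pages: unbind from a non-system
 * old placement and free its memory node, fix up the caching state,
 * then bind to the new placement unless it is system memory. On
 * success the old placement is overwritten with the new one and
 * ownership of new_mem->mm_node moves to the buffer object.
 */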
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

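/*
 * Locking for the per-manager io reservation bookkeeping below.
 * Managers with io_reserve_fastpath set need no reservation tracking,
 * so the mutex is skipped entirely.
 */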
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

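/*
 * Reclaim io space by unmapping the least recently used buffer on the
 * manager's io reserve LRU. Expects the io reserve lock to be held.
 * Returns -EAGAIN when there is nothing left to evict.
 */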
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

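/*
 * Reserve io space for a memory region through the driver hook,
 * refcounted via bus.io_reserved_count. If the driver runs out of io
 * space (-EAGAIN), evict an entry from the io reserve LRU and retry.
 */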
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}

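/*
 * Per-buffer io reservations used for CPU mappings of the buffer:
 * reserve io space for the buffer's current placement and track the
 * buffer on the manager's io reserve LRU so that the reservation can
 * later be reclaimed through ttm_mem_io_evict().
 */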
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

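/*
 * Map a memory region into kernel virtual address space so that it
 * can be copied with the CPU. Io memory without a pre-established
 * mapping is mapped with bus_space_map(9), linear and, for
 * write-combined placements, prefetchable. System memory regions
 * yield *virtual == NULL and are handled by the ttm page copy
 * helpers instead.
 */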
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;
	int flags;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			flags = BUS_SPACE_MAP_PREFETCHABLE;
		else
			flags = 0;

		if (bus_space_map(bdev->memt, mem->bus.base + mem->bus.offset,
		    mem->bus.size, BUS_SPACE_MAP_LINEAR | flags, &mem->bus.bsh)) {
			printf("%s bus_space_map failed\n", __func__);
			return -ENOMEM;
		}

		addr = bus_space_vaddr(bdev->memt, mem->bus.bsh);

		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		bus_space_unmap(bdev->memt, mem->bus.bsh, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

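/*
 * Page-sized copy helpers for ttm_bo_move_memcpy(). Three cases:
 * io -> io, io -> ttm and ttm -> io. The ttm-backed side is mapped
 * with kmap() when the kernel page protection fits, and with a
 * single-page vmap() otherwise.
 */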
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct vm_page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst, PAGE_SIZE);
	else
		kunmap(d);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct vm_page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src, PAGE_SIZE);
	else
		kunmap(s);

	return 0;
}

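/*
 * ttm_bo_move_memcpy
 *
 * Fallback move done with a CPU copy: map both placements and copy
 * page by page, walking backwards when source and destination overlap
 * within the same memory type. A TTM moved into fixed (io) memory is
 * unbound and destroyed afterwards, since it cannot stay bound there.
 * On error the old memory node is kept so the buffer remains valid at
 * its old placement.
 */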
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Move nonexistent data. NOP.
	 */
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}

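/*
 * Translate TTM caching flags into an OpenBSD pmap protection value
 * for CPU mappings: write-combining when requested and the platform
 * defines PMAP_WC, uncached otherwise. The tmp argument is accepted
 * for compatibility with the Linux interface but is unused here.
 */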
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#ifdef PMAP_WC
	if (caching_flags & TTM_PL_FLAG_WC)
		return PMAP_WC;
	else
#endif
		return PMAP_NOCACHE;
}
EXPORT_SYMBOL(ttm_io_prot);

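/*
 * Kmap helper for io memory: reuse the driver-supplied mapping
 * (bus.addr) when there is one, otherwise establish a linear
 * bus_space(9) mapping covering the requested range.
 */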
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int flags;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			flags = BUS_SPACE_MAP_PREFETCHABLE;
		else
			flags = 0;

		if (bus_space_map(bo->bdev->memt,
		    mem->bus.base + bo->mem.bus.offset + offset,
		    size, BUS_SPACE_MAP_LINEAR | flags,
		    &bo->mem.bus.bsh)) {
			printf("%s bus_space_map failed\n", __func__);
			map->virtual = 0;
		} else
			map->virtual = bus_space_vaddr(bo->bdev->memt,
			    bo->mem.bus.bsh);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

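/*
 * Kmap helper for ttm (system) memory: a single cached page can be
 * mapped with kmap(); larger ranges, or placements needing special
 * caching, go through vmap().
 */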
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

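/*
 * ttm_bo_kmap
 *
 * Map part of a buffer object into kernel address space, dispatching
 * to ttm_bo_ioremap() or ttm_bo_kmap_ttm() depending on whether the
 * current placement is io memory. A minimal usage sketch, assuming
 * "bo" is a reserved, idle buffer object:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virt;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access virt, honouring is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 */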
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

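/*
 * Undo a mapping set up by ttm_bo_kmap(), using the inverse of
 * whichever mapping method was chosen, then drop the io reservation.
 */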
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		bus_space_unmap(bo->bdev->memt, bo->mem.bus.bsh,
		    bo->mem.bus.size);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual, bo->mem.bus.size);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->virtual);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

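/*
 * ttm_bo_move_accel_cleanup
 *
 * Finish an accelerated (GPU) move fenced by sync_obj. Evictions are
 * simply waited on before the old node is freed. Ordinary moves hand
 * the old placement to a throw-away "ghost" buffer object created by
 * ttm_buffer_object_transfer(), so the old memory is released only
 * once the GPU copy has completed, while the caller can go on using
 * the buffer at its new placement.
 */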
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);