/*	$OpenBSD: ttm_bo_util.c,v 1.5 2014/02/10 02:24:05 jsg Exp $	*/
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/refcount.h>

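/*
 * Local prototypes.  ttm_mem_reg_ioremap()/ttm_mem_reg_iounmap() wrap the
 * bus_space(9) mapping of a memory region; kmap()/kunmap() and
 * vmap()/vunmap() are small OpenBSD stand-ins for the Linux page mapping
 * interfaces and are implemented near the bottom of this file.
 */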
int	 ttm_mem_reg_ioremap(struct ttm_bo_device *, struct ttm_mem_reg *,
	     void **);
void	 ttm_mem_reg_iounmap(struct ttm_bo_device *, struct ttm_mem_reg *,
	     void *);

void	*kmap(struct vm_page *);
void	 kunmap(void *addr);
void	*vmap(struct vm_page **, unsigned int, unsigned long, pgprot_t);
void	 vunmap(void *, size_t);

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

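/*
 * Move a TTM-backed buffer by rebinding its pages rather than copying:
 * unbind and release the old placement (if it was not system memory),
 * adjust the caching state, then bind the TTM to the new placement.
 */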
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

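/*
 * Serialize I/O space reservation and eviction for a memory type.
 * Drivers that set io_reserve_fastpath skip the rwlock entirely.
 */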
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return rw_enter(&man->io_reserve_rwlock, RW_WRITE | RW_INTR);

	rw_enter_write(&man->io_reserve_rwlock);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	rw_exit_write(&man->io_reserve_rwlock);
}

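/*
 * Make room for a new I/O reservation by unmapping the least recently
 * used buffer object on the manager's io_reserve_lru list.
 */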
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

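/*
 * Reserve and release driver I/O resources for a memory region.  The
 * reservation is refcounted through bus.io_reserved_count; when the driver
 * reports -EAGAIN, LRU mappings are evicted and the reservation retried.
 */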
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

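/*
 * Per-object helpers that hold an I/O reservation on behalf of CPU (VM)
 * mappings and keep the object on the manager's I/O reserve LRU so it can
 * be evicted by ttm_mem_io_evict().
 */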
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

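/*
 * Map and unmap an I/O memory region for CPU access during a move.  On
 * OpenBSD the mapping is done with bus_space_map(9) (BUS_SPACE_MAP_LINEAR,
 * plus BUS_SPACE_MAP_PREFETCHABLE for write-combined placements) rather
 * than ioremap().
 */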
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;
	int flags;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			flags = BUS_SPACE_MAP_PREFETCHABLE;
		else
			flags = 0;

		if (bus_space_map(bdev->memt, mem->bus.base + mem->bus.offset,
		    mem->bus.size, BUS_SPACE_MAP_LINEAR | flags, &mem->bus.bsh)) {
			printf("%s bus_space_map failed\n", __func__);
			return -ENOMEM;
		}

		addr = bus_space_vaddr(bdev->memt, mem->bus.bsh);

		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		bus_space_unmap(bdev->memt, mem->bus.bsh, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

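/*
 * Page copy helpers for ttm_bo_move_memcpy(): I/O to I/O, I/O to TTM page
 * and TTM page to I/O.  Pages that need a non-default protection are
 * mapped with vmap(), cached pages with kmap().
 */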
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
#ifdef notyet
		iowrite32(ioread32(srcP++), dstP++);
#else
		*dstP++ = *srcP++;
#endif
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct vm_page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst, PAGE_SIZE);
	else
		kunmap(d);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct vm_page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
	if (!src)
		return -ENOMEM;

/* The linear bus_space mapping is directly CPU addressable, so a plain
 * memcpy stands in for Linux's memcpy_toio(). */
#define memcpy_toio(d, s, n) memcpy(d, s, n)
	memcpy_toio(dst, src, PAGE_SIZE);

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src, PAGE_SIZE);
	else
		kunmap(s);

	return 0;
}

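/*
 * Fallback move path: map both placements and copy the contents page by
 * page with the CPU.  When source and destination overlap within the same
 * memory type, the copy runs backwards so data is not clobbered.
 */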
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Move nonexistent data. NOP.
	 */
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	DRM_MEMORYBARRIER();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

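/*
 * Destructor for the ghost object created by ttm_buffer_object_transfer().
 */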
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

#ifdef notyet
	init_waitqueue_head(&fbo->event_queue);
#endif
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	mtx_enter(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	mtx_leave(&bdev->fence_lock);
	refcount_init(&fbo->list_kref, 1);
	refcount_init(&fbo->kref, 1);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}

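/*
 * Translate TTM caching flags into OpenBSD pmap(9) cache attribute bits
 * for pmap_enter(); the Linux pgprot passed in is ignored on this port.
 */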
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#ifdef PMAP_WC
	if (caching_flags & TTM_PL_FLAG_WC)
		return PMAP_WC;
	else
#endif
		return PMAP_NOCACHE;
}
EXPORT_SYMBOL(ttm_io_prot);

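/*
 * Map the I/O backing of a buffer object for ttm_bo_kmap(), either by
 * reusing a driver-provided premapped address or via bus_space_map(9).
 */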
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int flags;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			flags = BUS_SPACE_MAP_PREFETCHABLE;
		else
			flags = 0;

		if (bus_space_map(bo->bdev->memt,
		    mem->bus.base + bo->mem.bus.offset + offset,
		    size, BUS_SPACE_MAP_LINEAR | flags,
		    &bo->mem.bus.bsh)) {
			printf("%s bus_space_map failed\n", __func__);
			map->virtual = 0;
		} else
			map->virtual = bus_space_vaddr(bo->bdev->memt,
			    bo->mem.bus.bsh);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

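/*
 * Minimal OpenBSD implementations of the Linux kmap()/kunmap() and
 * vmap()/vunmap() interfaces: kmap() uses the direct map where available
 * and falls back to a temporary kernel mapping; vmap() wires a contiguous
 * kernel VA range to the given pages with the requested cache attributes.
 */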
void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc(kernel_map, PAGE_SIZE);
	if (va == 0)
		return (NULL);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, PAGE_SIZE);
#endif
}

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
     pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		/* cache attribute bits (e.g. PMAP_WC) ride in the low bits of pa */
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}

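/*
 * Map TTM-backed (system memory) pages: a single cached page goes through
 * kmap(), anything else through vmap() with the protection derived from
 * the placement's caching flags.
 */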
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

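/*
 * Kernel-map a range of pages of a buffer object, dispatching to
 * ttm_bo_ioremap() for I/O memory and ttm_bo_kmap_ttm() for system pages.
 */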
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

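/*
 * Undo a ttm_bo_kmap() according to the mapping type recorded in the
 * kmap object, then drop the I/O reservation.
 */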
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		bus_space_unmap(bo->bdev->memt, bo->mem.bus.bsh,
		    bo->mem.bus.size);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual, bo->mem.bus.size);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->virtual);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

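/*
 * Finish an accelerated (GPU) move once the driver has queued it: attach
 * the move fence to the object and either wait and free the old placement
 * (eviction) or hand the old backing to a ghost object that is released
 * when the GPU operation completes.
 */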
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	mtx_enter(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_leave(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		mtx_leave(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
768