/*	$NetBSD: ttm_bo_util.c,v 1.20 2020/02/23 15:46:40 ad Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_util.c,v 1.20 2020/02/23 15:46:40 ad Exp $");

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

#ifdef __NetBSD__		/* PMAP_* caching flags for ttm_io_prot */
#include <uvm/uvm_pmap.h>
#include <linux/nbsd-namespace.h>
#endif

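/*
 * Release the memory manager node backing @bo's current placement.
 */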
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

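/*
 * Move a buffer whose data can live in its ttm_tt: unbind from the old
 * (non-system) placement and drop its node, adjust the page caching to
 * match @new_mem, then bind the ttm_tt to @new_mem unless it is plain
 * system memory.  On success bo->mem takes over @new_mem's node.
 */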
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

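/*
 * Serialize io_mem_reserve/io_mem_free calls for a memory type without
 * the fast path.  Returns 0, or the error from an interrupted
 * interruptible lock.  Paired with ttm_mem_io_unlock().
 */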
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

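/*
 * Unmap one buffer object from the io_reserve LRU so its I/O space
 * reservation can be reused.  Returns -EAGAIN if the LRU is unused or
 * empty.  Used by ttm_mem_io_reserve() to retry after the driver
 * reports -EAGAIN.
 */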
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


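/*
 * Reserve the driver's I/O space for @mem.  With the fast path the driver
 * hook is called directly; otherwise a per-region reservation count is
 * kept and, on -EAGAIN, LRU entries are evicted and the reservation is
 * retried.
 */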
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

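/*
 * Drop one reservation reference on @mem's I/O space and call the
 * driver's io_mem_free hook when the last reference goes away.  A no-op
 * for memory types using the fast path.
 */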
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

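/*
 * Reserve @bo's I/O space for a CPU (virtual) mapping and put the buffer
 * on the io_reserve LRU when that list is in use.  Undone by
 * ttm_mem_io_free_vm().
 */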
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

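/*
 * Map the memory described by @mem into kernel virtual space for a CPU
 * copy.  *virtual stays NULL for system memory; for iomem it is either
 * the pre-existing bus.addr or a fresh mapping (bus_space_map on NetBSD,
 * ioremap on Linux) honouring TTM_PL_FLAG_WC.
 */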
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
#ifdef __NetBSD__
		const bus_addr_t bus_addr = (mem->bus.base + mem->bus.offset);
		int flags = BUS_SPACE_MAP_LINEAR;

		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bdev->memt, bus_addr, mem->bus.size,
		    flags, &mem->bus.memh);
		if (ret) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return ret;
		}
		addr = bus_space_vaddr(bdev->memt, mem->bus.memh);
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
#endif
	}
	*virtual = addr;
	return 0;
}

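/*
 * Tear down a mapping made by ttm_mem_reg_ioremap() and release the I/O
 * space reservation.
 */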
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
#ifdef __NetBSD__
		bus_space_unmap(bdev->memt, mem->bus.memh, mem->bus.size);
#else
		iounmap(virtual);
#endif
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

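/*
 * Pointer-based ioread32/iowrite32 stand-ins used only by
 * ttm_copy_io_page() below; byte order and I/O barrier semantics are
 * still marked XXX.
 */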
#ifdef __NetBSD__
#  define	ioread32	fake_ioread32
#  define	iowrite32	fake_iowrite32

static inline uint32_t
ioread32(const volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	__insn_barrier();	/* XXX ttm io barrier */

	return v;		/* XXX ttm byte order */
}

static inline void
iowrite32(uint32_t v, volatile uint32_t *p)
{

	__insn_barrier();	/* XXX ttm io barrier */
	*p = v;			/* XXX ttm byte order */
}
#endif

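/*
 * Copy one page between two ioremapped regions, 32 bits at a time, at
 * page offset @page from each base address.
 */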
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef __NetBSD__
#  undef	ioread32
#  undef	iowrite32
#endif

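/*
 * Copy one page from an ioremapped source into page @page of the ttm_tt,
 * mapping the destination with the requested protection (kmap_atomic_prot
 * under CONFIG_X86, vmap or kmap otherwise).
 */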
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(dst, 1);
#else
		vunmap(dst);
#endif
	else
		kunmap(d);
#endif

	return 0;
}

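/*
 * Copy page @page of the ttm_tt out to an ioremapped destination, mapping
 * the source page with the requested protection as above.
 */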
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(src, 1);
#else
		vunmap(src);
#endif
	else
		kunmap(s);
#endif

	return 0;
}

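/*
 * Fallback move by CPU copy.  Old and new placements are mapped (when
 * they are iomem), the pages are copied in an order that is safe for
 * overlapping ranges, and the old node is released only if everything
 * succeeded.  When the destination is a fixed memory type the ttm_tt is
 * destroyed after the copy.
 */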
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	mutex_init(&fbo->wu_mutex);
#ifdef __NetBSD__
	drm_vma_node_init(&fbo->vma_node);
	uvm_obj_init(&fbo->uvmobj, bo->bdev->driver->ttm_uvm_ops, true, 1);
	rw_obj_hold(bo->uvmobj.vmobjlock);
	uvm_obj_setlock(&fbo->uvmobj, bo->uvmobj.vmobjlock);
#else
	drm_vma_node_reset(&fbo->vma_node);
#endif
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

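/*
 * Derive the page protection for a CPU mapping from the TTM caching
 * flags: cached placements leave @tmp unchanged, write-combined and
 * uncached placements get the corresponding pmap/pgprot attribute.
 */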
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#ifdef __NetBSD__
	tmp &= ~PMAP_CACHE_MASK;
	if (caching_flags & TTM_PL_FLAG_WC)
		return (tmp | PMAP_WRITE_COMBINE);
	else
		return (tmp | PMAP_NOCACHE);
#else
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

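/*
 * Map @size bytes of an iomem buffer at @offset into kernel virtual space
 * for ttm_bo_kmap(), either reusing a premapped bus.addr or creating a
 * new write-combined/uncached mapping.
 */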
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
#ifdef __NetBSD__
	    {
		bus_addr_t addr;
		int flags = BUS_SPACE_MAP_LINEAR;
		int ret;

		addr = (bo->mem.bus.base + bo->mem.bus.offset + offset);
		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bo->bdev->memt, addr, size, flags,
		    &map->u.io.memh);
		if (ret)
			return ret;
		map->u.io.size = size;
		map->virtual = bus_space_vaddr(bo->bdev->memt, map->u.io.memh);
	    }
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

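/*
 * Map pages of a ttm_tt backed (system memory) buffer for ttm_bo_kmap(),
 * populating the ttm_tt first if necessary.  A single cached page is
 * simply kmapped; anything else goes through vmap() with the protection
 * from ttm_io_prot().
 */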
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
#ifdef __NetBSD__
		map->u.kmapped.page = ttm->pages[start_page];
		map->virtual = kmap(map->u.kmapped.page);
#else
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
#endif
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
#ifdef __NetBSD__
		map->u.vmapped.vsize = (vsize_t)num_pages << PAGE_SHIFT;
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

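/*
 * Map @num_pages pages of @bo starting at @start_page into kernel virtual
 * space, reserving the buffer's I/O space first when it lives in iomem.
 * The mapping is described by @map and released with ttm_bo_kunmap().
 *
 * Rough sketch of a hypothetical caller (error handling elided, bo
 * assumed reserved):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *p = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access the buffer through p ...
 *		ttm_bo_kunmap(&map);
 *	}
 */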
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

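/*
 * Undo ttm_bo_kmap(): release the virtual mapping according to how it
 * was created and drop the I/O space reservation.
 */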
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
#ifdef __NetBSD__
		bus_space_unmap(bo->bdev->memt, map->u.io.memh,
		    map->u.io.size);
#else
		iounmap(map->virtual);
#endif
		break;
	case ttm_bo_map_vmap:
#ifdef __NetBSD__
		vunmap(map->virtual, map->u.vmapped.vsize >> PAGE_SHIFT);
#else
		vunmap(map->virtual);
#endif
		break;
	case ttm_bo_map_kmap:
#ifdef __NetBSD__
		kunmap(map->u.kmapped.page);
#else
		kunmap(map->page);
#endif
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
#ifndef __NetBSD__
	map->page = NULL;
#endif
}
EXPORT_SYMBOL(ttm_bo_kunmap);

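/*
 * Finish an accelerated (GPU) move once @fence has been attached to the
 * buffer.  For evictions the bo is waited on and its old node freed; for
 * ordinary moves the old placement is handed to a ghost buffer object
 * that is released when the fence signals, keeping the move pipelined.
 */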
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
824