/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 *$FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_vm.c 253710 2013-07-27 16:44:37Z kib $
 */

#include "opt_vm.h"

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/rbtree.h>

/*
 * In the Linux TTM fault handler this controls how many extra pages are
 * mapped in around a faulting address; it is unused in this port, where
 * the handler maps a single page per fault, and is kept for reference.
 */
#define TTM_BO_VM_NUM_PREFAULT 16

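/*
 * Find the buffer object whose address-space node covers the page range
 * [page_start, page_start + num_pages) in bdev's red-black tree.  The
 * tree is keyed on vm_node->start: walk down, remembering the last node
 * that starts at or before page_start, then verify that the candidate
 * actually spans the whole requested range.  Returns NULL when no
 * buffer object covers the range.  The caller must hold bdev->vm_lock.
 */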
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

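/*
 * Page-fault handler for TTM VM objects (the cdev_pg_fault method).
 * Called with the VM object write-locked; *mres may point at a page
 * pre-allocated by the fault code, which is freed here and replaced
 * with the page actually backing the buffer.  The handler reserves the
 * buffer object, waits out any pipelined move, resolves the faulting
 * offset either to a fictitious page (iomem placements) or to the
 * corresponding ttm page, sets the page's memory attributes to match
 * the placement, and enters it into the VM object.  Returns a
 * VM_PAGER_* status code.
 */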
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_remove(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	/*
	 * Spin, yielding the CPU, until the buffer is reserved.  Only
	 * -EBUSY is retried; a blocking, uninterruptible reserve is not
	 * expected to fail in any other way.
	 */
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			lwkt_yield();
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
		case -EINTR:
			lwkt_yield();
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait).  There must then be some mechanism to
		 * resume page fault handling once the signal has been
		 * processed.
		 *
		 * On FreeBSD, the wait is uninterruptible.  This is not
		 * a problem, as we cannot end up with an unkillable
		 * process here: the wait will eventually time out.
		 *
		 * An example of why an interruptible wait would be a
		 * problem is the Xorg process, which uses SIGALRM
		 * internally.  The signal could interrupt the wait,
		 * causing the page fault to fail and the process to
		 * receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, the most common usage. */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
	if ((m->flags & PG_BUSY) != 0) {
#if 0
		vm_page_sleep(m, "ttmpbs");
#endif
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	vm_page_busy_try(m, FALSE);

	if (oldm != NULL) {
		vm_page_free(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}

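/*
 * Pager constructor, called when the VM object is set up during
 * mmap(); see the comment in the body for why no additional buffer
 * object reference is taken here.
 */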
static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */
	*color = 0;
	return (0);
}

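/*
 * Pager destructor, called when the VM object is destroyed: releases
 * the buffer object reference that ttm_bo_mmap_single() transferred
 * to vm_obj->handle.
 */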
static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

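/*
 * Create (or look up) the VM object backing an mmap() of a buffer
 * object.  *offset is the buffer's offset within the device address
 * space; on success it is reset to 0 because the returned VM object
 * covers only this buffer.  The bo reference taken here is handed over
 * to vm_obj->handle and released by ttm_bo_vm_dtor().  Unlike the
 * Linux code, errors are returned as positive errno values.
 *
 * A driver's d_mmap_single entry point typically just forwards here.
 * A minimal sketch, assuming a hypothetical "mydrv" driver that keeps
 * its ttm_bo_device in the softc hung off si_drv1:
 *
 *	static int
 *	mydrv_mmap_single(struct dev_mmap_single_args *ap)
 *	{
 *		struct mydrv_softc *sc = ap->a_head.a_dev->si_drv1;
 *
 *		return (ttm_bo_mmap_single(&sc->bdev, ap->a_offset,
 *		    ap->a_size, ap->a_object, ap->a_nprot));
 *	}
 */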
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	*obj_res = NULL;

	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		kref_get(&bo->kref);
	lockmgr(&bdev->vm_lock, LK_RELEASE);

	if (unlikely(bo == NULL)) {
		kprintf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);

	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap_single);

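/*
 * Tear down the user-space mappings of a buffer object: look up its
 * device-pager VM object, evict every resident page from it so that
 * later accesses refault through ttm_bo_vm_fault(), and drop the
 * reference returned by cdev_pager_lookup().
 */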
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_WLOCK(vm_obj);
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}

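/*
 * The remainder of this file contains the Linux-derived
 * implementations of ttm_fbdev_mmap(), ttm_bo_io() and
 * ttm_bo_fbdev_io(), disabled with "#if 0" and kept for reference;
 * they still rely on Linux interfaces such as struct vm_area_struct
 * and struct file that this port does not provide.
 */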
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);


ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	lockmgr(&bdev->vm_lock, LK_RELEASE);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
#endif