/*	$OpenBSD: ttm_bo_vm.c,v 1.10 2016/04/05 08:22:50 kettenis Exp $	*/
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>

#define TTM_BO_VM_NUM_PREFAULT 16

ssize_t	 ttm_bo_fbdev_io(struct ttm_buffer_object *, const char __user *,
	     char __user *, size_t, off_t *, bool);
struct ttm_buffer_object *
	 ttm_bo_vm_lookup_rb(struct ttm_bo_device *, unsigned long,
	     unsigned long);

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

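/*
 * RB tree comparison: order buffer objects by the start offset of
 * their address-space (vm_node) range.
 */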
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

	if (a->vm_node->start < b->vm_node->start) {
		return (-1);
	} else if (a->vm_node->start > b->vm_node->start) {
		return (1);
	} else {
		return (0);
	}
}

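/*
 * Walk the address-space RB tree for the buffer object whose vm_node
 * covers the page range [page_start, page_start + num_pages).
 * Returns NULL if no object fully contains the range.
 */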
struct ttm_buffer_object *
ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
		    unsigned long page_start,
		    unsigned long num_pages)
{
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	bo = RB_ROOT(&bdev->addr_space_rb);
	while (bo != NULL) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			best_bo = bo;
			if (page_start == cur_offset)
				break;
			bo = RB_RIGHT(bo, vm_rb);
		} else
			bo = RB_LEFT(bo, vm_rb);
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

int ttm_bo_vm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *,
        int, int, vm_fault_t, vm_prot_t, int);
void ttm_bo_vm_reference(struct uvm_object *);
void ttm_bo_vm_detach(struct uvm_object *);

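/*
 * uvm_pagerops fault handler: reserve the buffer object, wait for any
 * pipelined move to finish, then enter mappings for up to
 * TTM_BO_VM_NUM_PREFAULT pages starting at the faulting address.
 */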
int
ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	struct ttm_tt *ttm = NULL;
	struct vm_page *page;
	bus_addr_t addr;
	paddr_t paddr;
	vm_prot_t mapprot;
	int pmap_flags;
	boolean_t locked = TRUE;
	int ret;
	int i;
	unsigned long address = (unsigned long)vaddr;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		uvmfault_unlockall(ufi, NULL, uobj, NULL);
		ret = ttm_bo_reserve(bo, true, false, false, 0);
		locked = uvmfault_relock(ufi);
		if (!locked)
			return VM_PAGER_REFAULT;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
#if 0
			set_need_resched();
#else
			printf("resched?\n");
#endif
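			/* FALLTHROUGH */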
		case -ERESTARTSYS:
			retval = VM_PAGER_REFAULT;
			goto out_unlock;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_PAGER_ERROR : VM_PAGER_REFAULT;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_REFAULT;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	page_offset = ((address - ufi->entry->start) >> PAGE_SHIFT) +
	    bo->vm_node->start - (ufi->entry->offset >> PAGE_SHIFT);
	page_last = ((ufi->entry->end - ufi->entry->start) >> PAGE_SHIFT) +
	    bo->vm_node->start - (ufi->entry->offset >> PAGE_SHIFT);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->rwlock, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	mapprot = ufi->entry->protection;
	if (bo->mem.bus.is_iomem) {
		pmap_flags = ttm_io_prot(bo->mem.placement, 0);
	} else {
		ttm = bo->ttm;
		pmap_flags = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    0 : ttm_io_prot(bo->mem.placement, 0);

		/* Allocate all pages at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem) {
			addr = bo->mem.bus.base + bo->mem.bus.offset;
			paddr = bus_space_mmap(bdev->memt, addr,
					       page_offset << PAGE_SHIFT,
					       mapprot, 0);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_PAGER_ERROR;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			paddr = VM_PAGE_TO_PHYS(page);
		}

		ret = pmap_enter(ufi->orig_map->pmap, vaddr,
		    paddr | pmap_flags, mapprot, PMAP_CANFAIL | mapprot);

		/*
		 * Somebody beat us to this PTE, or we prefaulted into an
		 * already populated PTE, or the prefault failed.
		 */

		if (ret != 0 && i > 0)
			break;
		else if (unlikely(ret != 0)) {
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    NULL, NULL);
			uvm_wait("ttmflt");
			return VM_PAGER_REFAULT;
		}

		address += PAGE_SIZE;
		vaddr += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	pmap_update(ufi->orig_map->pmap);
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL, NULL);
	ttm_bo_unreserve(bo);
	return retval;
}

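/*
 * pgo_reference: take an additional reference on both the UVM object
 * and the underlying TTM buffer object.
 */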
void
ttm_bo_vm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)uobj;

	(void)ttm_bo_reference(bo);
	uobj->uo_refs++;
}

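/*
 * pgo_detach: drop the UVM object reference together with the
 * corresponding TTM buffer object reference.
 */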
void
ttm_bo_vm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	uobj->uo_refs--;
	ttm_bo_unref(&bo);
}

struct uvm_pagerops ttm_bo_vm_ops = {
	.pgo_fault = ttm_bo_vm_fault,
	.pgo_reference = ttm_bo_vm_reference,
	.pgo_detach = ttm_bo_vm_detach
};

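/*
 * Look up the buffer object backing the mmap range and return its
 * UVM object with an extra reference, or NULL if no matching object
 * is found or the driver lacks a verify_access hook.
 */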
struct uvm_object *
ttm_bo_mmap(voff_t off, vsize_t size, struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, off >> PAGE_SHIFT, size >> PAGE_SHIFT);
	if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
		bo = NULL;
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		pr_err("Could not find buffer object to map\n");
		return NULL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
#ifdef notyet
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;
#endif

	bo->uobj.pgops = &ttm_bo_vm_ops;

#if 0
	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
#else
	bo->uobj.uo_refs++;
#endif
	return &bo->uobj;
out_unref:
	ttm_bo_unref(&bo);
	return NULL;
}
EXPORT_SYMBOL(ttm_bo_mmap);

#ifdef notyet
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
#endif
EXPORT_SYMBOL(ttm_fbdev_mmap);


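/*
 * Read/write helper for the device node: look up, reserve and kmap
 * the buffer object covering *f_pos, then copy between the kernel
 * mapping and the user buffer.
 */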
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  off_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

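/*
 * Like ttm_bo_io(), but operates on a caller-supplied buffer object
 * (fbdev), so no address-space lookup or access check is needed.
 */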
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, off_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
525