/*	$NetBSD: ttm_bo_vm.c,v 1.28 2024/06/23 00:49:31 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.28 2024/06/23 00:49:31 riastradh Exp $");

#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>

#include <linux/bitops.h>

#include <drm/drm_vma_manager.h>

#include <ttm/ttm_bo_driver.h>

static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
		    unsigned long, struct ttm_buffer_object **);

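/*
 * ttm_bo_uvm_reference(uobj)
 *
 *	uvm_object pager reference operation: take a reference on the
 *	ttm_buffer_object containing uobj.
 */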
void
ttm_bo_uvm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	(void)ttm_bo_get(bo);
}

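/*
 * ttm_bo_uvm_detach(uobj)
 *
 *	uvm_object pager detach operation: release the reference on the
 *	ttm_buffer_object containing uobj.
 */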
void
ttm_bo_uvm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	ttm_bo_put(bo);
}

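/*
 * ttm_bo_vm_fault_idle(bo, vmf)
 *
 *	Wait for any pipelined move of bo to finish before allowing a
 *	fault to map its pages.  Return 0 if the fault may proceed (or
 *	be retried from userland); return ERESTART after dropping the
 *	fault state and bo's reservation if the fault must be retried.
 */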
static int
ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
{
	int err, ret = 0;

	if (__predict_true(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (1) {		/* always retriable in NetBSD */
		ret = ERESTART;

		ttm_bo_get(bo);
		uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
		(void) dma_fence_wait(bo->moving, true);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_fence_wait(bo->moving, true);
	if (__predict_false(err != 0)) {
		ret = (err != -ERESTARTSYS) ? EINVAL/*SIGBUS*/ :
		    0/*retry access in userland*/;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

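/*
 * ttm_bo_vm_reserve(bo, vmf)
 *
 *	Try to reserve bo for fault processing without risking deadlock
 *	against the fault's locks.  Return 0 with bo reserved on
 *	success; otherwise drop the fault state, wait for the
 *	reservation to become available, and return ERESTART so the
 *	fault is retried.
 */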
static int
ttm_bo_vm_reserve(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
{

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (__predict_false(!dma_resv_trylock(bo->base.resv))) {
		ttm_bo_get(bo);
		uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
			dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		return ERESTART;
	}

	return 0;
}

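/*
 * ttm_bo_uvm_fault_reserved(vmf, vaddr, pps, npages, centeridx,
 *     access_type, flags)
 *
 *	Handle a fault on a bo that the caller has already reserved:
 *	notify the driver, wait for any pipelined move, make sure the
 *	backing pages or I/O memory are in place, and enter the
 *	mappings with pmap_enter.  Return 0 on success or to retry the
 *	access from userland, EINVAL for SIGBUS, ENOMEM if backing
 *	pages could not be populated, or ERESTART (with everything
 *	unlocked) if the fault must be retried.
 */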
static int
ttm_bo_uvm_fault_reserved(struct uvm_faultinfo *vmf, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = vmf->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	union {
		bus_addr_t base;
		struct ttm_tt *ttm;
	} u;
	size_t size __diagused;
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot = vmf->entry->protection; /* VM_PROT_* */
	pgprot_t prot = vm_prot; /* VM_PROT_* | PMAP_* cacheability flags */
	int err, ret;

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return EINVAL;	/* SIGBUS */

	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			return 0;	/* retry access in userland */
		default:
			return EINVAL;	/* SIGBUS */
		}

		if (bo->moving != moving) {
			spin_lock(&ttm_bo_glob.lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&ttm_bo_glob.lru_lock);
		}
		dma_fence_put(moving);
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (__predict_false(ret != 0))
		return ret;

	err = ttm_mem_io_lock(man, true);
	if (__predict_false(err != 0))
		return 0;	/* retry access in userland */
	err = ttm_mem_io_reserve_vm(bo);
	if (__predict_false(err != 0)) {
		ret = EINVAL;	/* SIGBUS */
		goto out_io_unlock;
	}

	prot = ttm_io_prot(bo->mem.placement, prot);
	if (!bo->mem.bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC

		};

		u.ttm = bo->ttm;
		size = (size_t)bo->ttm->num_pages << PAGE_SHIFT;
		if (ttm_tt_populate(bo->ttm, &ctx)) {
			ret = ENOMEM;
			goto out_io_unlock;
		}
	} else {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
	}

	KASSERT(vmf->entry->start <= vaddr);
	KASSERT((vmf->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(vmf->entry->offset <= size);
	KASSERT((vaddr - vmf->entry->start) <= (size - vmf->entry->offset));
	KASSERTMSG(((size_t)npages << PAGE_SHIFT <=
		((size - vmf->entry->offset) - (vaddr - vmf->entry->start))),
	    "vaddr=%jx npages=%d bo=%p is_iomem=%d size=%zu"
	    " start=%jx offset=%jx",
	    (uintmax_t)vaddr, npages, bo, (int)bo->mem.bus.is_iomem, size,
	    (uintmax_t)vmf->entry->start, (uintmax_t)vmf->entry->offset);
	uoffset = (vmf->entry->offset + (vaddr - vmf->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
			continue;
		if (pps[i] == PGO_DONTCARE)
			continue;
		if (!bo->mem.bus.is_iomem) {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
		} else if (bdev->driver->io_mem_pfn) {
			paddr = (paddr_t)(*bdev->driver->io_mem_pfn)(bo,
			    startpage + i) << PAGE_SHIFT;
		} else {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, (off_t)(startpage + i) << PAGE_SHIFT,
			    vm_prot, 0);

			paddr = pmap_phys_address(cookie);
#if 0				/* XXX Why no PMAP_* flags added here? */
			mmapflags = pmap_mmap_flags(cookie);
#endif
		}
		ret = pmap_enter(vmf->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, PMAP_CANFAIL | prot);
		if (ret) {
			/*
			 * XXX Continue with ret=0 if i != centeridx,
			 * so we don't fail if only readahead pages
			 * fail?
			 */
			KASSERT(ret != ERESTART);
			break;
		}
	}
	pmap_update(vmf->orig_map->pmap);
	ret = 0;		/* retry access in userland */
out_io_unlock:
	ttm_mem_io_unlock(man);
	KASSERT(ret != ERESTART);
	return ret;
}

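/*
 * ttm_bo_uvm_fault(vmf, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	uvm_object pager fault handler for TTM buffer objects: reject
 *	copy-on-write mappings, reserve the bo, and hand the fault to
 *	ttm_bo_uvm_fault_reserved.  On ERESTART everything has already
 *	been unlocked; otherwise unlock the fault state before
 *	returning.
 */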
int
ttm_bo_uvm_fault(struct uvm_faultinfo *vmf, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = vmf->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	rw_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(vmf->entry)) {
		ret = EINVAL;	/* SIGBUS */
		goto out;
	}

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret) {
		/* ttm_bo_vm_reserve already unlocked on ERESTART */
		KASSERTMSG(ret == ERESTART, "ret=%d", ret);
		return ret;
	}

	ret = ttm_bo_uvm_fault_reserved(vmf, vaddr, pps, npages, centeridx,
	    access_type, flags);
	if (ret == ERESTART)	/* already unlocked on ERESTART */
		return ret;

	dma_resv_unlock(bo->base.resv);

out:	uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
	return ret;
}

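/*
 * ttm_bo_mmap_object(bdev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the buffer object backing the byte range [offset,
 *	offset + size) in bdev's mmap space, check access with the
 *	driver's verify_access, and on success return its uvm_object in
 *	*uobjp, the offset into it in *uoffsetp, and 0, with a
 *	reference to the bo held.  Return an error on failure.
 */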
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
	KASSERTMSG((drm_vma_node_start(&bo->base.vma_node) <= startpage),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG((npages <= drm_vma_node_size(&bo->base.vma_node)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG(((startpage - drm_vma_node_start(&bo->base.vma_node))
		<= (drm_vma_node_size(&bo->base.vma_node) - npages)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));

	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	*uoffsetp = (offset -
	    ((off_t)drm_vma_node_start(&bo->base.vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_put(bo);
fail0:	KASSERT(ret);
	return ret;
}

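/*
 * ttm_bo_uvm_lookup(bdev, startpage, npages, bop)
 *
 *	Look up the ttm_buffer_object whose mmap space covers the page
 *	range [startpage, startpage + npages) in bdev's vma manager.
 *	On success, store it in *bop with a reference held and return
 *	0; otherwise return -ENOENT.
 */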
static int
ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
    unsigned long npages, struct ttm_buffer_object **bop)
{
	struct ttm_buffer_object *bo = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage,
	    npages);
	if (node != NULL) {
		bo = container_of(node, struct ttm_buffer_object,
		    base.vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (bo == NULL)
		return -ENOENT;

	*bop = bo;
	return 0;
}